From f3306cff658c9dad6934c72726d098d37dd31dfc Mon Sep 17 00:00:00 2001
From: Toni
Date: Fri, 28 May 2021 09:51:47 +0300
Subject: [PATCH 001/160] run black & isort

---
 docs/v1/source/conf.py | 75 +-
 docs/v2/source/conf.py | 75 +-
 src/metax_api/__init__.py | 2 +-
 .../api/oaipmh/base/metax_oai_server.py | 407 +-
 src/metax_api/api/oaipmh/base/view.py | 97 +-
 src/metax_api/api/rest/base/router.py | 52 +-
 .../api/rest/base/serializers/__init__.py | 2 +-
 .../serializers/catalog_record_serializer.py | 579 ++-
 .../base/serializers/common_serializer.py | 151 +-
 .../base/serializers/contract_serializer.py | 9 +-
 .../serializers/data_catalog_serializer.py | 54 +-
 .../base/serializers/directory_serializer.py | 72 +-
 .../rest/base/serializers/file_serializer.py | 199 +-
 .../serializers/file_storage_serializer.py | 17 +-
 .../rest/base/serializers/serializer_utils.py | 19 +-
 .../serializers/xml_metadata_serializer.py | 13 +-
 .../api/rest/base/views/api_error_view.py | 14 +-
 .../api/rest/base/views/common_view.py | 101 +-
 .../api/rest/base/views/contract_view.py | 15 +-
 .../api/rest/base/views/data_catalog_view.py | 7 +-
 .../api/rest/base/views/dataset_view.py | 83 +-
 .../api/rest/base/views/directory_view.py | 79 +-
 .../api/rest/base/views/file_storage_view.py | 7 +-
 .../api/rest/base/views/file_view.py | 114 +-
 .../api/rest/base/views/schema_view.py | 6 +-
 src/metax_api/api/rest/v2/router.py | 87 +-
 .../serializers/catalog_record_serializer.py | 93 +-
 .../v2/serializers/data_catalog_serializer.py | 26 +-
 .../api/rest/v2/views/dataset_view.py | 64 +-
 src/metax_api/api/rpc/base/router.py | 11 +-
 .../api/rpc/base/views/common_rpc.py | 4 +-
 .../api/rpc/base/views/dataset_rpc.py | 119 +-
 .../api/rpc/base/views/elasticsearch_rpc.py | 47 +-
 src/metax_api/api/rpc/base/views/file_rpc.py | 41 +-
 .../api/rpc/base/views/statistic_rpc.py | 114 +-
 src/metax_api/api/rpc/v2/router.py | 9 +-
 src/metax_api/api/rpc/v2/views/dataset_rpc.py | 50 +-
 src/metax_api/checks.py | 8 +-
 src/metax_api/exceptions/http_exceptions.py | 11 +-
 .../management/commands/first_time_setup.py | 2 +-
 .../management/commands/fix_file_counts.py | 5 +-
 .../management/commands/fix_file_counts_cr.py | 7 +-
 .../commands/fix_file_counts_for_cr.py | 3 +-
 .../management/commands/load_data_to_TTV.py | 12 +-
 .../management/commands/loadinitialdata.py | 11 +-
 src/metax_api/models/catalog_record.py | 2001 ++++----
 src/metax_api/models/catalog_record_v2.py | 758 +--
 src/metax_api/models/common.py | 75 +-
 src/metax_api/models/contract.py | 23 +-
 src/metax_api/models/data_catalog.py | 64 +-
 src/metax_api/models/directory.py | 89 +-
 src/metax_api/models/file.py | 49 +-
 src/metax_api/models/file_storage.py | 16 +-
 src/metax_api/models/metax_user.py | 1 +
 src/metax_api/models/xml_metadata.py | 4 +-
 src/metax_api/parsers/parsers.py | 5 +-
 src/metax_api/permissions/permissions.py | 83 +-
 src/metax_api/renderers/renderers.py | 8 +-
 src/metax_api/services/api_error_service.py | 83 +-
 src/metax_api/services/auth_service.py | 34 +-
 src/metax_api/services/callable_service.py | 6 +-
 .../services/catalog_record_service.py | 932 ++--
 .../services/catalog_record_service_v2.py | 7 +-
 src/metax_api/services/common_service.py | 261 +-
 .../services/data_catalog_service.py | 99 +-
 src/metax_api/services/datacite_service.py | 377 +-
 src/metax_api/services/file_service.py | 955 ++--
 src/metax_api/services/pagination.py | 16 +-
 src/metax_api/services/rabbitmq_service.py | 34 +-
 src/metax_api/services/redis_cache_service.py | 21 +-
 .../services/reference_data_mixin.py | 199 +-
 src/metax_api/services/rems_service.py | 195 +-
 src/metax_api/services/schema_service.py | 17 +-
 src/metax_api/services/statistic_service.py | 277 +-
 src/metax_api/settings/__init__.py | 18 +-
 .../settings/components/access_control.py | 85 +-
 src/metax_api/settings/components/common.py | 2 +-
 .../settings/components/elasticsearch.py | 2 +-
 src/metax_api/settings/components/rabbitmq.py | 6 +-
 src/metax_api/settings/environments/local.py | 15 +-
 .../settings/environments/production.py | 2 +-
 src/metax_api/settings/environments/stable.py | 24 +-
 .../refdata_indexer/domain/indexable_data.py | 4 +-
 .../refdata/refdata_indexer/es_index_data.py | 8 +-
 .../service/finto_data_service.py | 12 +-
 .../service/organization_service.py | 4 +-
 src/metax_api/tests/api/oaipmh/minimal_api.py | 331 +-
 src/metax_api/tests/api/oaipmh/syke.py | 48 +-
 .../rest/base/serializers/file_serializer.py | 25 +-
 .../rest/base/serializers/serializer_utils.py | 38 +-
 .../api/rest/base/views/apierrors/read.py | 32 +-
 .../tests/api/rest/base/views/common/auth.py | 28 +-
 .../tests/api/rest/base/views/common/read.py | 197 +-
 .../tests/api/rest/base/views/common/write.py | 284 +-
 .../rest/base/views/contracts/contracts.py | 241 +-
 .../api/rest/base/views/datacatalogs/read.py | 14 +-
 .../api/rest/base/views/datacatalogs/write.py | 168 +-
 .../api/rest/base/views/datasets/read.py | 1150 +++--
 .../api/rest/base/views/datasets/write.py | 4222 ++++++++++-------
 .../api/rest/base/views/directories/read.py | 1483 +++---
 .../api/rest/base/views/directories/write.py | 217 +-
 .../tests/api/rest/base/views/files/read.py | 254 +-
 .../tests/api/rest/base/views/files/write.py | 1287 +++--
 .../api/rest/base/views/filestorages/read.py | 15 +-
 .../api/rest/base/views/filestorages/write.py | 27 +-
 .../tests/api/rest/base/views/schemas/read.py | 19 +-
 .../tests/api/rest/v2/views/apierrors/read.py | 121 +-
 .../tests/api/rest/v2/views/common/auth.py | 35 +-
 .../tests/api/rest/v2/views/common/read.py | 201 +-
 .../tests/api/rest/v2/views/common/write.py | 290 +-
 .../api/rest/v2/views/contracts/contracts.py | 241 +-
 .../api/rest/v2/views/datacatalogs/read.py | 12 +-
 .../api/rest/v2/views/datacatalogs/write.py | 157 +-
 .../v2/views/datasets/api_version_lock.py | 232 +-
 .../api/rest/v2/views/datasets/drafts.py | 588 ++-
 .../rest/v2/views/datasets/filehandling.py | 917 ++--
 .../tests/api/rest/v2/views/datasets/pas.py | 288 +-
 .../tests/api/rest/v2/views/datasets/read.py | 1005 ++--
 .../rest/v2/views/datasets/referencedata.py | 783 +--
 .../tests/api/rest/v2/views/datasets/rems.py | 444 +-
 .../tests/api/rest/v2/views/datasets/write.py | 1940 +++++---
 .../api/rest/v2/views/directories/read.py | 1487 +++---
 .../api/rest/v2/views/directories/write.py | 206 +-
 .../tests/api/rest/v2/views/files/read.py | 257 +-
 .../tests/api/rest/v2/views/files/write.py | 1272 +++--
 .../api/rest/v2/views/filestorages/read.py | 15 +-
 .../api/rest/v2/views/filestorages/write.py | 27 +-
 .../tests/api/rest/v2/views/schemas/read.py | 19 +-
 .../tests/api/rpc/base/views/common_rpc.py | 5 +-
 .../tests/api/rpc/base/views/dataset_rpc.py | 506 +-
 .../tests/api/rpc/base/views/file_rpc.py | 48 +-
 .../tests/api/rpc/base/views/statistic_rpc.py | 756 +--
 .../tests/api/rpc/v2/views/common_rpc.py | 5 +-
 .../tests/api/rpc/v2/views/dataset_rpc.py | 261 +-
 .../tests/api/rpc/v2/views/file_rpc.py | 48 +-
 .../tests/api/rpc/v2/views/statistic_rpc.py | 766 +--
 .../management/commands/loadinitialdata.py | 36 +-
 .../tests/middleware/test_middleware.py | 102 +-
 .../tests/middleware/test_middleware_v2.py | 102 +-
 src/metax_api/tests/models/catalog_record.py | 95 +-
 src/metax_api/tests/models/common.py | 46 +-
 src/metax_api/tests/models/data_catalog.py | 8 +-
 src/metax_api/tests/models/directory.py | 16 +-
 src/metax_api/tests/models/file.py | 52 +-
 src/metax_api/tests/rabbitmq/consume.py | 31 +-
 .../tests/services/reference_data_mixin.py | 42 +-
 .../testdata/fetch_and_update_datasets.py | 125 +-
 .../tests/testdata/generate_test_data.py | 764 +--
 src/metax_api/tests/testdata/request_test.py | 2 +-
 src/metax_api/tests/utils.py | 94 +-
 src/metax_api/urls.py | 24 +-
 src/metax_api/utils/reference_data_loader.py | 133 +-
 src/metax_api/utils/utils.py | 59 +-
 src/metax_api/views/router.py | 4 +-
 src/metax_api/views/secure/secure_view.py | 38 +-
 155 files changed, 20420 insertions(+), 14209 deletions(-)

diff --git a/docs/v1/source/conf.py b/docs/v1/source/conf.py
index 13e88985..9f1010eb 100755
--- a/docs/v1/source/conf.py
+++ b/docs/v1/source/conf.py
@@ -19,14 +19,14 @@
 
 # -- Project information -----------------------------------------------------
 
-project = 'Metax API'
-copyright = '2018, csc.fi'
-author = 'csc.fi'
+project = "Metax API"
+copyright = "2018, csc.fi"
+author = "csc.fi"
 
 # The short X.Y version
-version = ''
+version = ""
 # The full version, including alpha/beta/rc tags
-release = ''
+release = ""
 
 
 # -- General configuration ---------------------------------------------------
@@ -38,20 +38,19 @@
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-extensions = [
-]
+extensions = []
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 #
 # source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -66,7 +65,7 @@ exclude_patterns = []
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 
 
 # -- Options for HTML output -------------------------------------------------
@@ -74,7 +73,7 @@
 # The theme to use for HTML and HTML Help pages. See the documentation for
 # a list of builtin themes.
 #
-html_theme = 'sphinx_rtd_theme'
+html_theme = "sphinx_rtd_theme"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further. For a list of options available for each theme, see the
@@ -82,25 +81,25 @@
 #
 # html_theme_options = {}
 html_theme_options = {
-    'canonical_url': '',
-    'analytics_id': '',
-    'logo_only': False,
-    'display_version': True,
-    'prev_next_buttons_location': 'bottom',
-    'style_external_links': False,
+    "canonical_url": "",
+    "analytics_id": "",
+    "logo_only": False,
+    "display_version": True,
+    "prev_next_buttons_location": "bottom",
+    "style_external_links": False,
     # Toc options
-    'collapse_navigation': True,
-    'sticky_navigation': True,
-    'navigation_depth': -1,
-    'includehidden': True,
-    'titles_only': False
+    "collapse_navigation": True,
+    "sticky_navigation": True,
+    "navigation_depth": -1,
+    "includehidden": True,
+    "titles_only": False,
 }
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 # Custom sidebar templates, must be a dictionary that maps document names
 # to template names.
@@ -116,7 +115,7 @@
 # -- Options for HTMLHelp output ---------------------------------------------
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'MetaxAPIdoc'
+htmlhelp_basename = "MetaxAPIdoc"
 
 
 # -- Options for LaTeX output ------------------------------------------------
@@ -125,15 +124,12 @@
     # The paper size ('letterpaper' or 'a4paper').
    #
     # 'papersize': 'letterpaper',
-
     # The font size ('10pt', '11pt' or '12pt').
    #
     # 'pointsize': '10pt',
-
     # Additional stuff for the LaTeX preamble.
    #
     # 'preamble': '',
-
     # Latex figure (float) alignment
    #
     # 'figure_align': 'htbp',
@@ -143,8 +139,7 @@
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    (master_doc, 'MetaxAPI.tex', 'Metax API Documentation',
-     'csc.fi', 'manual'),
+    (master_doc, "MetaxAPI.tex", "Metax API Documentation", "csc.fi", "manual"),
 ]
 
 
@@ -152,10 +147,7 @@
 
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [
-    (master_doc, 'metaxapi', 'Metax API Documentation',
-     [author], 1)
-]
+man_pages = [(master_doc, "metaxapi", "Metax API Documentation", [author], 1)]
 
 
 # -- Options for Texinfo output ----------------------------------------------
@@ -164,10 +156,17 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    (master_doc, 'MetaxAPI', 'Metax API Documentation',
-     author, 'MetaxAPI', 'One line description of project.',
-     'Miscellaneous'),
+    (
+        master_doc,
+        "MetaxAPI",
+        "Metax API Documentation",
+        author,
+        "MetaxAPI",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
 ]
 
+
 def setup(app):
-    app.add_stylesheet('custom.css')
+    app.add_stylesheet("custom.css")
diff --git a/docs/v2/source/conf.py b/docs/v2/source/conf.py
index 15550993..33ac0958 100755
--- a/docs/v2/source/conf.py
+++ b/docs/v2/source/conf.py
@@ -19,14 +19,14 @@
 
 # -- Project information -----------------------------------------------------
 
-project = 'Metax API'
-copyright = '2020, csc.fi'
-author = 'csc.fi'
+project = "Metax API"
+copyright = "2020, csc.fi"
+author = "csc.fi"
 
 # The short X.Y version
-version = ''
+version = ""
 # The full version, including alpha/beta/rc tags
-release = ''
+release = ""
 
 
 # -- General configuration ---------------------------------------------------
@@ -38,20 +38,19 @@
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-extensions = [
-]
+extensions = []
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 #
 # source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -66,7 +65,7 @@ exclude_patterns = []
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 
 
 # -- Options for HTML output -------------------------------------------------
@@ -74,7 +73,7 @@
 # The theme to use for HTML and HTML Help pages. See the documentation for
 # a list of builtin themes.
 #
-html_theme = 'sphinx_rtd_theme'
+html_theme = "sphinx_rtd_theme"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further. For a list of options available for each theme, see the
@@ -82,25 +81,25 @@
 #
 # html_theme_options = {}
 html_theme_options = {
-    'canonical_url': '',
-    'analytics_id': '',
-    'logo_only': False,
-    'display_version': True,
-    'prev_next_buttons_location': 'bottom',
-    'style_external_links': False,
+    "canonical_url": "",
+    "analytics_id": "",
+    "logo_only": False,
+    "display_version": True,
+    "prev_next_buttons_location": "bottom",
+    "style_external_links": False,
     # Toc options
-    'collapse_navigation': True,
-    'sticky_navigation': True,
-    'navigation_depth': -1,
-    'includehidden': True,
-    'titles_only': False
+    "collapse_navigation": True,
+    "sticky_navigation": True,
+    "navigation_depth": -1,
+    "includehidden": True,
+    "titles_only": False,
 }
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 # Custom sidebar templates, must be a dictionary that maps document names
 # to template names.
@@ -116,7 +115,7 @@
 # -- Options for HTMLHelp output ---------------------------------------------
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'MetaxAPIdoc'
+htmlhelp_basename = "MetaxAPIdoc"
 
 
 # -- Options for LaTeX output ------------------------------------------------
@@ -125,15 +124,12 @@
     # The paper size ('letterpaper' or 'a4paper').
    #
     # 'papersize': 'letterpaper',
-
     # The font size ('10pt', '11pt' or '12pt').
    #
     # 'pointsize': '10pt',
-
     # Additional stuff for the LaTeX preamble.
    #
     # 'preamble': '',
-
     # Latex figure (float) alignment
    #
     # 'figure_align': 'htbp',
@@ -143,8 +139,7 @@
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    (master_doc, 'MetaxAPI.tex', 'Metax API Documentation',
-     'csc.fi', 'manual'),
+    (master_doc, "MetaxAPI.tex", "Metax API Documentation", "csc.fi", "manual"),
 ]
 
 
@@ -152,10 +147,7 @@
 
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [
-    (master_doc, 'metaxapi', 'Metax API Documentation',
-     [author], 1)
-]
+man_pages = [(master_doc, "metaxapi", "Metax API Documentation", [author], 1)]
 
 
 # -- Options for Texinfo output ----------------------------------------------
@@ -164,10 +156,17 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    (master_doc, 'MetaxAPI', 'Metax API Documentation',
-     author, 'MetaxAPI', 'Part of the Fairdata services.',
-     'Miscellaneous'),
+    (
+        master_doc,
+        "MetaxAPI",
+        "Metax API Documentation",
+        author,
+        "MetaxAPI",
+        "Part of the Fairdata services.",
+        "Miscellaneous",
+    ),
 ]
 
+
 def setup(app):
-    app.add_stylesheet('custom.css')
+    app.add_stylesheet("custom.css")
diff --git a/src/metax_api/__init__.py b/src/metax_api/__init__.py
index 2ace85dc..60ef1371 100755
--- a/src/metax_api/__init__.py
+++ b/src/metax_api/__init__.py
@@ -5,4 +5,4 @@
 # :author: CSC - IT Center for Science Ltd., Espoo Finland
 # :license: MIT
 
-default_app_config = 'metax_api.onappstart.OnAppStart'
+default_app_config = "metax_api.onappstart.OnAppStart"
diff --git a/src/metax_api/api/oaipmh/base/metax_oai_server.py b/src/metax_api/api/oaipmh/base/metax_oai_server.py
index f6a83b83..2fd78634 100755
--- a/src/metax_api/api/oaipmh/base/metax_oai_server.py
+++ b/src/metax_api/api/oaipmh/base/metax_oai_server.py
@@ -11,44 +11,53 @@
 from django.utils import timezone
 from oaipmh import common
 from oaipmh.common import ResumptionOAIPMH
-from oaipmh.error import BadArgumentError, CannotDisseminateFormatError, IdDoesNotExistError, NoRecordsMatchError
+from oaipmh.error import (
+    BadArgumentError,
+    CannotDisseminateFormatError,
+    IdDoesNotExistError,
+    NoRecordsMatchError,
+)
 
 from metax_api.models.catalog_record import CatalogRecord, DataCatalog
 from metax_api.services import CatalogRecordService as CRS
 from metax_api.services.datacite_service import DataciteException, convert_cr_to_datacite_cr_json
 
 # SYKE_IDENTIFIER_PREFIX
-SYKE_URL_PREFIX_TEMPLATE = 'https://metadata.ymparisto.fi/dataset/%s'
-DATACATALOGS_SET = 'datacatalogs'
-DATASETS_SET = 'datasets'
-OAI_DC_MDPREFIX = 'oai_dc'
-OAI_DATACITE_MDPREFIX = 'oai_datacite'
-OAI_FAIRDATA_DATACITE_MDPREFIX = 'oai_fairdata_datacite'
-OAI_DC_URNRESOLVER_MDPREFIX = 'oai_dc_urnresolver'
+SYKE_URL_PREFIX_TEMPLATE = "https://metadata.ymparisto.fi/dataset/%s"
+DATACATALOGS_SET = "datacatalogs"
+DATASETS_SET = "datasets"
+OAI_DC_MDPREFIX = "oai_dc"
+OAI_DATACITE_MDPREFIX = "oai_datacite"
+OAI_FAIRDATA_DATACITE_MDPREFIX = "oai_fairdata_datacite"
+OAI_DC_URNRESOLVER_MDPREFIX = "oai_dc_urnresolver"
 
 
 class MetaxOAIServer(ResumptionOAIPMH):
-
     def _validate_mdprefix_and_set(self, metadataPrefix, set=None):
         if not set:
             pass
         elif set == DATACATALOGS_SET:
             if metadataPrefix != OAI_DC_MDPREFIX:
-                raise BadArgumentError('Invalid metadataPrefix value. Data catalogs can only be harvested using '
-                                       '{0} format.'.format(OAI_DC_MDPREFIX))
-        elif set in settings.OAI['SET_MAPPINGS']:
+                raise BadArgumentError(
+                    "Invalid metadataPrefix value. Data catalogs can only be harvested using "
+                    "{0} format.".format(OAI_DC_MDPREFIX)
+                )
+        elif set in settings.OAI["SET_MAPPINGS"]:
             if set != DATASETS_SET and metadataPrefix == OAI_DC_URNRESOLVER_MDPREFIX:
-                raise BadArgumentError('When using metadataPrefix {0}, set value must be either {1} or {2}'
-                                       .format(OAI_DC_URNRESOLVER_MDPREFIX, DATACATALOGS_SET, DATASETS_SET))
+                raise BadArgumentError(
+                    "When using metadataPrefix {0}, set value must be either {1} or {2}".format(
+                        OAI_DC_URNRESOLVER_MDPREFIX, DATACATALOGS_SET, DATASETS_SET
+                    )
+                )
         else:
-            raise BadArgumentError('Invalid set value')
+            raise BadArgumentError("Invalid set value")
 
     @staticmethod
     def _get_default_set_filter():
         # there are not that many sets yet, so just using list even though
         # there will be duplicates
         catalog_urns = []
-        for k, v in settings.OAI['SET_MAPPINGS'].items():
+        for k, v in settings.OAI["SET_MAPPINGS"].items():
             catalog_urns.extend(v)
         return catalog_urns
 
@@ -70,25 +79,36 @@ def _get_urnresolver_record_data(self, set, cursor, batch_size, from_=None, unti
 
         # Fetch only needed values as dict to increase performance.
         records = records.values(
-            'identifier',
-            'date_created',
-            'date_modified',
-            'data_catalog__catalog_json',
-            'research_dataset')
+            "identifier",
+            "date_created",
+            "date_modified",
+            "data_catalog__catalog_json",
+            "research_dataset",
+        )
 
         data = []
         for record in records:
             metadatas = self._get_oai_dc_urnresolver_metadatas_for_record(record)
             for md in metadatas:
-                item = (common.Header('', self._get_record_identifier(record, set),
-                                      self._get_header_timestamp(record), ['metax'], False),
-                        common.Metadata('', md), None)
+                item = (
+                    common.Header(
+                        "",
+                        self._get_record_identifier(record, set),
+                        self._get_header_timestamp(record),
+                        ["metax"],
+                        False,
+                    ),
+                    common.Metadata("", md),
+                    None,
+                )
                 data.append(item)
 
         cursor_end = cursor + batch_size if cursor + batch_size < len(data) else len(data)
         return data[cursor:cursor_end]
 
-    def _get_filtered_records_data(self, verb, metadata_prefix, set, cursor, batch_size, from_=None, until=None):
+    def _get_filtered_records_data(
+        self, verb, metadata_prefix, set, cursor, batch_size, from_=None, until=None
+    ):
         proxy = CatalogRecord
         if set == DATACATALOGS_SET:
             proxy = DataCatalog
@@ -108,25 +128,43 @@ def _get_filtered_records_data(self, verb, metadata_prefix, set, cursor, batch_s
                 pass
             else:
                 query_set = query_set.filter(
-                    data_catalog__catalog_json__identifier__in=settings.OAI['SET_MAPPINGS'][set])
+                    data_catalog__catalog_json__identifier__in=settings.OAI["SET_MAPPINGS"][set]
+                )
         else:
-            query_set = query_set.filter(data_catalog__catalog_json__identifier__in=self._get_default_set_filter())
-        query_set = query_set.filter(state='published')
+            query_set = query_set.filter(
+                data_catalog__catalog_json__identifier__in=self._get_default_set_filter()
+            )
+        query_set = query_set.filter(state="published")
 
         data = []
         for record in query_set:
-            if verb == 'ListRecords':
+            if verb == "ListRecords":
                 try:
-                    oai_item = self._get_oai_item(self._get_record_identifier(record, set), record, metadata_prefix)
+                    oai_item = self._get_oai_item(
+                        self._get_record_identifier(record, set),
+                        record,
+                        metadata_prefix,
+                    )
                     data.append(oai_item)
                 except CannotDisseminateFormatError as e:
-                    if metadata_prefix == OAI_FAIRDATA_DATACITE_MDPREFIX or metadata_prefix == OAI_DATACITE_MDPREFIX:
+                    if (
+                        metadata_prefix == OAI_FAIRDATA_DATACITE_MDPREFIX
+                        or metadata_prefix == OAI_DATACITE_MDPREFIX
+                    ):
                         pass
                     else:
                         raise e
-            elif verb == 'ListIdentifiers':
+            elif verb == "ListIdentifiers":
                 identifier = self._get_record_identifier(record, set)
-                data.append(common.Header('', identifier, self._get_header_timestamp(record), ['metax'], False))
+                data.append(
+                    common.Header(
+                        "",
+                        identifier,
+                        self._get_header_timestamp(record),
+                        ["metax"],
+                        False,
+                    )
+                )
             else:
                 raise Exception("OAI-PMH bad code error")
 
@@ -135,11 +173,11 @@ def _handle_syke_urnresolver_metadata(self, record):
         identifiers = []
-        preferred_identifier = record.research_dataset.get('preferred_identifier')
+        preferred_identifier = record.research_dataset.get("preferred_identifier")
         identifiers.append(preferred_identifier)
-        for id_obj in record.research_dataset.get('other_identifier', []):
-            if id_obj.get('notation', '').startswith('{'):
-                uuid = id_obj['notation']
+        for id_obj in record.research_dataset.get("other_identifier", []):
+            if id_obj.get("notation", "").startswith("{"):
+                uuid = id_obj["notation"]
                 identifiers.append(SYKE_URL_PREFIX_TEMPLATE % uuid)
                 break
         return identifiers
@@ -156,76 +194,84 @@ def _get_oai_dc_urnresolver_metadatas_for_record(self, record):
 
         metadatas = []
         if isinstance(record, dict):
-            pref_id = record['research_dataset'].get('preferred_identifier')
-            dc_id = record['data_catalog__catalog_json'].get('identifier')
-            is_harvested = record['data_catalog__catalog_json'].get('harvested', False) is True
-            if record['research_dataset'].get('other_identifier') is not None:
-                other_ids = record['research_dataset'].get('other_identifier')
+            pref_id = record["research_dataset"].get("preferred_identifier")
+            dc_id = record["data_catalog__catalog_json"].get("identifier")
+            is_harvested = record["data_catalog__catalog_json"].get("harvested", False) is True
+            if record["research_dataset"].get("other_identifier") is not None:
+                other_ids = record["research_dataset"].get("other_identifier")
             else:
                 other_ids = []
 
-            if dc_id == 'urn:nbn:fi:att:data-catalog-harvest-syke':
+            if dc_id == "urn:nbn:fi:att:data-catalog-harvest-syke":
                 for id_obj in other_ids:
-                    if id_obj.get('notation', '').startswith('{'):
-                        metadatas.append({'identifier': [SYKE_URL_PREFIX_TEMPLATE % id_obj['notation'], pref_id]})
+                    if id_obj.get("notation", "").startswith("{"):
+                        metadatas.append(
+                            {
+                                "identifier": [
+                                    SYKE_URL_PREFIX_TEMPLATE % id_obj["notation"],
+                                    pref_id,
+                                ]
+                            }
+                        )
                         break
 
             elif dc_id not in settings.LEGACY_CATALOGS:
-                resolution_url = settings.OAI['ETSIN_URL_TEMPLATE'] % record['identifier']
-                if not is_harvested and (pref_id.startswith('urn:nbn:fi:att:') or
-                                         pref_id.startswith('urn:nbn:fi:csc')):
-                    metadatas.append({'identifier': [resolution_url, pref_id]})
+                resolution_url = settings.OAI["ETSIN_URL_TEMPLATE"] % record["identifier"]
+                if not is_harvested and (
+                    pref_id.startswith("urn:nbn:fi:att:") or pref_id.startswith("urn:nbn:fi:csc")
+                ):
+                    metadatas.append({"identifier": [resolution_url, pref_id]})
 
                 for id_obj in other_ids:
-                    if id_obj.get('notation', '').startswith('urn:nbn:fi:csc-kata'):
-                        metadatas.append({'identifier': [resolution_url, id_obj['notation']]})
+                    if id_obj.get("notation", "").startswith("urn:nbn:fi:csc-kata"):
+                        metadatas.append({"identifier": [resolution_url, id_obj["notation"]]})
 
         return metadatas
 
     def _get_oaic_dc_value(self, value, lang=None):
         valueDict = {}
-        valueDict['value'] = value
+        valueDict["value"] = value
         if lang:
-            valueDict['lang'] = lang
+            valueDict["lang"] = lang
         return valueDict
 
     def _get_oai_dc_metadata(self, record, json):
         identifier = []
-        if 'preferred_identifier' in json:
-            identifier.append(self._get_oaic_dc_value(json.get('preferred_identifier')))
-        if 'identifier' in json:
-            identifier.append(self._get_oaic_dc_value(json.get('identifier')))
+        if "preferred_identifier" in json:
+            identifier.append(self._get_oaic_dc_value(json.get("preferred_identifier")))
+        if "identifier" in json:
+            identifier.append(self._get_oaic_dc_value(json.get("identifier")))
 
         title = []
-        title_data = json.get('title', {})
+        title_data = json.get("title", {})
         for key, value in title_data.items():
             title.append(self._get_oaic_dc_value(value, key))
 
         creator = []
-        creator_data = json.get('creator', [])
+        creator_data = json.get("creator", [])
         for value in creator_data:
-            if 'name' in value:
-                if isinstance(value['name'], dict):
-                    for key, val in value['name'].items():
+            if "name" in value:
+                if isinstance(value["name"], dict):
+                    for key, val in value["name"].items():
                         creator.append(self._get_oaic_dc_value(val, key))
                 else:
-                    creator.append(self._get_oaic_dc_value(value.get('name')))
+                    creator.append(self._get_oaic_dc_value(value.get("name")))
 
         subject = []
-        subject_data = json.get('keyword', [])
+        subject_data = json.get("keyword", [])
         for value in subject_data:
             subject.append(self._get_oaic_dc_value(value))
-        subject_data = json.get('field_of_science', [])
+        subject_data = json.get("field_of_science", [])
         for value in subject_data:
-            for key, value2 in value.get('pref_label', {}).items():
+            for key, value2 in value.get("pref_label", {}).items():
                 subject.append(self._get_oaic_dc_value(value2, key))
-        subject_data = json.get('theme', [])
+        subject_data = json.get("theme", [])
         for value in subject_data:
-            for key, value2 in value.get('pref_label', {}).items():
+            for key, value2 in value.get("pref_label", {}).items():
                 subject.append(self._get_oaic_dc_value(value2, key))
 
         desc = []
-        desc_data = json.get('description', None)
+        desc_data = json.get("description", None)
         if desc_data is not None:
             if isinstance(desc_data, dict):
                 for key, value in desc_data.items():
@@ -234,71 +280,71 @@ def _get_oai_dc_metadata(self, record, json):
                     desc.append(self._get_oaic_dc_value(value, key))
             else:
                 desc.append(desc_data)
 
         publisher = []
-        publisher_data = json.get('publisher', {})
-        for key, value in publisher_data.get('name', {}).items():
+        publisher_data = json.get("publisher", {})
+        for key, value in publisher_data.get("name", {}).items():
             publisher.append(self._get_oaic_dc_value(value, key))
 
         contributor = []
-        contributor_data = json.get('contributor', [])
+        contributor_data = json.get("contributor", [])
         for value in contributor_data:
-            if 'name' in value:
-                if isinstance(value['name'], dict):
-                    for key, val in value['name'].items():
+            if "name" in value:
+                if isinstance(value["name"], dict):
+                    for key, val in value["name"].items():
                         contributor.append(self._get_oaic_dc_value(val, key))
                 else:
-                    contributor.append(self._get_oaic_dc_value(value.get('name')))
+                    contributor.append(self._get_oaic_dc_value(value.get("name")))
 
         date = self._get_oaic_dc_value(str(record.date_created))
 
         language = []
-        language_data = json.get('language', [])
+        language_data = json.get("language", [])
         for value in language_data:
-            if 'identifier' in value:
-                language.append(self._get_oaic_dc_value(value['identifier']))
+            if "identifier" in value:
+                language.append(self._get_oaic_dc_value(value["identifier"]))
 
         relation = []
-        relation_data = json.get('relation', [])
+        relation_data = json.get("relation", [])
         for value in relation_data:
-            if 'identifier' in value.get('entity', {}):
-                relation.append(self._get_oaic_dc_value(value['entity']['identifier']))
+            if "identifier" in value.get("entity", {}):
+                relation.append(self._get_oaic_dc_value(value["entity"]["identifier"]))
 
         coverage = []
-        coverage_data = json.get('spatial', [])
+        coverage_data = json.get("spatial", [])
         for value in coverage_data:
-            if 'geographic_name' in value:
-                coverage.append(self._get_oaic_dc_value(value['geographic_name']))
+            if "geographic_name" in value:
+                coverage.append(self._get_oaic_dc_value(value["geographic_name"]))
 
         rights = []
-        rights_data = json.get('access_rights', {})
-        rights_desc = rights_data.get('description', {}).get('name', {})
+        rights_data = json.get("access_rights", {})
+        rights_desc = rights_data.get("description", {}).get("name", {})
         for key, value in rights_desc.items():
             rights.append(self._get_oaic_dc_value(value, key))
 
-        for value in rights_data.get('license', []):
-            if 'identifier' in value:
-                rights.append(self._get_oaic_dc_value(value['identifier']))
+        for value in rights_data.get("license", []):
+            if "identifier" in value:
+                rights.append(self._get_oaic_dc_value(value["identifier"]))
 
         if isinstance(record, CatalogRecord):
-            m_type = 'Dataset'
+            m_type = "Dataset"
         elif isinstance(record, DataCatalog):
-            m_type = 'Datacatalog'
+            m_type = "Datacatalog"
         else:
-            m_type = 'N/A'
+            m_type = "N/A"
 
         meta = {
-            'identifier': identifier,
-            'title': title,
-            'creator': creator,
-            'subject': subject,
-            'description': desc,
-            'publisher': publisher,
-            'contributor': contributor,
-            'date': [date],
-            'type': [self._get_oaic_dc_value(m_type)],
-            'language': language,
-            'relation': relation,
-            'coverage': coverage,
-            'rights': rights
+            "identifier": identifier,
+            "title": title,
+            "creator": creator,
+            "subject": subject,
+            "description": desc,
+            "publisher": publisher,
+            "contributor": contributor,
+            "date": [date],
+            "type": [self._get_oaic_dc_value(m_type)],
+            "language": language,
+            "relation": relation,
+            "coverage": coverage,
+            "rights": rights,
         }
 
         return meta
@@ -310,9 +356,9 @@ def _get_oai_datacite_metadata(self, cr, datacite_type):
             raise CannotDisseminateFormatError(str(e))
 
         meta = {
-            'datacentreSymbol': 'Metax',
-            'schemaVersion': '4.1',
-            'payload': datacite_xml
+            "datacentreSymbol": "Metax",
+            "schemaVersion": "4.1",
+            "payload": datacite_xml,
         }
 
         return meta
@@ -328,9 +374,9 @@ def _get_metadata_for_record(self, record, metadataPrefix):
         if metadataPrefix == OAI_DC_MDPREFIX:
             meta = self._get_oai_dc_metadata(record, json)
         elif metadataPrefix == OAI_FAIRDATA_DATACITE_MDPREFIX:
-            meta = self._get_oai_datacite_metadata(record, 'fairdata_datacite')
+            meta = self._get_oai_datacite_metadata(record, "fairdata_datacite")
         elif metadataPrefix == OAI_DATACITE_MDPREFIX:
-            meta = self._get_oai_datacite_metadata(record, 'datacite')
+            meta = self._get_oai_datacite_metadata(record, "datacite")
 
         return self._fix_metadata(meta)
 
@@ -339,15 +385,18 @@ def _get_header_timestamp(self, record):
         Can handle record as json or object.
         """
         if isinstance(record, dict):
-            modified = record.get('date_modified', None)
-            timestamp = modified if modified is not None else record['date_created']
+            modified = record.get("date_modified", None)
+            timestamp = modified if modified is not None else record["date_created"]
         else:
             timestamp = record.date_modified if record.date_modified else record.date_created
         return timezone.make_naive(timestamp)
 
     def _get_oai_item(self, identifier, record, metadata_prefix):
-        item = (common.Header('', identifier, self._get_header_timestamp(record), ['metax'], False),
-                common.Metadata('', self._get_metadata_for_record(record, metadata_prefix)), None)
+        item = (
+            common.Header("", identifier, self._get_header_timestamp(record), ["metax"], False),
+            common.Metadata("", self._get_metadata_for_record(record, metadata_prefix)),
+            None,
+        )
         return item
 
     def _fix_metadata(self, meta):
@@ -366,91 +415,128 @@ def _get_record_identifier(self, record, set):
         Can handle record as json or object.
         """
         if set == DATACATALOGS_SET:
-            return record['catalog_json__identifier'] if isinstance(record, dict) else record.catalog_json['identifier']
+            return (
+                record["catalog_json__identifier"]
+                if isinstance(record, dict)
+                else record.catalog_json["identifier"]
+            )
         else:
-            return record['identifier'] if isinstance(record, dict) else record.identifier
+            return record["identifier"] if isinstance(record, dict) else record.identifier
 
-# OAI-PMH VERBS
+    # OAI-PMH VERBS
 
     def identify(self):
         """Implement OAI-PMH verb Identify ."""
-        first = CatalogRecord.objects.filter(
-            data_catalog__catalog_json__identifier__in=self._get_default_set_filter()
-        ).order_by(
-            'date_created'
-        ).values_list('date_created', flat=True).first()
+        first = (
+            CatalogRecord.objects.filter(
+                data_catalog__catalog_json__identifier__in=self._get_default_set_filter()
+            )
+            .order_by("date_created")
+            .values_list("date_created", flat=True)
+            .first()
+        )
         if first:
             first = timezone.make_naive(first)
         else:
             first = datetime.datetime.now()
         return common.Identify(
-            repositoryName=settings.OAI['REPOSITORY_NAME'],
-            baseURL=settings.OAI['BASE_URL'],
+            repositoryName=settings.OAI["REPOSITORY_NAME"],
+            baseURL=settings.OAI["BASE_URL"],
             protocolVersion="2.0",
-            adminEmails=[settings.OAI['ADMIN_EMAIL']],
+            adminEmails=[settings.OAI["ADMIN_EMAIL"]],
             earliestDatestamp=first,
-            deletedRecord='no',
-            granularity='YYYY-MM-DDThh:mm:ssZ',
-            compression=['identity'])
+            deletedRecord="no",
+            granularity="YYYY-MM-DDThh:mm:ssZ",
+            compression=["identity"],
+        )
 
     def listMetadataFormats(self, identifier=None):
         """Implement OAI-PMH verb listMetadataFormats ."""
-        return [(OAI_DC_MDPREFIX,
-                 'http://www.openarchives.org/OAI/2.0/oai_dc.xsd',
-                 'http://www.openarchives.org/OAI/2.0/oai_dc/'),
-                (OAI_FAIRDATA_DATACITE_MDPREFIX,
-                 'https://schema.datacite.org/meta/kernel-4.1/metadata.xsd',
-                 'https://schema.datacite.org/meta/kernel-4.1/'),
-                (OAI_DATACITE_MDPREFIX,
-                 'https://schema.datacite.org/meta/kernel-4.1/metadata.xsd',
-                 'https://schema.datacite.org/meta/kernel-4.1/'),
-                (OAI_DC_URNRESOLVER_MDPREFIX,
-                 '',
-                 '')
-                ]
+        return [
+            (
+                OAI_DC_MDPREFIX,
+                "http://www.openarchives.org/OAI/2.0/oai_dc.xsd",
+                "http://www.openarchives.org/OAI/2.0/oai_dc/",
+            ),
+            (
+                OAI_FAIRDATA_DATACITE_MDPREFIX,
+                "https://schema.datacite.org/meta/kernel-4.1/metadata.xsd",
+                "https://schema.datacite.org/meta/kernel-4.1/",
+            ),
+            (
+                OAI_DATACITE_MDPREFIX,
+                "https://schema.datacite.org/meta/kernel-4.1/metadata.xsd",
+                "https://schema.datacite.org/meta/kernel-4.1/",
+            ),
+            (OAI_DC_URNRESOLVER_MDPREFIX, "", ""),
+        ]
 
     def listSets(self, cursor=None, batch_size=None):
         """Implement OAI-PMH verb ListSets."""
-        data = [(DATACATALOGS_SET, DATACATALOGS_SET, '')]
-        for set_key in settings.OAI['SET_MAPPINGS'].keys():
-            data.append((set_key, set_key, ''))
+        data = [(DATACATALOGS_SET, DATACATALOGS_SET, "")]
+        for set_key in settings.OAI["SET_MAPPINGS"].keys():
+            data.append((set_key, set_key, ""))
         return data
 
-    def listIdentifiers(self, metadataPrefix=None, set=None, cursor=None,
-                        from_=None, until=None, batch_size=None):
+    def listIdentifiers(
+        self,
+        metadataPrefix=None,
+        set=None,
+        cursor=None,
+        from_=None,
+        until=None,
+        batch_size=None,
+    ):
         """Implement OAI-PMH verb listIdentifiers."""
         if metadataPrefix == OAI_DC_URNRESOLVER_MDPREFIX:
-            raise BadArgumentError('Invalid metadataPrefix value. It can be only used with ListRecords verb')
+            raise BadArgumentError(
+                "Invalid metadataPrefix value. It can be only used with ListRecords verb"
+            )
         self._validate_mdprefix_and_set(metadataPrefix, set)
-        return self._get_filtered_records_data('ListIdentifiers', metadataPrefix, set, cursor, batch_size, from_, until)
-
-    def listRecords(self, metadataPrefix=None, set=None, cursor=None, from_=None,
-                    until=None, batch_size=None):
+        return self._get_filtered_records_data(
+            "ListIdentifiers", metadataPrefix, set, cursor, batch_size, from_, until
+        )
+
+    def listRecords(
+        self,
+        metadataPrefix=None,
+        set=None,
+        cursor=None,
+        from_=None,
+        until=None,
+        batch_size=None,
+    ):
         """Implement OAI-PMH verb ListRecords."""
         self._validate_mdprefix_and_set(metadataPrefix, set)
         data = []
         if metadataPrefix == OAI_DC_URNRESOLVER_MDPREFIX:
             data = self._get_urnresolver_record_data(set, cursor, batch_size, from_, until)
         else:
-            data = self._get_filtered_records_data('ListRecords', metadataPrefix, set, cursor, batch_size, from_, until)
+            data = self._get_filtered_records_data(
+                "ListRecords", metadataPrefix, set, cursor, batch_size, from_, until
+            )
         return data
 
     def getRecord(self, metadataPrefix, identifier):
         """Implement OAI-PMH verb GetRecord."""
         try:
             if metadataPrefix == OAI_DC_URNRESOLVER_MDPREFIX:
-                raise BadArgumentError('Invalid metadataPrefix value. It can be only used with ListRecords verb')
+                raise BadArgumentError(
+                    "Invalid metadataPrefix value. It can be only used with ListRecords verb"
+                )
             record = CatalogRecord.objects.get(identifier__exact=identifier)
-            if record.state == 'draft':
+            if record.state == "draft":
                 raise IdDoesNotExistError("No record with identifier %s is available." % identifier)
         except CatalogRecord.DoesNotExist:
             try:
                 record = DataCatalog.objects.get(catalog_json__identifier__exact=identifier)
                 if record and metadataPrefix != OAI_DC_MDPREFIX:
-                    raise BadArgumentError('Invalid metadataPrefix value. Data catalogs can only be harvested using '
-                                           '{0} format.'.format(OAI_DC_MDPREFIX))
+                    raise BadArgumentError(
+                        "Invalid metadataPrefix value. Data catalogs can only be harvested using "
+                        "{0} format.".format(OAI_DC_MDPREFIX)
+                    )
             except DataCatalog.DoesNotExist:
                raise IdDoesNotExistError("No record with identifier %s is available." % identifier)
@@ -459,5 +545,8 @@ def getRecord(self, metadataPrefix, identifier):
         if metadata is None:
             raise NoRecordsMatchError
 
-        return (common.Header('', identifier, self._get_header_timestamp(record), ['metax'], False),
-                common.Metadata('', metadata), None)
\ No newline at end of file
+        return (
+            common.Header("", identifier, self._get_header_timestamp(record), ["metax"], False),
+            common.Metadata("", metadata),
+            None,
+        )
diff --git a/src/metax_api/api/oaipmh/base/view.py b/src/metax_api/api/oaipmh/base/view.py
index 96c0b6ad..ae29a2c4 100755
--- a/src/metax_api/api/oaipmh/base/view.py
+++ b/src/metax_api/api/oaipmh/base/view.py
@@ -20,72 +20,93 @@
     MetaxOAIServer,
 )
 
-NS_OAIDC_DATACITE = 'http://schema.datacite.org/oai/oai-1.0/'
+NS_OAIDC_DATACITE = "http://schema.datacite.org/oai/oai-1.0/"
 
-NS_OAIPMH = 'http://www.openarchives.org/OAI/2.0/'
-NS_XSI = 'http://www.w3.org/2001/XMLSchema-instance'
-NS_OAIDC = 'http://www.openarchives.org/OAI/2.0/oai_dc/'
+NS_OAIPMH = "http://www.openarchives.org/OAI/2.0/"
+NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"
+NS_OAIDC = "http://www.openarchives.org/OAI/2.0/oai_dc/"
 NS_DC = "http://purl.org/dc/elements/1.1/"
 NS_XML = "http://www.w3.org/XML/1998/namespace"
 
 
 def nsoai(name):
-    return '{%s}%s' % (NS_OAIPMH, name)
+    return "{%s}%s" % (NS_OAIPMH, name)
 
 
 def nsoaidc(name):
-    return '{%s}%s' % (NS_OAIDC, name)
+    return "{%s}%s" % (NS_OAIDC, name)
 
 
 def nsdc(name):
-    return '{%s}%s' % (NS_DC, name)
+    return "{%s}%s" % (NS_DC, name)
 
 
 def oai_dc_writer_with_lang(element, metadata):
-    e_dc = SubElement(element, nsoaidc('dc'),
-                      nsmap={'oai_dc': NS_OAIDC, 'dc': NS_DC, 'xsi': NS_XSI, 'xml': NS_XML})
-    e_dc.set('{%s}schemaLocation' % NS_XSI,
-             '%s http://www.openarchives.org/OAI/2.0/oai_dc.xsd' % NS_DC)
+    e_dc = SubElement(
+        element,
+        nsoaidc("dc"),
+        nsmap={"oai_dc": NS_OAIDC, "dc": NS_DC, "xsi": NS_XSI, "xml": NS_XML},
+    )
+    e_dc.set(
+        "{%s}schemaLocation" % NS_XSI,
+        "%s http://www.openarchives.org/OAI/2.0/oai_dc.xsd" % NS_DC,
+    )
 
     map = metadata.getMap()
     for name in [
-        'title', 'creator', 'subject', 'description', 'publisher',
-        'contributor', 'date', 'type', 'format', 'identifier',
-        'source', 'language', 'relation', 'coverage', 'rights'
+        "title",
+        "creator",
+        "subject",
+        "description",
+        "publisher",
+        "contributor",
+        "date",
+        "type",
+        "format",
+        "identifier",
+        "source",
+        "language",
+        "relation",
+        "coverage",
+        "rights",
     ]:
         try:
             for value in map.get(name, []):
                 e = SubElement(e_dc, nsdc(name))
-                if 'lang' in value:
-                    e.attrib['{http://www.w3.org/XML/1998/namespace}lang'] = value['lang']
-                e.text = value['value']
+                if "lang" in value:
+                    e.attrib["{http://www.w3.org/XML/1998/namespace}lang"] = value["lang"]
+                e.text = value["value"]
         except:
             pass
 
 
 def oai_fairdata_datacite_writer(element, metadata):
     e_dc = SubElement(element, OAI_FAIRDATA_DATACITE_MDPREFIX, nsmap={None: NS_OAIDC_DATACITE})
-    e_dc.set('{%s}schemaLocation' % NS_XSI,
-             'http://schema.datacite.org/oai/oai-1.0/ oai_datacite.xsd')
+    e_dc.set(
+        "{%s}schemaLocation" % NS_XSI,
+        "http://schema.datacite.org/oai/oai-1.0/ oai_datacite.xsd",
+    )
 
-    e = SubElement(e_dc, 'schemaVersion')
-    e.text = metadata['schemaVersion'][0]
-    e = SubElement(e_dc, 'datacentreSymbol')
-    e.text = metadata['datacentreSymbol'][0]
-    e = SubElement(e_dc, 'payload')
-    e.append(etree.fromstring(metadata['payload'][0]))
+    e = SubElement(e_dc, "schemaVersion")
+    e.text = metadata["schemaVersion"][0]
+    e = SubElement(e_dc, "datacentreSymbol")
+    e.text = metadata["datacentreSymbol"][0]
+    e = SubElement(e_dc, "payload")
+    e.append(etree.fromstring(metadata["payload"][0]))
 
 
 def oai_datacite_writer(element, metadata):
     e_dc = SubElement(element, OAI_DATACITE_MDPREFIX, nsmap={None: NS_OAIDC_DATACITE})
-    e_dc.set('{%s}schemaLocation' % NS_XSI,
-             'http://schema.datacite.org/oai/oai-1.0/ oai_datacite.xsd')
+    e_dc.set(
+        "{%s}schemaLocation" % NS_XSI,
+        "http://schema.datacite.org/oai/oai-1.0/ oai_datacite.xsd",
+    )
 
-    e = SubElement(e_dc, 'schemaVersion')
-    e.text = metadata['schemaVersion'][0]
-    e = SubElement(e_dc, 'datacentreSymbol')
-    e.text = metadata['datacentreSymbol'][0]
-    e = SubElement(e_dc, 'payload')
-    e.append(etree.fromstring(metadata['payload'][0]))
+    e = SubElement(e_dc, "schemaVersion")
+    e.text = metadata["schemaVersion"][0]
+    e = SubElement(e_dc, "datacentreSymbol")
+    e.text = metadata["datacentreSymbol"][0]
+    e = SubElement(e_dc, "payload")
+    e.append(etree.fromstring(metadata["payload"][0]))
 
 
 def oaipmh_view(request):
@@ -96,9 +117,11 @@
     metadata_registry.registerWriter(OAI_FAIRDATA_DATACITE_MDPREFIX, oai_fairdata_datacite_writer)
     metadata_registry.registerWriter(OAI_DATACITE_MDPREFIX, oai_datacite_writer)
 
-    server = oaiserver.BatchingServer(metax_server,
-                                      metadata_registry=metadata_registry,
-                                      resumption_batch_size=settings.OAI['BATCH_SIZE'])
+    server = oaiserver.BatchingServer(
+        metax_server,
+        metadata_registry=metadata_registry,
+        resumption_batch_size=settings.OAI["BATCH_SIZE"],
+    )
 
     xml = server.handleRequest(request.GET.dict())
 
-    return HttpResponse(xml, content_type='text/xml; charset=utf-8')
+    return HttpResponse(xml, content_type="text/xml; charset=utf-8")
diff --git a/src/metax_api/api/rest/base/router.py b/src/metax_api/api/rest/base/router.py
index b3bc11c2..146ff0a1 100755
--- a/src/metax_api/api/rest/base/router.py
+++ b/src/metax_api/api/rest/base/router.py
@@ -35,25 +35,27 @@
 
 
 class CustomRouter(DefaultRouter):
-
     def __init__(self, *args, **kwargs):
         """
         Override to allow PUT and PATCH methods in resource list url.
         """
         self.routes.pop(0)
-        self.routes.insert(0, Route(
-            url=r'^{prefix}{trailing_slash}$',
-            mapping={
-                'get': 'list',                  # original
-                'post': 'create',               # original
-                'put': 'update_bulk',           # custom
-                'patch': 'partial_update_bulk', # custom
-                'delete': 'destroy_bulk'        # custom
-            },
-            name='{basename}-list',
-            detail=False,
-            initkwargs={'suffix': 'List'}
-        ))
+        self.routes.insert(
+            0,
+            Route(
+                url=r"^{prefix}{trailing_slash}$",
+                mapping={
+                    "get": "list",  # original
+                    "post": "create",  # original
+                    "put": "update_bulk",  # custom
+                    "patch": "partial_update_bulk",  # custom
+                    "delete": "destroy_bulk",  # custom
+                },
+                name="{basename}-list",
+                detail=False,
+                initkwargs={"suffix": "List"},
+            ),
+        )
         super(CustomRouter, self).__init__(*args, **kwargs)
 
     def get_default_basename(self, viewset):
@@ -61,24 +63,24 @@
         When a viewset has no queryset set, or base_name is not passed to a
         router as the 3rd parameter, automatically determine base name.
""" - return viewset.__class__.__name__.split('View')[0] + return viewset.__class__.__name__.split("View")[0] router = CustomRouter(trailing_slash=False) -router.register(r'apierrors/?', ApiErrorViewSet) -router.register(r'contracts/?', ContractViewSet) -router.register(r'datasets/?', DatasetViewSet) -router.register(r'datacatalogs/?', DataCatalogViewSet) -router.register(r'directories/?', DirectoryViewSet) -router.register(r'files/?', FileViewSet) -router.register(r'filestorages/?', FileStorageViewSet) -router.register(r'schemas/?', SchemaViewSet) +router.register(r"apierrors/?", ApiErrorViewSet) +router.register(r"contracts/?", ContractViewSet) +router.register(r"datasets/?", DatasetViewSet) +router.register(r"datacatalogs/?", DataCatalogViewSet) +router.register(r"directories/?", DirectoryViewSet) +router.register(r"files/?", FileViewSet) +router.register(r"filestorages/?", FileStorageViewSet) +router.register(r"schemas/?", SchemaViewSet) # note: this somehow maps to list-api... but the end result works when # the presence of the parameters is inspected in the list-api method. router.register( - r'datasets/(?P.+)/metadata_versions/(?P.+)/?', - DatasetViewSet + r"datasets/(?P.+)/metadata_versions/(?P.+)/?", + DatasetViewSet, ) api_urlpatterns = router.urls diff --git a/src/metax_api/api/rest/base/serializers/__init__.py b/src/metax_api/api/rest/base/serializers/__init__.py index 68d4c8fc..1c243720 100755 --- a/src/metax_api/api/rest/base/serializers/__init__.py +++ b/src/metax_api/api/rest/base/serializers/__init__.py @@ -12,4 +12,4 @@ from .file_serializer import FileSerializer, LightFileSerializer from .file_storage_serializer import FileStorageSerializer from .serializer_utils import validate_json -from .xml_metadata_serializer import XmlMetadataSerializer \ No newline at end of file +from .xml_metadata_serializer import XmlMetadataSerializer diff --git a/src/metax_api/api/rest/base/serializers/catalog_record_serializer.py b/src/metax_api/api/rest/base/serializers/catalog_record_serializer.py index 99428620..acc6d675 100755 --- a/src/metax_api/api/rest/base/serializers/catalog_record_serializer.py +++ b/src/metax_api/api/rest/base/serializers/catalog_record_serializer.py @@ -29,85 +29,83 @@ # when end user creates a record, strip all fields except these END_USER_CREATE_ALLOWED_FIELDS = [ - 'data_catalog', - 'research_dataset', - + "data_catalog", + "research_dataset", # not set by the user, but are set by metax, so should not be discarded - 'date_created', - 'user_created', - '__request' + "date_created", + "user_created", + "__request", ] # when end user updates a record, strip all fields except these END_USER_UPDATE_ALLOWED_FIELDS = [ - 'research_dataset', - + "research_dataset", # not set by the user, but are set by metax, so should not be discarded - 'date_modified', - 'user_modified', - 'service_modified', - '__request' + "date_modified", + "user_modified", + "service_modified", + "__request", ] END_USER_ALLOWED_DATA_CATALOGS = django_settings.END_USER_ALLOWED_DATA_CATALOGS LEGACY_CATALOGS = django_settings.LEGACY_CATALOGS DFT_CATALOG = django_settings.DFT_DATA_CATALOG_IDENTIFIER -class CatalogRecordSerializer(CommonSerializer): +class CatalogRecordSerializer(CommonSerializer): class Meta: fields = ( - 'id', - 'identifier', - 'alternate_record_set', - 'contract', - 'data_catalog', - 'dataset_version_set', - 'deprecated', - 'date_deprecated', - 'metadata_owner_org', - 'metadata_provider_org', - 'metadata_provider_user', - 'research_dataset', - 'preservation_dataset_version', - 
'preservation_dataset_origin_version', - 'preservation_state', - 'preservation_state_modified', - 'preservation_description', - 'preservation_reason_description', - 'preservation_identifier', - 'next_dataset_version', - 'previous_dataset_version', - 'mets_object_identifier', - 'state', - 'use_doi_for_published', - 'editor', - 'cumulative_state', - 'date_cumulation_started', - 'date_cumulation_ended', - 'date_last_cumulative_addition', - 'rems_identifier', - 'access_granter', - 'api_meta' + "id", + "identifier", + "alternate_record_set", + "contract", + "data_catalog", + "dataset_version_set", + "deprecated", + "date_deprecated", + "metadata_owner_org", + "metadata_provider_org", + "metadata_provider_user", + "research_dataset", + "preservation_dataset_version", + "preservation_dataset_origin_version", + "preservation_state", + "preservation_state_modified", + "preservation_description", + "preservation_reason_description", + "preservation_identifier", + "next_dataset_version", + "previous_dataset_version", + "mets_object_identifier", + "state", + "use_doi_for_published", + "editor", + "cumulative_state", + "date_cumulation_started", + "date_cumulation_ended", + "date_last_cumulative_addition", + "rems_identifier", + "access_granter", + "api_meta", ) + CommonSerializer.Meta.fields extra_kwargs = { # these values are generated automatically or provide default values on creation. # some fields can be later updated by the user, some are generated - 'identifier': { 'required': False }, - 'preservation_state': { 'required': False }, - 'preservation_description': { 'required': False }, - 'preservation_state_modified': { 'required': False }, - 'mets_object_identifier': { 'required': False }, - 'next_dataset_version': { 'required': False }, - 'previous_dataset_version': { 'required': False }, - 'preservation_dataset_origin_version': { 'required': False }, + "identifier": {"required": False}, + "preservation_state": {"required": False}, + "preservation_description": {"required": False}, + "preservation_state_modified": {"required": False}, + "mets_object_identifier": {"required": False}, + "next_dataset_version": {"required": False}, + "previous_dataset_version": {"required": False}, + "preservation_dataset_origin_version": {"required": False}, } extra_kwargs.update(CommonSerializer.Meta.extra_kwargs) # schemas dir is effectively ../schemas/ - _schemas_directory_path = path.join(path.dirname(path.dirname(__file__)), 'schemas') + _schemas_directory_path = path.join(path.dirname(path.dirname(__file__)), "schemas") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -120,23 +118,26 @@ def is_valid(self, raise_exception=False): elif self._operation_is_update: self._end_user_update_validations(self.instance, self.initial_data) - if self.initial_data.get('data_catalog', False): - self.initial_data['data_catalog'] = self._get_id_from_related_object( - 'data_catalog', self._get_data_catalog_relation) - if self.initial_data.get('contract', False): - self.initial_data['contract'] = self._get_id_from_related_object('contract', self._get_contract_relation) - - self.initial_data.pop('alternate_record_set', None) - self.initial_data.pop('dataset_version_set', None) - self.initial_data.pop('next_dataset_version', None) - self.initial_data.pop('previous_dataset_version', None) - self.initial_data.pop('deprecated', None) - self.initial_data.pop('date_deprecated', None) - self.initial_data.pop('state', None) - self.initial_data.pop('preservation_identifier', None) - 
-        self.initial_data.pop('preservation_dataset_version', None)
-        self.initial_data.pop('preservation_dataset_origin_version', None)
-        self.initial_data.pop('rems_identifier', None)
+        if self.initial_data.get("data_catalog", False):
+            self.initial_data["data_catalog"] = self._get_id_from_related_object(
+                "data_catalog", self._get_data_catalog_relation
+            )
+        if self.initial_data.get("contract", False):
+            self.initial_data["contract"] = self._get_id_from_related_object(
+                "contract", self._get_contract_relation
+            )
+
+        self.initial_data.pop("alternate_record_set", None)
+        self.initial_data.pop("dataset_version_set", None)
+        self.initial_data.pop("next_dataset_version", None)
+        self.initial_data.pop("previous_dataset_version", None)
+        self.initial_data.pop("deprecated", None)
+        self.initial_data.pop("date_deprecated", None)
+        self.initial_data.pop("state", None)
+        self.initial_data.pop("preservation_identifier", None)
+        self.initial_data.pop("preservation_dataset_version", None)
+        self.initial_data.pop("preservation_dataset_origin_version", None)
+        self.initial_data.pop("rems_identifier", None)
 
         if self._data_catalog_is_changed():
             # updating data catalog, but not necessarily research_dataset.
@@ -152,31 +153,35 @@ def is_valid(self, raise_exception=False):
 
         # ensure any operation made on research_dataset during serializer.is_valid(),
         # is still compatible with the schema
-        if 'research_dataset' in self.initial_data:
-            self.validate_json_schema(self.initial_data['research_dataset'])
-            self._validate_org_name_is_set(self.initial_data['research_dataset'])
+        if "research_dataset" in self.initial_data:
+            self.validate_json_schema(self.initial_data["research_dataset"])
+            self._validate_org_name_is_set(self.initial_data["research_dataset"])
 
     def update(self, instance, validated_data):
-        if 'preserve_version' in self.context['request'].query_params:
+        if "preserve_version" in self.context["request"].query_params:
             if self._request_by_end_user():
-                raise Http403({ 'detail': [ 'Parameter preserve_version not permitted for end users' ]})
+                raise Http403(
+                    {"detail": ["Parameter preserve_version not permitted for end users"]}
+                )
             # execute updates without creating new versions
             instance.preserve_version = True
         return super(CatalogRecordSerializer, self).update(instance, validated_data)
 
     def create(self, validated_data):
-        if 'V2' not in self.__class__.__name__ and \
-                validated_data['data_catalog'].catalog_json['identifier'] == DFT_CATALOG:
-            raise Http400({ 'detail': [ 'Draft catalog cannot be used in V1 API' ]})
+        if (
+            "V2" not in self.__class__.__name__
+            and validated_data["data_catalog"].catalog_json["identifier"] == DFT_CATALOG
+        ):
+            raise Http400({"detail": ["Draft catalog cannot be used in V1 API"]})
 
         if self._migration_override_requested():
             # any custom stuff before create that might be necessary for migration purposes
-            pid = ''
-            if validated_data['research_dataset'].get('preferred_identifier', False):
+            pid = ""
+            if validated_data["research_dataset"].get("preferred_identifier", False):
                 # store pid, since it will be overwritten during create otherwise
-                pid = validated_data['research_dataset']['preferred_identifier']
+                pid = validated_data["research_dataset"]["preferred_identifier"]
 
         res = super().create(validated_data)
 
@@ -185,7 +190,7 @@ def create(self, validated_data):
             # any custom stuff after create that my be necessary for migration purposes
             if pid:
                 # save original pid provided by the requestor
-                res.research_dataset['preferred_identifier'] = pid
+                res.research_dataset["preferred_identifier"] = pid
 
                 # save, while bypassing normal save-related procedures in CatalogRecord model
                 super(Common, res).save()
@@ -196,8 +201,10 @@
     def _end_user_update_validations(self, instance, validated_data):
         """
         Enforce some rules related to end users when updating records.
         """
-        self._check_end_user_allowed_catalogs(instance.data_catalog.catalog_json['identifier'])
-        fields_to_discard = [ key for key in validated_data.keys() if key not in END_USER_UPDATE_ALLOWED_FIELDS ]
+        self._check_end_user_allowed_catalogs(instance.data_catalog.catalog_json["identifier"])
+        fields_to_discard = [
+            key for key in validated_data.keys() if key not in END_USER_UPDATE_ALLOWED_FIELDS
+        ]
         for field_name in fields_to_discard:
             del validated_data[field_name]
 
@@ -208,23 +215,29 @@ def _end_user_create_validations(self, validated_data):
         - Can only create records into specified catalogs (and can only use the identifier value, not id's directly!)
         - Will have some fields automatically filled for them
         """
-        fields_to_discard = [ key for key in validated_data.keys() if key not in END_USER_CREATE_ALLOWED_FIELDS ]
+        fields_to_discard = [
+            key for key in validated_data.keys() if key not in END_USER_CREATE_ALLOWED_FIELDS
+        ]
         for field_name in fields_to_discard:
             del validated_data[field_name]
 
         # set some fields to whoever the authentication token belonged to.
-        validated_data['metadata_provider_user'] = self.context['request'].user.username
-        validated_data['metadata_provider_org'] = self.context['request'].user.token['schacHomeOrganization']
-        validated_data['metadata_owner_org'] = self.context['request'].user.token['schacHomeOrganization']
+        validated_data["metadata_provider_user"] = self.context["request"].user.username
+        validated_data["metadata_provider_org"] = self.context["request"].user.token[
+            "schacHomeOrganization"
+        ]
+        validated_data["metadata_owner_org"] = self.context["request"].user.token[
+            "schacHomeOrganization"
+        ]
 
         try:
-            dc_identifier = validated_data['data_catalog'].catalog_json['identifier']
+            dc_identifier = validated_data["data_catalog"].catalog_json["identifier"]
         except:
             try:
-                if isinstance(validated_data['data_catalog'], dict):
-                    dc_identifier = validated_data['data_catalog']['identifier']
+                if isinstance(validated_data["data_catalog"], dict):
+                    dc_identifier = validated_data["data_catalog"]["identifier"]
                 else:
-                    dc_identifier = validated_data['data_catalog']
+                    dc_identifier = validated_data["data_catalog"]
             except KeyError:
                 # an error is raise later about missing required field
                 return
 
         self._check_end_user_allowed_catalogs(dc_identifier)
 
@@ -233,74 +246,82 @@ def _check_end_user_allowed_catalogs(self, dc_identifier):
         if dc_identifier not in END_USER_ALLOWED_DATA_CATALOGS:
-            raise Http403({
-                'detail': [
-                    'You do not have access to the selected data catalog. Please use one of the following '
-                    'catalogs: %s' % ', '.join(END_USER_ALLOWED_DATA_CATALOGS)
-                ]
-            })
+            raise Http403(
+                {
+                    "detail": [
+                        "You do not have access to the selected data catalog. Please use one of the following "
+                        "catalogs: %s" % ", ".join(END_USER_ALLOWED_DATA_CATALOGS)
+                    ]
+                }
+            )
 
     def to_representation(self, instance):
         res = super(CatalogRecordSerializer, self).to_representation(instance)
 
-        if 'data_catalog' in res:
-            if self.expand_relation_requested('data_catalog'):
-                res['data_catalog'] = DataCatalogSerializer(instance.data_catalog).data
+        if "data_catalog" in res:
+            if self.expand_relation_requested("data_catalog"):
+                res["data_catalog"] = DataCatalogSerializer(instance.data_catalog).data
             else:
-                res['data_catalog'] = {
-                    'id': instance.data_catalog.id,
-                    'identifier': instance.data_catalog.catalog_json['identifier'],
+                res["data_catalog"] = {
+                    "id": instance.data_catalog.id,
+                    "identifier": instance.data_catalog.catalog_json["identifier"],
                 }
 
-        if 'contract' in res:
-            if self.expand_relation_requested('contract'):
-                res['contract'] = ContractSerializer(instance.contract).data
+        if "contract" in res:
+            if self.expand_relation_requested("contract"):
+                res["contract"] = ContractSerializer(instance.contract).data
             else:
-                res['contract'] = {
-                    'id': instance.contract.id,
-                    'identifier': instance.contract.contract_json['identifier']
+                res["contract"] = {
+                    "id": instance.contract.id,
+                    "identifier": instance.contract.contract_json["identifier"],
                 }
 
-        if 'alternate_record_set' in res:
+        if "alternate_record_set" in res:
             alternate_records = instance.alternate_record_set.records.exclude(pk=instance.id)
             if len(alternate_records):
-                res['alternate_record_set'] = [ ar.identifier for ar in alternate_records ]
+                res["alternate_record_set"] = [ar.identifier for ar in alternate_records]
 
-        if 'dataset_version_set' in res:
-            res['dataset_version_set'] = instance.dataset_version_set.get_listing()
+        if "dataset_version_set" in res:
+            res["dataset_version_set"] = instance.dataset_version_set.get_listing()
 
-        if 'next_dataset_version' in res:
+        if "next_dataset_version" in res:
             if instance.next_dataset_version.state == CatalogRecord.STATE_PUBLISHED:
-                res['next_dataset_version'] = instance.next_dataset_version.identifiers_dict
-            elif instance.user_is_privileged(instance.request or self.context['request']):
+                res["next_dataset_version"] = instance.next_dataset_version.identifiers_dict
+            elif instance.user_is_privileged(instance.request or self.context["request"]):
                 # include additional information to show the owner this version is actually still just a draft
-                res['next_dataset_version'] = instance.next_dataset_version.identifiers_dict
-                res['next_dataset_version']['state'] = CatalogRecord.STATE_DRAFT
+                res["next_dataset_version"] = instance.next_dataset_version.identifiers_dict
+                res["next_dataset_version"]["state"] = CatalogRecord.STATE_DRAFT
             else:
                 # if the next dataset version is still just a draft, then unauthorized users dont need to know about it.
-                del res['next_dataset_version']
-
-        if 'previous_dataset_version' in res:
-            res['previous_dataset_version'] = instance.previous_dataset_version.identifiers_dict
-
-        if 'preservation_dataset_version' in res:
-            res['preservation_dataset_version'] = instance.preservation_dataset_version.identifiers_dict
-            res['preservation_dataset_version']['preservation_state'] = \
-                instance.preservation_dataset_version.preservation_state
-
-        elif 'preservation_dataset_origin_version' in res:
-            res['preservation_dataset_origin_version'] = instance.preservation_dataset_origin_version.identifiers_dict
-            res['preservation_dataset_origin_version']['deprecated'] = \
-                instance.preservation_dataset_origin_version.deprecated
+                del res["next_dataset_version"]
+
+        if "previous_dataset_version" in res:
+            res["previous_dataset_version"] = instance.previous_dataset_version.identifiers_dict
+
+        if "preservation_dataset_version" in res:
+            res[
+                "preservation_dataset_version"
+            ] = instance.preservation_dataset_version.identifiers_dict
+            res["preservation_dataset_version"][
+                "preservation_state"
+            ] = instance.preservation_dataset_version.preservation_state
+
+        elif "preservation_dataset_origin_version" in res:
+            res[
+                "preservation_dataset_origin_version"
+            ] = instance.preservation_dataset_origin_version.identifiers_dict
+            res["preservation_dataset_origin_version"][
+                "deprecated"
+            ] = instance.preservation_dataset_origin_version.deprecated
 
         if instance.new_dataset_version_created:
-            res['new_version_created'] = instance.new_dataset_version_created
+            res["new_version_created"] = instance.new_dataset_version_created
 
         # Do the population of file_details here, since if it was done in the view, it might not know the file/dir
         # identifiers any longer, since the potential stripping of file/dir fields takes away identifier fields from
         # File and Directory objects, which are needed in populating file_details
-        if 'request' in self.context and 'file_details' in self.context['request'].query_params:
-            CRS.populate_file_details(res, self.context['request'])
+        if "request" in self.context and "file_details" in self.context["request"].query_params:
+            CRS.populate_file_details(res, self.context["request"])
 
         res = self._check_and_strip_sensitive_fields(instance, res)
 
@@ -324,15 +345,16 @@ def _check_and_strip_sensitive_fields(self, instance, res):
         retrieving a list where some filter was used, the identifiers may not be known either...
         It is much more straightforward to do it here.
         """
-        if 'request' in self.context:
-            if not instance.user_is_privileged(self.context['request']):
-                if res.get('research_dataset'):
+        if "request" in self.context:
+            if not instance.user_is_privileged(self.context["request"]):
+                if res.get("research_dataset"):
                     # research_dataset can be missing if not listed in fields query param
-                    res['research_dataset'] = CRS.check_and_remove_metadata_based_on_access_type(
-                        CRS.remove_contact_info_metadata(res['research_dataset']))
+                    res["research_dataset"] = CRS.check_and_remove_metadata_based_on_access_type(
+                        CRS.remove_contact_info_metadata(res["research_dataset"])
+                    )
 
-                res.pop('rems_identifier', None)
-                res.pop('access_granter', None)
+                res.pop("rems_identifier", None)
+                res.pop("access_granter", None)
 
         return res
 
@@ -340,29 +362,32 @@ def _populate_dir_titles(self, ds):
         """
         If dir title has been omitted, populate it with its dir name.
         """
-        if 'directories' not in ds:
+        if "directories" not in ds:
            return
 
        # make sure to not populate title for entries that already contain other dataset-specific metadata
        dirs_to_populate = [
-            dr['identifier'] for dr in ds['directories']
-            if dr.get('title', None) is None
+            dr["identifier"]
+            for dr in ds["directories"]
+            if dr.get("title", None) is None
            and len(dr) > 1
-            and dr.get('exclude', False) is False
-            and dr.get('delete', False) is False
+            and dr.get("exclude", False) is False
+            and dr.get("delete", False) is False
        ]
 
        if dirs_to_populate:
            dirs_from_db = [
-                dr for dr in
-                Directory.objects.filter(identifier__in=dirs_to_populate).values('identifier', 'directory_name')
+                dr
+                for dr in Directory.objects.filter(identifier__in=dirs_to_populate).values(
+                    "identifier", "directory_name"
+                )
            ]
 
-            for dr in ds['directories']:
+            for dr in ds["directories"]:
                for i, dir_details in enumerate(dirs_from_db):
-                    if dir_details['identifier'] == dr['identifier']:
-                        dr['title'] = dir_details['directory_name']
+                    if dir_details["identifier"] == dr["identifier"]:
+                        dr["title"] = dir_details["directory_name"]
                        dirs_from_db.pop(i)
                        break
 
@@ -370,38 +395,43 @@ def _populate_file_titles(self, ds):
        """
        If file title has been omitted, populate it with its file name.
        """
-        if 'files' not in ds:
+        if "files" not in ds:
            return
 
        # make sure to not populate title for entries that already contain other dataset-specific metadata
        files_to_populate = [
-            f['identifier'] for f in ds['files']
-            if f.get('title', None) is None
+            f["identifier"]
+            for f in ds["files"]
+            if f.get("title", None) is None
            and len(f) > 1
-            and f.get('exclude', False) is False
-            and f.get('delete', False) is False
+            and f.get("exclude", False) is False
+            and f.get("delete", False) is False
        ]
 
        if files_to_populate:
            files_from_db = [
-                f for f in
-                File.objects.filter(identifier__in=files_to_populate).values('identifier', 'file_name')
+                f
+                for f in File.objects.filter(identifier__in=files_to_populate).values(
+                    "identifier", "file_name"
+                )
            ]
 
-            for f in ds['files']:
+            for f in ds["files"]:
                for i, file_details in enumerate(files_from_db):
-                    if file_details['identifier'] == f['identifier']:
-                        f['title'] = file_details['file_name']
+                    if file_details["identifier"] == f["identifier"]:
+                        f["title"] = file_details["file_name"]
                        files_from_db.pop(i)
                        break
 
    def _validate_draft_data_catalog(self):
        # catalog object is not yet included to initial_data so have to fetch it
-        dc_pid = DataCatalog.objects.get(pk=self.initial_data['data_catalog']).catalog_json['identifier']
+        dc_pid = DataCatalog.objects.get(pk=self.initial_data["data_catalog"]).catalog_json[
+            "identifier"
+        ]
 
        if dc_pid == DFT_CATALOG and (self.instance.is_published() or self.instance.draft_of):
-            raise ValidationError({ 'detail': ['Catalog cannot be changed back to draft'] })
+            raise ValidationError({"detail": ["Catalog cannot be changed back to draft"]})
 
    def validate_research_dataset(self, value):
        self._populate_file_and_dir_titles(value)
@@ -416,16 +446,16 @@ def validate_research_dataset(self, value):
        return value
 
    def _populate_file_and_dir_titles(self, value):
-        if 'directories' in value and not value['directories']:
+        if "directories" in value and not value["directories"]:
            # remove if empty list
-            del value['directories']
+            del value["directories"]
        else:
            self._populate_dir_titles(value)
 
-        if 'files' in value and not value['files']:
+        if "files" in value and not value["files"]:
            # remove if empty list
-            del value['files']
+            del value["files"]
        else:
            self._populate_file_titles(value)
 
@@ -433,34 +463,48 @@ def validate_json_schema(self, value):
        self._set_dataset_schema()
 
        if self._operation_is_create:
-            if not value.get('preferred_identifier', None):
-                if DataCatalogService.is_harvested(self.initial_data.get('data_catalog')):
-                    raise ValidationError({ 'preferred_identifier':
-                        ['harvested catalog record must have preferred identifier']})
+            if not value.get("preferred_identifier", None):
+                if DataCatalogService.is_harvested(self.initial_data.get("data_catalog")):
+                    raise ValidationError(
+                        {
+                            "preferred_identifier": [
+                                "harvested catalog record must have preferred identifier"
+                            ]
+                        }
+                    )
 
                # normally not present, but may be set by harvesters. if missing,
                # use temporary value and remove after schema validation.
-                value['preferred_identifier'] = 'temp'
+                value["preferred_identifier"] = "temp"
 
            if self._migration_override_requested():
-                for is_output_of in value.get('is_output_of', []):
-                    if 'source_organization' not in is_output_of or not is_output_of['source_organization']:
-                        is_output_of['source_organization'] = [
-                            {'@type': 'Organization',
-                             'identifier': 'MIGRATION_OVERRIDE',
-                             'name': {'und': 'temp'}}
+                for is_output_of in value.get("is_output_of", []):
+                    if (
+                        "source_organization" not in is_output_of
+                        or not is_output_of["source_organization"]
+                    ):
+                        is_output_of["source_organization"] = [
+                            {
+                                "@type": "Organization",
+                                "identifier": "MIGRATION_OVERRIDE",
+                                "name": {"und": "temp"},
+                            }
                        ]
 
        validate_json(value, self.json_schema)
 
-        if value['preferred_identifier'] == 'temp':
-            value.pop('preferred_identifier')
+        if value["preferred_identifier"] == "temp":
+            value.pop("preferred_identifier")
 
        if self._migration_override_requested():
-            for is_output_of in value.get('is_output_of', []):
-                if 'source_organization' in is_output_of and len(is_output_of['source_organization']) == 1 and \
-                        is_output_of['source_organization'][0]['identifier'] == 'MIGRATION_OVERRIDE':
-                    is_output_of.pop('source_organization', None)
+            for is_output_of in value.get("is_output_of", []):
+                if (
+                    "source_organization" in is_output_of
+                    and len(is_output_of["source_organization"]) == 1
+                    and is_output_of["source_organization"][0]["identifier"]
+                    == "MIGRATION_OVERRIDE"
+                ):
+                    is_output_of.pop("source_organization", None)
 
        else:
            # update operations
@@ -476,16 +520,18 @@ def _validate_org_name_is_set(self, obj):
        a name.
        """
        if isinstance(obj, dict):
-            if '@type' in obj and obj['@type'] == 'Organization' and 'name' not in obj:
-                raise ValidationError({
-                    'detail': [
-                        'Specified organization object does not have a name. If you are using '
-                        'an org identifier from reference data, then the name will be populated '
-                        'automatically. If your org identifier is not from reference data, you '
-                        'must provide the organization name. The object that caused the error: %s'
-                        % str(obj)
-                    ]
-                })
+            if "@type" in obj and obj["@type"] == "Organization" and "name" not in obj:
+                raise ValidationError(
+                    {
+                        "detail": [
+                            "Specified organization object does not have a name. If you are using "
+                            "an org identifier from reference data, then the name will be populated "
+                            "automatically. If your org identifier is not from reference data, you "
+                            "must provide the organization name. The object that caused the error: %s"
+                            % str(obj)
+                        ]
+                    }
+                )
            for field, value in obj.items():
                if isinstance(value, (dict, list)):
                    self._validate_org_name_is_set(value)
@@ -514,34 +560,42 @@ def _validate_research_dataset_uniqueness(self, research_dataset):
        if not self._catalog_enforces_unique_pids():
            return
 
-        preferred_identifier_value = research_dataset.get('preferred_identifier', None)
+        preferred_identifier_value = research_dataset.get("preferred_identifier", None)
 
        if not preferred_identifier_value:
            # during create, preferred_identifier is not necessarily set
            return
 
-        if self._find_object_using_identifier('preferred_identifier', preferred_identifier_value):
+        if self._find_object_using_identifier("preferred_identifier", preferred_identifier_value):
            if self._data_catalog_supports_versioning():
-                raise ValidationError([
-                    'A catalog record with this research_dataset ->> preferred_identifier'
-                    ' already exists in another data catalog. When saving to ATT catalog,'
-                    ' the preferred_identifier must not already exist in other catalogs.'
-                ])
+                raise ValidationError(
+                    [
+                        "A catalog record with this research_dataset ->> preferred_identifier"
+                        " already exists in another data catalog. When saving to ATT catalog,"
+                        " the preferred_identifier must not already exist in other catalogs."
+                    ]
+                )
            else:
                # harvested catalog
-                raise ValidationError([
-                    'A catalog record with this research_dataset ->> preferred_identifier'
-                    ' already exists in this data catalog.'
-                ])
+                raise ValidationError(
+                    [
+                        "A catalog record with this research_dataset ->> preferred_identifier"
+                        " already exists in this data catalog."
+                    ]
+                )
 
        # cr not found using preferred_identifier. preferred_identifier value should never
        # be the same as metadata_version_identifier value in any catalog, so look for existing records
        # using metadata_version_identifier also
-        if self._find_object_using_identifier('metadata_version_identifier', preferred_identifier_value):
-            raise ValidationError([
-                'A catalog record already exists which has the given preferred_identifier'
-                ' value as its metadata_version_identifier value.'
-            ])
+        if self._find_object_using_identifier(
+            "metadata_version_identifier", preferred_identifier_value
+        ):
+            raise ValidationError(
+                [
+                    "A catalog record already exists which has the given preferred_identifier"
+                    " value as its metadata_version_identifier value."
+                ]
+            )
 
    def _catalog_enforces_unique_pids(self):
        """
@@ -550,15 +604,17 @@ def _catalog_enforces_unique_pids(self):
        """
        if self._operation_is_create:
            try:
-                dc = DataCatalog.objects.values('catalog_json').get(pk=self.initial_data['data_catalog'])
+                dc = DataCatalog.objects.values("catalog_json").get(
+                    pk=self.initial_data["data_catalog"]
+                )
            except DataCatalog.DoesNotExist:
-                raise ValidationError({ 'detail': ['Provided data catalog does not exist']})
+                raise ValidationError({"detail": ["Provided data catalog does not exist"]})
            except KeyError:
                # data_catalog was omitted. an appropriate error is raised later.
                return
-            dc_pid = dc['catalog_json']['identifier']
+            dc_pid = dc["catalog_json"]["identifier"]
        else:
-            dc_pid = self.instance.data_catalog.catalog_json['identifier']
+            dc_pid = self.instance.data_catalog.catalog_json["identifier"]
 
        return dc_pid not in LEGACY_CATALOGS
 
@@ -569,28 +625,28 @@ def _find_object_using_identifier(self, field_name, identifier):
        - takes into account data_catalog when searching by preferred_identifier
        - does not use select_related() to also fetch relations, since they are not needed.
""" - params = { 'research_dataset__contains': { field_name: identifier }} + params = {"research_dataset__contains": {field_name: identifier}} - if field_name == 'preferred_identifier' and not self._data_catalog_supports_versioning(): + if field_name == "preferred_identifier" and not self._data_catalog_supports_versioning(): # only look for hits within the same data catalog. if self._operation_is_create: # value of data_catalog in initial_data is set in is_valid() - params['data_catalog'] = self.initial_data['data_catalog'] + params["data_catalog"] = self.initial_data["data_catalog"] else: # updates - if 'data_catalog' in self.initial_data: + if "data_catalog" in self.initial_data: # the update operation is updating data_catalog as well, # so make sure the new catalog is checked for not having # the identifier currently being checked. # value of data_catalog in initial_data is set in is_valid() - params['data_catalog'] = self.initial_data['data_catalog'] + params["data_catalog"] = self.initial_data["data_catalog"] else: # a PATCH which does not contain data_catalog - get # data_catalog id from the instance being updated == what # is currently in db - params['data_catalog'] = self.instance.data_catalog.id + params["data_catalog"] = self.instance.data_catalog.id else: # checking metadata_version_identifier, or saving to ATT catalog - in both cases, find matches @@ -603,88 +659,107 @@ def _find_object_using_identifier(self, field_name, identifier): # preferred_identifiers already existing in ATT catalogs are fine, so exclude # results from ATT catalogs. matches in other catalogs however are considered # an error. - return CatalogRecord.objects_unfiltered.filter(**params) \ - .exclude(data_catalog_id=self.instance.data_catalog_id).exists() + return ( + CatalogRecord.objects_unfiltered.filter(**params) + .exclude(data_catalog_id=self.instance.data_catalog_id) + .exists() + ) else: - return CatalogRecord.objects_unfiltered.filter(**params).exclude(pk=self.instance.id).exists() + return ( + CatalogRecord.objects_unfiltered.filter(**params) + .exclude(pk=self.instance.id) + .exists() + ) def _data_catalog_is_changed(self): """ Check if data_catalog of the record is being changed. Used to decide if preferred_identifier uniqueness should be checked in certain situations. """ - if self._operation_is_update and 'data_catalog' in self.initial_data: - dc = self.initial_data['data_catalog'] + if self._operation_is_update and "data_catalog" in self.initial_data: + dc = self.initial_data["data_catalog"] if isinstance(dc, int): return dc != self.instance.data_catalog.id elif isinstance(dc, str): - return dc != self.instance.catalog_json['identifier'] + return dc != self.instance.catalog_json["identifier"] elif isinstance(dc, dict): - return dc['identifier'] != self.instance.catalog_json['identifier'] - else: # pragma: no cover - raise ValidationError({ 'detail': ['can not figure out the type of data_catalog'] }) + return dc["identifier"] != self.instance.catalog_json["identifier"] + else: # pragma: no cover + raise ValidationError({"detail": ["can not figure out the type of data_catalog"]}) def _preferred_identifier_is_changed(self): """ Check if preferred_identifier is being updated in the current request or not. 
""" - return self.initial_data['research_dataset'].get('preferred_identifier', None) \ + return ( + self.initial_data["research_dataset"].get("preferred_identifier", None) != self.instance.preferred_identifier + ) def _data_catalog_supports_versioning(self): - if 'data_catalog' in self.initial_data: + if "data_catalog" in self.initial_data: # must always fetch from db, to know if it supports versioning or not - catalog_json = DataCatalog.objects.filter(pk=self.initial_data['data_catalog']) \ - .only('catalog_json').first().catalog_json + catalog_json = ( + DataCatalog.objects.filter(pk=self.initial_data["data_catalog"]) + .only("catalog_json") + .first() + .catalog_json + ) else: try: catalog_json = self.instance.data_catalog.catalog_json except AttributeError: - raise ValidationError({ 'data_catalog': ['data_catalog is a required field']}) + raise ValidationError({"data_catalog": ["data_catalog is a required field"]}) - return catalog_json.get('dataset_versioning', False) is True + return catalog_json.get("dataset_versioning", False) is True def _get_contract_relation(self, identifier_value): """ Passed to _get_id_from_related_object() to be used when relation was a string identifier """ if isinstance(identifier_value, dict): - identifier_value = identifier_value['contract_json']['identifier'] + identifier_value = identifier_value["contract_json"]["identifier"] try: - return Contract.objects.get( - contract_json__contains={ 'identifier': identifier_value } - ).id + return Contract.objects.get(contract_json__contains={"identifier": identifier_value}).id except Contract.DoesNotExist: - raise ValidationError({ 'contract': ['identifier %s not found.' % str(identifier_value)]}) + raise ValidationError( + {"contract": ["identifier %s not found." % str(identifier_value)]} + ) def _get_data_catalog_relation(self, identifier_value): """ Passed to _get_id_from_related_object() to be used when relation was a string identifier """ if isinstance(identifier_value, dict): - identifier_value = identifier_value['catalog_json']['identifier'] + identifier_value = identifier_value["catalog_json"]["identifier"] try: return DataCatalog.objects.get( - catalog_json__contains={ 'identifier': identifier_value } + catalog_json__contains={"identifier": identifier_value} ).id except DataCatalog.DoesNotExist: - raise ValidationError({ 'data_catalog': ['identifier %s not found' % str(identifier_value)]}) + raise ValidationError( + {"data_catalog": ["identifier %s not found" % str(identifier_value)]} + ) def _migration_override_requested(self): """ Check presence of query parameter ?migration_override, which enables some specific actions during the request, at this point useful only for migration operations. 
""" - migration_override = CommonService.get_boolean_query_param(self.context['request'], 'migration_override') + migration_override = CommonService.get_boolean_query_param( + self.context["request"], "migration_override" + ) if migration_override and self._request_by_end_user(): - raise Http403({ 'detail': [ 'Parameter migration_override not permitted for end users' ]}) + raise Http403({"detail": ["Parameter migration_override not permitted for end users"]}) return migration_override def _set_dataset_schema(self): data_catalog = None if self._operation_is_create: try: - data_catalog_id = self._get_id_from_related_object('data_catalog', self._get_data_catalog_relation) + data_catalog_id = self._get_id_from_related_object( + "data_catalog", self._get_data_catalog_relation + ) data_catalog = DataCatalog.objects.get(pk=data_catalog_id) except: # whatever error happened with data catalog handling - invalid data_catalog @@ -696,8 +771,10 @@ def _set_dataset_schema(self): data_catalog = self.instance.data_catalog if data_catalog: - schema_prefix = data_catalog.catalog_json.get('research_dataset_schema', None) + schema_prefix = data_catalog.catalog_json.get("research_dataset_schema", None) else: schema_prefix = None - self.json_schema = CommonService.get_json_schema(self._schemas_directory_path, 'dataset', schema_prefix) + self.json_schema = CommonService.get_json_schema( + self._schemas_directory_path, "dataset", schema_prefix + ) diff --git a/src/metax_api/api/rest/base/serializers/common_serializer.py b/src/metax_api/api/rest/base/serializers/common_serializer.py index 09ee5535..d44fe618 100755 --- a/src/metax_api/api/rest/base/serializers/common_serializer.py +++ b/src/metax_api/api/rest/base/serializers/common_serializer.py @@ -30,26 +30,26 @@ class CommonSerializer(ModelSerializer): class Meta: model = Common fields = ( - 'user_modified', - 'date_modified', - 'user_created', - 'date_created', - 'service_modified', - 'service_created', - 'removed', - 'date_removed' + "user_modified", + "date_modified", + "user_created", + "date_created", + "service_modified", + "service_created", + "removed", + "date_removed", ) extra_kwargs = { # not required during creation, or updating # they would be overwritten by the api anyway. # except for user_modified can and should # be given by the requestor if possible. 
- 'user_modified': { 'required': False }, - 'date_modified': { 'required': False }, - 'user_created': { 'required': False }, - 'date_created': { 'required': False }, - 'service_modified': { 'required': False }, - 'service_created': { 'required': False }, + "user_modified": {"required": False}, + "date_modified": {"required": False}, + "user_created": {"required": False}, + "date_created": {"required": False}, + "service_modified": {"required": False}, + "service_created": {"required": False}, } _operation_is_update = False @@ -68,11 +68,11 @@ def __init__(self, *args, **kwargs): super(CommonSerializer, self).__init__(*args, **kwargs) - if hasattr(self, 'initial_data') and self.initial_data is not None: - self.initial_data.pop('removed', None) - self.initial_data.pop('date_removed', None) + if hasattr(self, "initial_data") and self.initial_data is not None: + self.initial_data.pop("removed", None) + self.initial_data.pop("date_removed", None) - if hasattr(self, 'instance') and self.instance is not None: + if hasattr(self, "instance") and self.instance is not None: self._operation_is_update = True else: self._operation_is_create = True @@ -89,11 +89,11 @@ def __init__(self, *args, **kwargs): # to do so. solution: read the docs and be aware of it. self.partial = True - if 'only_fields' in kwargs: - self.requested_fields = kwargs.pop('only_fields') + if "only_fields" in kwargs: + self.requested_fields = kwargs.pop("only_fields") - elif 'request' in self.context and 'fields' in self.context['request'].query_params: - self.requested_fields = self.context['view'].fields + elif "request" in self.context and "fields" in self.context["request"].query_params: + self.requested_fields = self.context["view"].fields @transaction.atomic def save(self, *args, **kwargs): @@ -114,12 +114,12 @@ def save(self, *args, **kwargs): with metadata_version_identifier generation, file changes handling, alternate_record_set and versions handling. """ - if hasattr(self, 'instance') and self.instance is not None: + if hasattr(self, "instance") and self.instance is not None: # update operation. # request cant be passed as __request inside kwargs (as is done # when creating records), due to some 'known fields only' validations # along the way... this seems to be most convenient. - self.instance.request = self.context.get('request', None) + self.instance.request = self.context.get("request", None) super().save(*args, **kwargs) def create(self, validated_data): @@ -127,7 +127,7 @@ def create(self, validated_data): # the request object needs to be passed along the general parameters. # luckily, the validate_data is just passed to the Model __init__ # as **validated_data, and all our extra kwargs can ride along. - validated_data['__request'] = self.context.get('request', None) + validated_data["__request"] = self.context.get("request", None) return super().create(validated_data) def to_representation(self, instance): @@ -172,12 +172,15 @@ def expand_relation_requested(self, relation_name): to decide whether or not to include the complete relation object in the API response or not. 
""" - if 'view' in self.context and 'expand_relation' in self.context['view'].request.query_params: - return relation_name in self.context['view'].request.query_params['expand_relation'] + if ( + "view" in self.context + and "expand_relation" in self.context["view"].request.query_params + ): + return relation_name in self.context["view"].request.query_params["expand_relation"] return False def _get_id_from_related_object(self, relation_field, string_relation_func): - ''' + """ Use for finding out a related object's id, which Django needs to save the relation to the database. The related object, or its id, or identifier should be present in the initial data's relation field. @@ -186,7 +189,7 @@ def _get_id_from_related_object(self, relation_field, string_relation_func): :return: id of the related object :string_relation_func: a function which will be called to retrieve the related object in case the relation is a string identifier. - ''' + """ identifier_value = self.initial_data[relation_field] if isinstance(identifier_value, int): @@ -204,30 +207,45 @@ def _get_id_from_related_object(self, relation_field, string_relation_func): # the actual related object as a dict. it is expected to be # in un-tampered form with normal fields present, since # relation fields can not be updated through another object - if 'id' in identifier_value: + if "id" in identifier_value: try: - return int(identifier_value['id']) + return int(identifier_value["id"]) except: - raise ValidationError({relation_field: ['Validation error for relation id field. ' - 'Data in unexpected format']}) + raise ValidationError( + { + relation_field: [ + "Validation error for relation id field. " + "Data in unexpected format" + ] + } + ) else: # try to look for identifier field in the dict - return string_relation_func(identifier_value['identifier']) - raise ValidationError({ relation_field: [ - 'Relation dict does not have any fields to identify relation with (id or identifier)'] }) + return string_relation_func(identifier_value["identifier"]) + raise ValidationError( + { + relation_field: [ + "Relation dict does not have any fields to identify relation with (id or identifier)" + ] + } + ) else: - _logger.error('is_valid() field validation for relation %s: unexpected type: %s' - % (relation_field, type(identifier_value))) - raise ValidationError('Validation error for relation %s. Data in unexpected format' % relation_field) + _logger.error( + "is_valid() field validation for relation %s: unexpected type: %s" + % (relation_field, type(identifier_value)) + ) + raise ValidationError( + "Validation error for relation %s. Data in unexpected format" % relation_field + ) def _request_by_end_user(self): - return 'request' in self.context and not self.context['request'].user.is_service + return "request" in self.context and not self.context["request"].user.is_service def _request_by_service(self): - return 'request' in self.context and self.context['request'].user.is_service + return "request" in self.context and self.context["request"].user.is_service -class LightSerializer(): +class LightSerializer: """ LightSerializer is optimized for speed for read-only serializing of a @@ -302,15 +320,23 @@ def ls_field_list(cls, received_field_list=[]): This default method only filters out fields that are not ine the allowed_fields list. 
""" - assert isinstance(cls.allowed_fields, set), 'light serializer must specify allowed_fields as a set()' + assert isinstance( + cls.allowed_fields, set + ), "light serializer must specify allowed_fields as a set()" if received_field_list: # ensure only allowed fields and no rubbish ends up in the query - unknown # fields to the db will cause crash. - field_list = [ field for field in received_field_list if field in cls.allowed_fields ] + field_list = [field for field in received_field_list if field in cls.allowed_fields] if not field_list: - raise Http400({ 'detail': ['uh oh, none of the fields you requested are listed in allowed_fields. ' - 'received fields: %s' % str(received_field_list)] }) + raise Http400( + { + "detail": [ + "uh oh, none of the fields you requested are listed in allowed_fields. " + "received fields: %s" % str(received_field_list) + ] + } + ) else: # get all fields @@ -328,9 +354,17 @@ def serialize(cls, unserialized_data): The end result is supposed to look the same as normally from a serializer. """ - assert type(unserialized_data) in (QuerySet, dict, list), 'unserialized_data type must be QuerySet or dict' - assert isinstance(cls.special_fields, set), 'light serializer must specify special_fields as a set()' - assert isinstance(cls.relation_fields, set), 'light serializer must specify relation_fields as a set()' + assert type(unserialized_data) in ( + QuerySet, + dict, + list, + ), "unserialized_data type must be QuerySet or dict" + assert isinstance( + cls.special_fields, set + ), "light serializer must specify special_fields as a set()" + assert isinstance( + cls.relation_fields, set + ), "light serializer must specify relation_fields as a set()" special_fields = cls.special_fields relation_fields = cls.relation_fields @@ -338,7 +372,7 @@ def serialize(cls, unserialized_data): _relation_id_fields = cls._relation_id_fields for field in relation_fields: - _relation_id_fields.add('%s_id' % field) + _relation_id_fields.add("%s_id" % field) if isinstance(unserialized_data, (QuerySet, list)): unserialized_data = unserialized_data @@ -359,18 +393,18 @@ def serialize(cls, unserialized_data): continue elif field in special_fields: serialize_special_field(item, field, value) - elif '__' in field: + elif "__" in field: # fields from relations, such as parent_directory__identifier - field_sections = field.split('__') + field_sections = field.split("__") field_name = field_sections[0] - if field.endswith('_json'): + if field.endswith("_json"): # _json -ending fields are a special case. only handles # fields identifier and id. if field_name not in item: item[field_name] = {} if isinstance(value, dict): # take identifier from inside the json-field - item[field_name] = { 'identifier': value['identifier'] } + item[field_name] = {"identifier": value["identifier"]} else: # id item[field_name][field_sections[-1]] = value @@ -379,13 +413,12 @@ def serialize(cls, unserialized_data): try: item[field_name][field_sections[-1]] = value except KeyError: - item[field_name] = { field_sections[-1]: value } + item[field_name] = {field_sections[-1]: value} elif field in _relation_id_fields: raise ValueError( - 'LightSerializer received field: %s. Expected: %s__attrname. ' - 'Did you forget to pass result of ls_field_list() to queryset ' - '.values(*field_list)?' - % (field, field[:-3]) + "LightSerializer received field: %s. Expected: %s__attrname. " + "Did you forget to pass result of ls_field_list() to queryset " + ".values(*field_list)?" 
% (field, field[:-3]) ) else: if isinstance(value, datetime): diff --git a/src/metax_api/api/rest/base/serializers/contract_serializer.py b/src/metax_api/api/rest/base/serializers/contract_serializer.py index ad8ad87f..9fa4e167 100755 --- a/src/metax_api/api/rest/base/serializers/contract_serializer.py +++ b/src/metax_api/api/rest/base/serializers/contract_serializer.py @@ -14,22 +14,21 @@ class ContractSerializer(CommonSerializer): - class Meta: model = Contract fields = ( - 'id', - 'contract_json', + "id", + "contract_json", ) + CommonSerializer.Meta.fields extra_kwargs = CommonSerializer.Meta.extra_kwargs def validate_contract_json(self, value): - validate_json(value, self.context['view'].json_schema) + validate_json(value, self.context["view"].json_schema) if self._operation_is_create: self._validate_identifier_uniqueness(value) return value def _validate_identifier_uniqueness(self, contract_json): - if Contract.objects.filter(contract_json__identifier=contract_json['identifier']).exists(): + if Contract.objects.filter(contract_json__identifier=contract_json["identifier"]).exists(): raise ValidationError(f"identifier {contract_json['identifier']} already exists") diff --git a/src/metax_api/api/rest/base/serializers/data_catalog_serializer.py b/src/metax_api/api/rest/base/serializers/data_catalog_serializer.py index 8491c012..bd6294ad 100755 --- a/src/metax_api/api/rest/base/serializers/data_catalog_serializer.py +++ b/src/metax_api/api/rest/base/serializers/data_catalog_serializer.py @@ -17,50 +17,56 @@ class DataCatalogSerializer(CommonSerializer): - class Meta: model = DataCatalog fields = ( - 'id', - 'catalog_json', - 'catalog_record_group_edit', - 'catalog_record_group_create', - 'catalog_record_services_edit', - 'catalog_record_services_create', - 'catalog_record_group_read', - 'catalog_record_services_read', + "id", + "catalog_json", + "catalog_record_group_edit", + "catalog_record_group_create", + "catalog_record_services_edit", + "catalog_record_services_create", + "catalog_record_group_read", + "catalog_record_services_read", ) + CommonSerializer.Meta.fields extra_kwargs = CommonSerializer.Meta.extra_kwargs def is_valid(self, raise_exception=False): super(DataCatalogSerializer, self).is_valid(raise_exception=raise_exception) - if 'catalog_json' in self.initial_data: + if "catalog_json" in self.initial_data: self._validate_dataset_schema() - if self.initial_data['catalog_json'].get('dataset_versioning', False) is True \ - and self.initial_data['catalog_json'].get('harvested', False) is True: - raise ValidationError({ - 'detail': ['versioning cannot be enabled in harvested catalogs.'] - }) + if ( + self.initial_data["catalog_json"].get("dataset_versioning", False) is True + and self.initial_data["catalog_json"].get("harvested", False) is True + ): + raise ValidationError( + {"detail": ["versioning cannot be enabled in harvested catalogs."]} + ) def validate_catalog_json(self, value): - DCS.validate_reference_data(value, self.context['view'].cache) + DCS.validate_reference_data(value, self.context["view"].cache) # Validate against json schema only after reference data has been populated - validate_json(value, self.context['view'].json_schema) + validate_json(value, self.context["view"].json_schema) if self._operation_is_create: self._validate_identifier_uniqueness(value) return value def _validate_dataset_schema(self): - rd_schema = self.initial_data['catalog_json'].get('research_dataset_schema', None) + rd_schema = self.initial_data["catalog_json"].get("research_dataset_schema", 
None) if not rd_schema: return - schema_path = '%s/../schemas/%s_dataset_schema.json' % (path.dirname(__file__), rd_schema) + schema_path = "%s/../schemas/%s_dataset_schema.json" % ( + path.dirname(__file__), + rd_schema, + ) if not path.isfile(schema_path): - raise ValidationError({'catalog_json': ['research dataset schema \'%s\' not found' % rd_schema]}) + raise ValidationError( + {"catalog_json": ["research dataset schema '%s' not found" % rd_schema]} + ) def _validate_identifier_uniqueness(self, catalog_json): - if DataCatalog.objects.filter(catalog_json__identifier=catalog_json['identifier']).exists(): - raise ValidationError({'identifier': - ['identifier %s already exists' % catalog_json['identifier']] - }) \ No newline at end of file + if DataCatalog.objects.filter(catalog_json__identifier=catalog_json["identifier"]).exists(): + raise ValidationError( + {"identifier": ["identifier %s already exists" % catalog_json["identifier"]]} + ) diff --git a/src/metax_api/api/rest/base/serializers/directory_serializer.py b/src/metax_api/api/rest/base/serializers/directory_serializer.py index 14dabfd2..7f467f25 100755 --- a/src/metax_api/api/rest/base/serializers/directory_serializer.py +++ b/src/metax_api/api/rest/base/serializers/directory_serializer.py @@ -29,28 +29,29 @@ class DirectorySerializer(CommonSerializer): class Meta: model = Directory fields = ( - 'id', - 'byte_size', - 'directory_deleted', - 'directory_modified', - 'directory_name', - 'directory_path', - 'file_count', - 'identifier', - 'parent_directory', - 'project_identifier', - 'user_modified', - 'date_modified', - 'user_created', - 'date_created', + "id", + "byte_size", + "directory_deleted", + "directory_modified", + "directory_name", + "directory_path", + "file_count", + "identifier", + "parent_directory", + "project_identifier", + "user_modified", + "date_modified", + "user_created", + "date_created", ) + CommonSerializer.Meta.fields extra_kwargs = CommonSerializer.Meta.extra_kwargs def is_valid(self, raise_exception=False): - if 'parent_directory' in self.initial_data: - self.initial_data['parent_directory'] = self._get_id_from_related_object( - 'parent_directory', self._get_parent_directory_relation) + if "parent_directory" in self.initial_data: + self.initial_data["parent_directory"] = self._get_id_from_related_object( + "parent_directory", self._get_parent_directory_relation + ) super(DirectorySerializer, self).is_valid(raise_exception=raise_exception) def validate_directory_path(self, value): @@ -62,25 +63,30 @@ def validate_directory_path(self, value): since directories can be frozen and unfrozen multiple times. """ dir_exists_in_project_scope = Directory.objects.filter( - project_identifier=self.initial_data['project_identifier'], - directory_path=self.initial_data['directory_path'] + project_identifier=self.initial_data["project_identifier"], + directory_path=self.initial_data["directory_path"], ).exists() if dir_exists_in_project_scope: - raise ValidationError([ - 'directory path %s already exists in project %s scope. Are you trying to freeze same directory again?' - % (self.initial_data['directory_path'], self.initial_data['project_identifier']) - ]) + raise ValidationError( + [ + "directory path %s already exists in project %s scope. Are you trying to freeze same directory again?" 
+                    % (
+                        self.initial_data["directory_path"],
+                        self.initial_data["project_identifier"],
+                    )
+                ]
+            )
 
         return value
 
     def to_representation(self, instance):
         res = super(DirectorySerializer, self).to_representation(instance)
 
-        if 'parent_directory' in res and instance.parent_directory:
-            res['parent_directory'] = {
-                'id': instance.parent_directory.id,
-                'identifier': instance.parent_directory.identifier,
+        if "parent_directory" in res and instance.parent_directory:
+            res["parent_directory"] = {
+                "id": instance.parent_directory.id,
+                "identifier": instance.parent_directory.identifier,
             }
 
         return res
@@ -90,18 +96,20 @@ def _get_parent_directory_relation(self, identifier_value):
         Passed to _get_id_from_related_object() to be used when relation was a string identifier
         """
         if isinstance(identifier_value, dict):
-            identifier_value = identifier_value['identifier']
+            identifier_value = identifier_value["identifier"]
         try:
             return Directory.objects.get(identifier=identifier_value).id
         except Directory.DoesNotExist:
-            raise ValidationError({ 'parent_directory': ['identifier %s not found' % str(identifier_value)]})
+            raise ValidationError(
+                {"parent_directory": ["identifier %s not found" % str(identifier_value)]}
+            )
 
 
 class LightDirectorySerializer(LightSerializer):
 
     allowed_fields = set(DirectorySerializer.Meta.fields)
     special_fields = set()
-    relation_fields = set(['parent_directory'])
+    relation_fields = set(["parent_directory"])
 
     @classmethod
     def ls_field_list(cls, received_field_list=[]):
@@ -116,8 +124,8 @@ def ls_field_list(cls, received_field_list=[]):
                 continue
             elif field in cls.relation_fields:
                 # parent directory
-                return_file_fields.append('%s__identifier' % field)
-                return_file_fields.append('%s__id' % field)
+                return_file_fields.append("%s__identifier" % field)
+                return_file_fields.append("%s__id" % field)
             else:
                 return_file_fields.append(field)
 
diff --git a/src/metax_api/api/rest/base/serializers/file_serializer.py b/src/metax_api/api/rest/base/serializers/file_serializer.py
index 40706cf6..04e66973 100755
--- a/src/metax_api/api/rest/base/serializers/file_serializer.py
+++ b/src/metax_api/api/rest/base/serializers/file_serializer.py
@@ -24,22 +24,22 @@
 
 
 END_USER_UPDATE_ALLOWED_FIELDS = [
-    'file_characteristics',
-
+    "file_characteristics",
     # not set by the user, but are set by metax, so should not be discarded
-    'date_modified',
-    'user_modified',
-    'service_modified',
-    '__request'
+    "date_modified",
+    "user_modified",
+    "service_modified",
+    "__request",
 ]
 
 CHECKSUM_ALGORITHMS = settings.CHECKSUM_ALGORITHMS
 
+
 class FileSerializer(CommonSerializer):
 
-    checksum_fields = set(['algorithm', 'checked', 'value'])
+    checksum_fields = set(["algorithm", "checked", "value"])
 
     # has to be present to not break rest_framework browsable api
-    checksum = serializers.CharField(source='checksum_value', read_only=True)
+    checksum = serializers.CharField(source="checksum_value", read_only=True)
 
     # file paths must preserve any leading or trailing whitespace, as those
     # are valid characters in a filename. note: file_path is actually a
@@ -50,34 +50,38 @@ class FileSerializer(CommonSerializer):
     # identifier is not set as unique in the db due to having to allow removed files to exist.
     # use the same validator that would otherwise automatically be used, to verify uniqueness
     # among non-removed files.
-    identifier = serializers.CharField(validators=[UniqueValidator(
-        queryset=File.objects.all(),
-        message='a file with given identifier already exists'
-    )])
+    identifier = serializers.CharField(
+        validators=[
+            UniqueValidator(
+                queryset=File.objects.all(),
+                message="a file with given identifier already exists",
+            )
+        ]
+    )
 
     class Meta:
         model = File
         fields = (
-            'id',
-            'byte_size',
-            'checksum',
-            'checksum_algorithm',
-            'checksum_checked',
-            'checksum_value',
-            'parent_directory',
-            'file_deleted',
-            'file_frozen',
-            'file_format',
-            'file_modified',
-            'file_name',
-            'file_path',
-            'file_storage',
-            'file_uploaded',
-            'identifier',
-            'file_characteristics',
-            'file_characteristics_extension',
-            'open_access',
-            'project_identifier',
+            "id",
+            "byte_size",
+            "checksum",
+            "checksum_algorithm",
+            "checksum_checked",
+            "checksum_value",
+            "parent_directory",
+            "file_deleted",
+            "file_frozen",
+            "file_format",
+            "file_modified",
+            "file_name",
+            "file_path",
+            "file_storage",
+            "file_uploaded",
+            "identifier",
+            "file_characteristics",
+            "file_characteristics_extension",
+            "open_access",
+            "project_identifier",
         ) + CommonSerializer.Meta.fields
 
         extra_kwargs = CommonSerializer.Meta.extra_kwargs
@@ -86,53 +90,57 @@ def is_valid(self, raise_exception=False):
         if self._request_by_end_user():
             self._end_user_update_validations(self.initial_data)
 
-        if 'file_storage' in self.initial_data:
-            self.initial_data['file_storage'] = self._get_id_from_related_object(
-                'file_storage', self._get_file_storage_relation)
+        if "file_storage" in self.initial_data:
+            self.initial_data["file_storage"] = self._get_id_from_related_object(
+                "file_storage", self._get_file_storage_relation
+            )
 
-        if 'checksum' in self.initial_data:
-            self._flatten_checksum(self.initial_data['checksum'])
+        if "checksum" in self.initial_data:
+            self._flatten_checksum(self.initial_data["checksum"])
 
-        if 'parent_directory' in self.initial_data:
-            self.initial_data['parent_directory'] = self._get_id_from_related_object(
-                'parent_directory', self._get_parent_directory_relation)
+        if "parent_directory" in self.initial_data:
+            self.initial_data["parent_directory"] = self._get_id_from_related_object(
+                "parent_directory", self._get_parent_directory_relation
+            )
 
-        if ('file_characteristics' in self.initial_data and 'format_version'
-                in self.initial_data['file_characteristics']):
-            if self.initial_data['file_characteristics']['format_version'] == "":
-                self.initial_data['file_characteristics'].pop('format_version', None)
+        if (
+            "file_characteristics" in self.initial_data
+            and "format_version" in self.initial_data["file_characteristics"]
+        ):
+            if self.initial_data["file_characteristics"]["format_version"] == "":
+                self.initial_data["file_characteristics"].pop("format_version", None)
 
         super(FileSerializer, self).is_valid(raise_exception=raise_exception)
 
     def to_representation(self, instance):
         res = super(FileSerializer, self).to_representation(instance)
 
-        if 'file_storage' in res:
-            if self.expand_relation_requested('file_storage'):
-                res['file_storage'] = FileStorageSerializer(instance.file_storage).data
+        if "file_storage" in res:
+            if self.expand_relation_requested("file_storage"):
+                res["file_storage"] = FileStorageSerializer(instance.file_storage).data
             else:
-                res['file_storage'] = {
-                    'id': instance.file_storage.id,
-                    'identifier': instance.file_storage.file_storage_json['identifier'],
+                res["file_storage"] = {
+                    "id": instance.file_storage.id,
+                    "identifier": instance.file_storage.file_storage_json["identifier"],
                 }
 
-        if 'parent_directory' in res:
-            if self.expand_relation_requested('parent_directory'):
-                res['parent_directory'] = DirectorySerializer(instance.parent_directory).data
+        if "parent_directory" in res:
+            if self.expand_relation_requested("parent_directory"):
+                res["parent_directory"] = DirectorySerializer(instance.parent_directory).data
             else:
-                res['parent_directory'] = {
-                    'id': instance.parent_directory.id,
-                    'identifier': instance.parent_directory.identifier,
+                res["parent_directory"] = {
+                    "id": instance.parent_directory.id,
+                    "identifier": instance.parent_directory.identifier,
                 }
 
-        if not self.requested_fields or any('checksum' in f for f in self.requested_fields):
-            res['checksum'] = self.form_checksum(res)
+        if not self.requested_fields or any("checksum" in f for f in self.requested_fields):
+            res["checksum"] = self.form_checksum(res)
 
         return res
 
     def validate_file_characteristics(self, value):
-        validate_json(value, self.context['view'].json_schema)
-        FS.validate_file_characteristics_reference_data(value, self.context['view'].cache)
+        validate_json(value, self.context["view"].json_schema)
+        FS.validate_file_characteristics_reference_data(value, self.context["view"].cache)
         return value
 
     def validate_file_path(self, value):
@@ -140,26 +148,28 @@ def validate_file_path(self, value):
         Ensure file_path is unique in the project, within unremoved files.
         file_path can exist multiple times for removed files though.
         """
-        if hasattr(self, 'file_path_checked'):
+        if hasattr(self, "file_path_checked"):
             # has been previously validated during bulk operation processing.
             # saves a fetch to the db.
             return value
 
         if self._operation_is_create:
-            if 'project_identifier' not in self.initial_data:
+            if "project_identifier" not in self.initial_data:
                 # the validation for project_identifier is executed later...
                 return value
-            project = self.initial_data['project_identifier']
+            project = self.initial_data["project_identifier"]
             if File.objects.filter(project_identifier=project, file_path=value).exists():
-                raise ValidationError('a file with path %s already exists in project %s' % (value, project))
+                raise ValidationError(
+                    "a file with path %s already exists in project %s" % (value, project)
+                )
         elif self._operation_is_update:
-            if 'file_path' not in self.initial_data:
+            if "file_path" not in self.initial_data:
                 return value
-            if self.instance.file_path != self.initial_data['file_path']:
+            if self.instance.file_path != self.initial_data["file_path"]:
                 # would require re-arranging the virtual file tree... implement in the future if need arises
-                raise ValidationError('file_path can not be changed after creating')
+                raise ValidationError("file_path can not be changed after creating")
         else:
             # delete
             pass
@@ -172,8 +182,11 @@ def validate_checksum_algorithm(self, value):
         """
         if value not in CHECKSUM_ALGORITHMS:
-            raise ValidationError('file checksum_algorithm should be one of {}, now {}'
-                                  .format(CHECKSUM_ALGORITHMS, value))
+            raise ValidationError(
+                "file checksum_algorithm should be one of {}, now {}".format(
+                    CHECKSUM_ALGORITHMS, value
+                )
+            )
 
         return value
 
@@ -181,7 +194,9 @@ def _end_user_update_validations(self, validated_data):
         """
         Enforce some rules related to end users when updating files.
         """
-        fields_to_discard = [ key for key in validated_data.keys() if key not in END_USER_UPDATE_ALLOWED_FIELDS ]
+        fields_to_discard = [
+            key for key in validated_data.keys() if key not in END_USER_UPDATE_ALLOWED_FIELDS
+        ]
         for field_name in fields_to_discard:
             del validated_data[field_name]
 
@@ -190,13 +205,15 @@ def _get_file_storage_relation(self, identifier_value):
         Passed to _get_id_from_related_object() to be used when relation was a string identifier
         """
         if isinstance(identifier_value, dict):
-            identifier_value = identifier_value['file_storage_json']['identifier']
+            identifier_value = identifier_value["file_storage_json"]["identifier"]
         try:
             return FileStorage.objects.get(
-                file_storage_json__contains={ 'identifier': identifier_value }
+                file_storage_json__contains={"identifier": identifier_value}
             ).id
         except FileStorage.DoesNotExist:
-            raise ValidationError({ 'file_storage': ['identifier %s not found' % str(identifier_value)]})
+            raise ValidationError(
+                {"file_storage": ["identifier %s not found" % str(identifier_value)]}
+            )
 
     def _flatten_checksum(self, checksum):
         """
@@ -215,7 +232,7 @@ def _flatten_checksum(self, checksum):
         """
         for key in self.checksum_fields:
             if key in checksum:
-                self.initial_data['checksum_%s' % key] = checksum[key]
+                self.initial_data["checksum_%s" % key] = checksum[key]
 
     @classmethod
     def form_checksum(cls, file_data):
@@ -225,7 +242,7 @@ def form_checksum(cls, file_data):
         """
         checksum = {}
         for key in cls.checksum_fields:
-            checksum_field = 'checksum_%s' % key
+            checksum_field = "checksum_%s" % key
             if checksum_field in file_data:
                 checksum[key] = file_data[checksum_field]
                 file_data.pop(checksum_field)
@@ -236,18 +253,20 @@ def _get_parent_directory_relation(self, identifier_value):
         Passed to _get_id_from_related_object() to be used when relation was a string identifier
         """
         if isinstance(identifier_value, dict):
-            identifier_value = identifier_value['identifier']
+            identifier_value = identifier_value["identifier"]
         try:
             return Directory.objects.get(identifier=identifier_value).id
         except Directory.DoesNotExist:
-            raise ValidationError({ 'parent_directory': ['identifier %s not found' % str(identifier_value)]})
+            raise ValidationError(
+                {"parent_directory": ["identifier %s not found" % str(identifier_value)]}
+            )
 
 
 class LightFileSerializer(LightSerializer):
 
     allowed_fields = set(FileSerializer.Meta.fields)
-    relation_fields = set(['file_storage', 'parent_directory'])
-    special_fields = set([ 'checksum_%s' % field for field in FileSerializer.checksum_fields ])
+    relation_fields = set(["file_storage", "parent_directory"])
+    special_fields = set(["checksum_%s" % field for field in FileSerializer.checksum_fields])
 
     @classmethod
     def ls_field_list(cls, received_field_list=[]):
@@ -257,24 +276,24 @@ def ls_field_list(cls, received_field_list=[]):
         field_list = super().ls_field_list(received_field_list)
         if not received_field_list:
             # a field of this name does not exist in the db - remove or will cause crash
-            field_list.discard('checksum')
+            field_list.discard("checksum")
 
         return_file_fields = []
         for field in field_list:
             if field not in cls.allowed_fields:
                 continue
-            elif field == 'checksum':
+            elif field == "checksum":
                 for checksum_field in FileSerializer.checksum_fields:
-                    return_file_fields.append('checksum_%s' % checksum_field)
+                    return_file_fields.append("checksum_%s" % checksum_field)
                 break
             elif field in cls.relation_fields:
-                if field == 'file_storage':
-                    return_file_fields.append('%s__file_storage_json' % field)
-                    return_file_fields.append('%s__id' % field)
+                if field == "file_storage":
+                    return_file_fields.append("%s__file_storage_json" % field)
+                    return_file_fields.append("%s__id" % field)
                 else:
                     # parent directory
-                    return_file_fields.append('%s__identifier' % field)
-                    return_file_fields.append('%s__id' % field)
+                    return_file_fields.append("%s__identifier" % field)
+                    return_file_fields.append("%s__id" % field)
             else:
                 return_file_fields.append(field)
 
@@ -282,8 +301,8 @@ def ls_field_list(cls, received_field_list=[]):
 
     @staticmethod
     def serialize_special_field(file, field, value):
-        if field.startswith('checksum'):
+        if field.startswith("checksum"):
             try:
-                file['checksum'][field.split('_')[1]] = value
+                file["checksum"][field.split("_")[1]] = value
             except KeyError:
-                file['checksum'] = { field.split('_')[1]: value }
+                file["checksum"] = {field.split("_")[1]: value}
diff --git a/src/metax_api/api/rest/base/serializers/file_storage_serializer.py b/src/metax_api/api/rest/base/serializers/file_storage_serializer.py
index f8286175..093993a3 100755
--- a/src/metax_api/api/rest/base/serializers/file_storage_serializer.py
+++ b/src/metax_api/api/rest/base/serializers/file_storage_serializer.py
@@ -14,25 +14,26 @@
 
 
 class FileStorageSerializer(CommonSerializer):
-
     class Meta:
         model = FileStorage
         fields = (
-            'id',
-            'file_storage_json',
+            "id",
+            "file_storage_json",
         ) + CommonSerializer.Meta.fields
 
         extra_kwargs = CommonSerializer.Meta.extra_kwargs
 
     def validate_file_storage_json(self, value):
-        validate_json(value, self.context['view'].json_schema)
+        validate_json(value, self.context["view"].json_schema)
 
         if self._operation_is_create:
             self._validate_identifier_uniqueness(value)
 
         return value
 
     def _validate_identifier_uniqueness(self, file_storage_json):
-        if FileStorage.objects.filter(file_storage_json__identifier=file_storage_json['identifier']).exists():
-            raise ValidationError({'identifier':
-                ['identifier %s already exists' % file_storage_json['identifier']]
-            })
+        if FileStorage.objects.filter(
+            file_storage_json__identifier=file_storage_json["identifier"]
+        ).exists():
+            raise ValidationError(
+                {"identifier": ["identifier %s already exists" % file_storage_json["identifier"]]}
+            )
diff --git a/src/metax_api/api/rest/base/serializers/serializer_utils.py b/src/metax_api/api/rest/base/serializers/serializer_utils.py
index 88a9355a..1f457874 100755
--- a/src/metax_api/api/rest/base/serializers/serializer_utils.py
+++ b/src/metax_api/api/rest/base/serializers/serializer_utils.py
@@ -12,16 +12,13 @@
 from jsonschema.exceptions import ValidationError as JsonValidationError
 from rest_framework.serializers import ValidationError
 
-date_re = re.compile(
-    r'^\d{4}-\d{2}-\d{2}$'
-)
+date_re = re.compile(r"^\d{4}-\d{2}-\d{2}$")
 
 datetime_re = re.compile(
-    r'^\d{4}-\d{2}-\d{2}T'
-    r'\d{2}:\d{2}:\d{2}(\.\d{1,9})?'
-    r'(Z|[+-]\d{2}:\d{2})$'
+    r"^\d{4}-\d{2}-\d{2}T" r"\d{2}:\d{2}:\d{2}(\.\d{1,9})?" r"(Z|[+-]\d{2}:\d{2})$"
 )
 
+
 def validate_json(value, schema):
     """
     Since RFC3339 dependency was removed, date and datetime formats have to be validated
@@ -41,10 +38,13 @@ def validate_json(value, schema):
             path = e.path
             schema = e.schema
 
-        raise ValidationError('%s. Json path: %s. Schema: %s' % (message, [p for p in path], schema))
+        raise ValidationError(
+            "%s. Json path: %s. Schema: %s" % (message, [p for p in path], schema)
+        )
+
 
 # helper methods for date and datetime validation
-@FormatChecker.checks(FormatChecker, format='date')
+@FormatChecker.checks(FormatChecker, format="date")
 def date(value):
     if isinstance(value, str_types):
         match = date_re.match(value)
@@ -53,7 +53,8 @@ def date(value):
     return False
 
-@FormatChecker.checks(FormatChecker, format='date-time')
+
+@FormatChecker.checks(FormatChecker, format="date-time")
 def date_time(value):
     if isinstance(value, str_types):
         match = datetime_re.match(value)
diff --git a/src/metax_api/api/rest/base/serializers/xml_metadata_serializer.py b/src/metax_api/api/rest/base/serializers/xml_metadata_serializer.py
index c6aa042b..e7f5845c 100755
--- a/src/metax_api/api/rest/base/serializers/xml_metadata_serializer.py
+++ b/src/metax_api/api/rest/base/serializers/xml_metadata_serializer.py
@@ -11,23 +11,20 @@
 
 
 class XmlMetadataSerializer(CommonSerializer):
-
     class Meta:
         model = XmlMetadata
         fields = (
-            'id',
-            'namespace',
-            'xml',
-            'file',
+            "id",
+            "namespace",
+            "xml",
+            "file",
         ) + CommonSerializer.Meta.fields
 
         extra_kwargs = CommonSerializer.Meta.extra_kwargs
 
     def to_representation(self, instance):
         res = super(XmlMetadataSerializer, self).to_representation(instance)
-        res['file'] = {
-            'identifier': self.instance.file.identifier
-        }
+        res["file"] = {"identifier": self.instance.file.identifier}
         return res
 
     def validate_xml(self, value):
diff --git a/src/metax_api/api/rest/base/views/api_error_view.py b/src/metax_api/api/rest/base/views/api_error_view.py
index b9612ec3..1fd58da1 100755
--- a/src/metax_api/api/rest/base/views/api_error_view.py
+++ b/src/metax_api/api/rest/base/views/api_error_view.py
@@ -39,7 +39,7 @@ class ApiErrorViewSet(CommonViewSet):
     serializer_class = FileSerializer
 
     def initial(self, request, *args, **kwargs):
-        if request.user.username != 'metax':
+        if request.user.username != "metax":
             raise Http403
         return super(ApiErrorViewSet, self).initial(request, *args, **kwargs)
 
@@ -48,7 +48,7 @@ def get_queryset(self):
 
     def retrieve(self, request, *args, **kwargs):
         try:
-            error_details = ApiErrorService.retrieve_error_details(kwargs['pk'])
+            error_details = ApiErrorService.retrieve_error_details(kwargs["pk"])
         except:
             raise Http404
         return Response(data=error_details, status=200)
@@ -58,15 +58,15 @@ def list(self, request, *args, **kwargs):
         return Response(data=error_list, status=200)
 
     def destroy(self, request, *args, **kwargs):
-        _logger.info('DELETE %s called by %s' % (request.META['PATH_INFO'], request.user.username))
-        ApiErrorService.remove_error_file(kwargs['pk'])
+        _logger.info("DELETE %s called by %s" % (request.META["PATH_INFO"], request.user.username))
+        ApiErrorService.remove_error_file(kwargs["pk"])
         return Response(status=204)
 
-    @action(detail=False, methods=['post'], url_path="flush")
+    @action(detail=False, methods=["post"], url_path="flush")
     def flush_errors(self, request):
-        _logger.info('%s called by %s' % (request.META['PATH_INFO'], request.user.username))
+        _logger.info("%s called by %s" % (request.META["PATH_INFO"], request.user.username))
         files_deleted_count = ApiErrorService.flush_errors()
-        return Response(data={ 'files_deleted': files_deleted_count }, status=200)
+        return Response(data={"files_deleted": files_deleted_count}, status=200)
 
     def update(self, request, *args, **kwargs):
         raise Http501()
diff --git a/src/metax_api/api/rest/base/views/common_view.py b/src/metax_api/api/rest/base/views/common_view.py
index 79bb4b84..b0b771d3 100755
--- 
a/src/metax_api/api/rest/base/views/common_view.py
+++ b/src/metax_api/api/rest/base/views/common_view.py
@@ -18,12 +18,17 @@
 from metax_api.exceptions import Http400, Http403, Http500
 from metax_api.permissions import EndUserPermissions, ServicePermissions
-from metax_api.services import ApiErrorService, CallableService, CommonService as CS, RedisCacheService
+from metax_api.services import (
+    ApiErrorService,
+    CallableService,
+    CommonService as CS,
+    RedisCacheService,
+)

 _logger = logging.getLogger(__name__)

 RESPONSE_SUCCESS_CODES = (200, 201, 204)
-WRITE_OPERATIONS = ('PUT', 'PATCH', 'POST')
+WRITE_OPERATIONS = ("PUT", "PATCH", "POST")


 class CommonViewSet(ModelViewSet):
@@ -33,7 +38,7 @@ class CommonViewSet(ModelViewSet):
     which include fields like modified and created timestamps, uuid, active flags etc.
     """

-    api_type = 'rest'
+    api_type = "rest"
     authentication_classes = ()
     permission_classes = [EndUserPermissions, ServicePermissions]

@@ -53,7 +58,9 @@ class CommonViewSet(ModelViewSet):

     def __init__(self, *args, **kwargs):
         super(CommonViewSet, self).__init__(*args, **kwargs)
-        if (hasattr(self, 'object') and self.object) and (not hasattr(self, 'queryset') or self.queryset is None):
+        if (hasattr(self, "object") and self.object) and (
+            not hasattr(self, "queryset") or self.queryset is None
+        ):
             # ^ must have attribute 'object' set, AND queryset not set.

             # the primary location where a queryset is initialized for
@@ -67,7 +74,7 @@ def dispatch(self, request, **kwargs):
         res = super().dispatch(request, **kwargs)

         if res.status_code in RESPONSE_SUCCESS_CODES:
-            if CS.get_boolean_query_param(self.request, 'dryrun'):
+            if CS.get_boolean_query_param(self.request, "dryrun"):
                 # with dryrun parameter:
                 # - nothing must be saved into db
                 # - no events must escape from metax to other services, such as rabbitmq
@@ -87,7 +94,8 @@ def get_permissions(self):
         Instantiates and returns the list of permissions that this view requires.
         """
         return [
-            permission() for permission in self.permission_classes
+            permission()
+            for permission in self.permission_classes
             if permission.service_permission == self.request.user.is_service
         ]
@@ -111,8 +119,8 @@ def handle_exception(self, exc):
             # must convert any standard python exceptions to framework-recognized
             # exceptions, so that the following handle_exception() goes the expected
             # path, and prepares for db rollback, sets some headers etc.
-            _logger.exception('Internal Server Error')
-            exc = Http500({ 'detail': ['Internal Server Error'] })
+            _logger.exception("Internal Server Error")
+            exc = Http500({"detail": ["Internal Server Error"]})

         try:
             # fyi: when an error occurs during a request, and ATOMIC_REQUESTS=True,
@@ -121,9 +129,12 @@ def handle_exception(self, exc):
             response = super(CommonViewSet, self).handle_exception(exc)
         except:
             # for when absolutely everything has gone wrong...
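# --- editor's aside, not part of the patch: handle_exception() above normalizes
# any unexpected exception into a framework-recognized 500 before delegating, so
# the rollback and error-storage paths always run. A minimal stdlib-only sketch
# of that control flow (all names below are invented for illustration):

class InternalServerError(Exception):
    status_code = 500

def normalize_and_handle(exc, framework_handler):
    if not isinstance(exc, InternalServerError):
        # unknown error type: normalize to a 500 before delegating
        exc = InternalServerError("Internal Server Error")
    try:
        return framework_handler(exc)
    except Exception:
        # last-resort fallback for when the handler itself fails
        return {"detail": ["Internal Server Error"]}, 500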
-            _logger.exception('Exception while trying to handle original exception: %s' % str(exc))
+            _logger.exception("Exception while trying to handle original exception: %s" % str(exc))
             set_rollback()
-            response = Response({ 'detail': ['Internal Server Error'] }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+            response = Response(
+                {"detail": ["Internal Server Error"]},
+                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            )

         if type(exc) not in (Http403, Http404, PermissionDenied, MethodNotAllowed):
             ApiErrorService.store_error_details(self.request, response, exc)
@@ -133,12 +144,12 @@
     # TODO: supporting both parameters over a transition period and eventually will get rid of no_pagination.
     def paginate_queryset(self, queryset):
         keys = self.request.query_params.keys()
-        if 'pagination' in keys:
-            if not CS.get_boolean_query_param(self.request, 'pagination'):
+        if "pagination" in keys:
+            if not CS.get_boolean_query_param(self.request, "pagination"):
                 return None
             return super(CommonViewSet, self).paginate_queryset(queryset)
-        elif 'no_pagination' in keys:
-            if CS.get_boolean_query_param(self.request, 'no_pagination'):
+        elif "no_pagination" in keys:
+            if CS.get_boolean_query_param(self.request, "no_pagination"):
                 return None
             return super(CommonViewSet, self).paginate_queryset(queryset)
         else:
@@ -156,21 +167,21 @@ def get_queryset(self):

         CS.set_if_modified_since_filter(self.request, additional_filters)

-        if hasattr(self, 'queryset_search_params'):
+        if hasattr(self, "queryset_search_params"):
             additional_filters.update(**self.queryset_search_params)

-        if 'q_filters' in additional_filters:
+        if "q_filters" in additional_filters:
             # Q-filter objects, which can contain more complex filter options such as OR-clauses
-            q_filters = additional_filters.pop('q_filters')
+            q_filters = additional_filters.pop("q_filters")

-        if CS.get_boolean_query_param(self.request, 'removed'):
-            additional_filters.update({'removed': True})
+        if CS.get_boolean_query_param(self.request, "removed"):
+            additional_filters.update({"removed": True})
             self.queryset = self.queryset_unfiltered

-        if 'fields' in self.request.query_params:
+        if "fields" in self.request.query_params:
             if not self.fields:
                 # save fields when no inheriting view has done it yet
-                self.fields = self.request.query_params['fields'].split(',')
+                self.fields = self.request.query_params["fields"].split(",")

             for field in self.fields:
                 if field not in self.get_serializer_class().Meta.fields:
@@ -181,18 +192,18 @@
             # check if requested fields are relations, so that we know to include them in select_related.
             # if no field is a relation, select_related will be made empty.
-            self.select_related = [ rel for rel in self.select_related if rel in self.fields ]
+            self.select_related = [rel for rel in self.select_related if rel in self.fields]

         queryset = super().get_queryset().filter(*q_filters, **additional_filters)

-        if self.request.META['REQUEST_METHOD'] in WRITE_OPERATIONS:
+        if self.request.META["REQUEST_METHOD"] in WRITE_OPERATIONS:
             # for update operations, do not select relations in the original queryset
             # so that select_for_update() can be used to lock the row for the duration
             # of the update-operation. when the full object is returned, it is possible
             # that additional queries need to be executed to the db to retrieve relation
             # data, but that seems to be the price to pay to be able to lock the rows being
             # written to.
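# --- editor's aside, not part of the patch: the branch below locks only the
# matched rows of the main table (FOR UPDATE OF "self"), which is why relations
# are not select_related()'d on the write path. A hedged sketch of the split,
# assuming `qs` is any Django queryset and using Django's real
# QuerySet.select_for_update() signature:

def queryset_for_method(qs, method, relations):
    if method in ("PUT", "PATCH", "POST"):
        # row lock held for the duration of the transaction;
        # relation data is fetched later with separate queries
        return qs.select_for_update(nowait=False, of=("self",))
    # read path: join the relations in the same query instead
    return qs.select_related(*relations)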
-            queryset = queryset.select_for_update(nowait=False, of=('self',))
+            queryset = queryset.select_for_update(nowait=False, of=("self",))
         else:
             queryset = queryset.select_related(*self.select_related)

@@ -205,12 +216,14 @@ def get_object(self, search_params=None):

         param search_params: pass a custom filter instead of using the default search mechanism
         """
-        if CS.get_boolean_query_param(self.request, 'removed'):
+        if CS.get_boolean_query_param(self.request, "removed"):
             return self.get_removed_object(search_params=search_params)
         elif search_params:
             filter_kwargs = search_params
         else:
-            if CS.is_primary_key(self.kwargs.get(self.lookup_field, False)) or not hasattr(self, 'lookup_field_other'):
+            if CS.is_primary_key(self.kwargs.get(self.lookup_field, False)) or not hasattr(
+                self, "lookup_field_other"
+            ):
                 # lookup by original lookup_field. standard django procedure
                 lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
             else:
@@ -220,7 +233,7 @@
                 # replace original field name with field name in lookup_field_other
                 self.kwargs[lookup_url_kwarg] = self.kwargs.get(self.lookup_field)

-            filter_kwargs = { lookup_url_kwarg: self.kwargs[lookup_url_kwarg] }
+            filter_kwargs = {lookup_url_kwarg: self.kwargs[lookup_url_kwarg]}

         queryset = self.filter_queryset(self.get_queryset())

@@ -248,9 +261,9 @@ def get_removed_object(self, search_params=None):
         if not search_params:
             lookup_value = self.kwargs.get(self.lookup_field)
             if CS.is_primary_key(lookup_value):
-                search_params = { 'pk': lookup_value }
-            elif hasattr(self, 'lookup_field_other'):
-                search_params = { self.lookup_field_other: lookup_value }
+                search_params = {"pk": lookup_value}
+            elif hasattr(self, "lookup_field_other"):
+                search_params = {self.lookup_field_other: lookup_value}
             else:
                 raise Http404

@@ -266,7 +279,7 @@ def update(self, request, *args, **kwargs):

     def update_bulk(self, request, *args, **kwargs):
         serializer_class = self.get_serializer_class()
-        kwargs['context'] = self.get_serializer_context()
+        kwargs["context"] = self.get_serializer_context()
         results, http_status = CS.update_bulk(request, self.object, serializer_class, **kwargs)
         response = Response(results, status=http_status)
         self._check_and_store_bulk_error(request, response)
@@ -274,14 +287,14 @@
     def partial_update(self, request, *args, **kwargs):
         CS.update_common_info(request)
-        kwargs['partial'] = True
+        kwargs["partial"] = True
         res = super(CommonViewSet, self).update(request, *args, **kwargs)
         return res

     def partial_update_bulk(self, request, *args, **kwargs):
         serializer_class = self.get_serializer_class()
-        kwargs['context'] = self.get_serializer_context()
-        kwargs['partial'] = True
+        kwargs["context"] = self.get_serializer_context()
+        kwargs["partial"] = True
         results, http_status = CS.update_bulk(request, self.object, serializer_class, **kwargs)
         response = Response(results, status=http_status)
         self._check_and_store_bulk_error(request, response)
@@ -289,7 +302,7 @@
     def create(self, request, *args, **kwargs):
         serializer_class = self.get_serializer_class()
-        kwargs['context'] = self.get_serializer_context()
+        kwargs["context"] = self.get_serializer_context()
         results, http_status = self.create_bulk_method(request, serializer_class, **kwargs)
         response = Response(results, status=http_status)
         self._check_and_store_bulk_error(request, response)
@@ -308,9 +321,9 @@
     def initialize_request(self, request, *args, **kwargs):
         """
         Overridden from 
rest_framework to preserve the username and other variables set during identifyapicaller middleware. """ - username = request.user.username if hasattr(request.user, 'username') else None - is_service = request.user.is_service if hasattr(request.user, 'is_service') else False - token = request.user.token if hasattr(request.user, 'token') else None + username = request.user.username if hasattr(request.user, "username") else None + is_service = request.user.is_service if hasattr(request.user, "is_service") else False + token = request.user.token if hasattr(request.user, "token") else None drf_req = super(CommonViewSet, self).initialize_request(request, *args, **kwargs) @@ -330,8 +343,10 @@ def set_json_schema(self, view_file): always looks for the schema from a directory relative to the view's location, taking into account its version. """ - self.json_schema = CS.get_json_schema(path.dirname(view_file) + '/../schemas', - self.__class__.__name__.lower()[:-(len('viewset'))]) + self.json_schema = CS.get_json_schema( + path.dirname(view_file) + "/../schemas", + self.__class__.__name__.lower()[: -(len("viewset"))], + ) def _check_and_store_bulk_error(self, request, response): """ @@ -339,8 +354,8 @@ def _check_and_store_bulk_error(self, request, response): error data is not saved. Separately check presence of failures in bulk operations responses, and save data if necessary. """ - if 'failed' in response.data and len(response.data['failed']): - ApiErrorService.store_error_details(request, response, other={ 'bulk_request': True }) + if "failed" in response.data and len(response.data["failed"]): + ApiErrorService.store_error_details(request, response, other={"bulk_request": True}) def get_api_name(self): """ @@ -349,4 +364,4 @@ def get_api_name(self): (for example, directories-api), will inherit this and return a customized result. 
""" - return '%ss' % self.__class__.__name__.split('ViewSet')[0].lower() + return "%ss" % self.__class__.__name__.split("ViewSet")[0].lower() diff --git a/src/metax_api/api/rest/base/views/contract_view.py b/src/metax_api/api/rest/base/views/contract_view.py index 39c9841d..f3ced7ec 100755 --- a/src/metax_api/api/rest/base/views/contract_view.py +++ b/src/metax_api/api/rest/base/views/contract_view.py @@ -21,7 +21,7 @@ class ContractViewSet(CommonViewSet): serializer_class = ContractSerializer object = Contract - lookup_field = 'pk' + lookup_field = "pk" def __init__(self, *args, **kwargs): self.set_json_schema(__file__) @@ -31,7 +31,7 @@ def get_object(self): lookup_value = self.kwargs.get(self.lookup_field, False) search_params = None if not CommonService.is_primary_key(lookup_value): - search_params = { 'contract_json__contains': { 'identifier': lookup_value }} + search_params = {"contract_json__contains": {"identifier": lookup_value}} return super(ContractViewSet, self).get_object(search_params=search_params) def get_queryset(self): @@ -40,13 +40,14 @@ def get_queryset(self): else: query_params = self.request.query_params additional_filters = {} - if query_params.get('organization', False): - additional_filters['contract_json__contains'] = { - 'organization': { 'organization_identifier': query_params['organization'] }} + if query_params.get("organization", False): + additional_filters["contract_json__contains"] = { + "organization": {"organization_identifier": query_params["organization"]} + } return super(ContractViewSet, self).get_queryset().filter(**additional_filters) - @action(detail=True, methods=['get'], url_path="datasets") + @action(detail=True, methods=["get"], url_path="datasets") def datasets_get(self, request, pk=None): contract = self.get_object() - catalog_records = [ CatalogRecordSerializer(f).data for f in contract.records.all() ] + catalog_records = [CatalogRecordSerializer(f).data for f in contract.records.all()] return Response(data=catalog_records, status=status.HTTP_200_OK) diff --git a/src/metax_api/api/rest/base/views/data_catalog_view.py b/src/metax_api/api/rest/base/views/data_catalog_view.py index 9ee1b692..b9bb09ff 100755 --- a/src/metax_api/api/rest/base/views/data_catalog_view.py +++ b/src/metax_api/api/rest/base/views/data_catalog_view.py @@ -17,7 +17,7 @@ class DataCatalogViewSet(CommonViewSet): serializer_class = DataCatalogSerializer object = DataCatalog - lookup_field = 'pk' + lookup_field = "pk" def __init__(self, *args, **kwargs): self.set_json_schema(__file__) @@ -41,7 +41,7 @@ def _search_using_other_data_catalog_identifiers(self): lookup_value = self.kwargs.get(self.lookup_field) try: - obj = self._search_from_catalog_json({'identifier': lookup_value}, True) + obj = self._search_from_catalog_json({"identifier": lookup_value}, True) except Exception: raise @@ -50,7 +50,8 @@ def _search_using_other_data_catalog_identifiers(self): def _search_from_catalog_json(self, search_json, raise_on_404): try: return super(DataCatalogViewSet, self).get_object( - search_params={'catalog_json__contains': search_json}) + search_params={"catalog_json__contains": search_json} + ) except Http404: if raise_on_404: raise diff --git a/src/metax_api/api/rest/base/views/dataset_view.py b/src/metax_api/api/rest/base/views/dataset_view.py index d18becd1..bd416afa 100755 --- a/src/metax_api/api/rest/base/views/dataset_view.py +++ b/src/metax_api/api/rest/base/views/dataset_view.py @@ -17,7 +17,11 @@ from metax_api.exceptions import Http400, Http403 from metax_api.models 
import CatalogRecord, Common, DataCatalog, Directory, File from metax_api.renderers import XMLRenderer -from metax_api.services import CatalogRecordService, CommonService as CS, RabbitMQService as rabbitmq +from metax_api.services import ( + CatalogRecordService, + CommonService as CS, + RabbitMQService as rabbitmq, +) from metax_api.settings import env from ..serializers import CatalogRecordSerializer, LightFileSerializer @@ -52,9 +56,7 @@ def get_object(self): try: cr = super(DatasetViewSet, self).get_object() except Http404: - if self.service_class.is_primary_key( - self.kwargs.get(self.lookup_field, False) - ): + if self.service_class.is_primary_key(self.kwargs.get(self.lookup_field, False)): # fail on pk search is clear... raise @@ -66,7 +68,7 @@ def get_object(self): return cr def get_queryset(self): - if not CS.get_boolean_query_param(self.request, 'include_legacy'): + if not CS.get_boolean_query_param(self.request, "include_legacy"): self.queryset = self.queryset.exclude( data_catalog__catalog_json__identifier__in=settings.LEGACY_CATALOGS ) @@ -105,9 +107,7 @@ def list(self, request, *args, **kwargs): # best to specify a variable for parameters intended for filtering purposes in get_queryset(), # because other api's may use query parameters of the same name, which can # mess up filtering if get_queryset() uses request.query_parameters directly. - self.queryset_search_params = self.service_class.get_queryset_search_params( - request - ) + self.queryset_search_params = self.service_class.get_queryset_search_params(request) if "preferred_identifier" in request.query_params: return self._retrieve_by_preferred_identifier(request, *args, **kwargs) @@ -134,14 +134,10 @@ def _metadata_version_get(self, request, *args, **kwargs): if self.service_class.is_primary_key(kwargs["metadata_version_identifier"]): search_params["id"] = kwargs["metadata_version_identifier"] else: - search_params["metadata_version_identifier"] = kwargs[ - "metadata_version_identifier" - ] + search_params["metadata_version_identifier"] = kwargs["metadata_version_identifier"] try: - research_dataset = cr.research_dataset_versions.get( - **search_params - ).research_dataset + research_dataset = cr.research_dataset_versions.get(**search_params).research_dataset except: raise Http404 @@ -152,10 +148,8 @@ def _metadata_version_get(self, request, *args, **kwargs): # possible to use the serializer, since an older metadata version of a ds # is not stored as part of the cr, but in the table ResearchDatasetVersion. # therefore, perform this checking and stripping separately here. 
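# --- editor's aside, not part of the patch: older metadata versions are plain
# dicts stored in ResearchDatasetVersion, so the access-type stripping below is
# applied by hand instead of through the serializer. A hedged sketch of such a
# sanitizer; the field name "contact" is an assumption for illustration, not
# the service's real implementation:

def strip_restricted(research_dataset, user_is_privileged):
    cleaned = dict(research_dataset)
    if not user_is_privileged:
        # drop contact information and other non-public details
        cleaned.pop("contact", None)
    return cleaned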
-            research_dataset = (
-                self.service_class.check_and_remove_metadata_based_on_access_type(
-                    self.service_class.remove_contact_info_metadata(research_dataset)
-                )
+            research_dataset = self.service_class.check_and_remove_metadata_based_on_access_type(
+                self.service_class.remove_contact_info_metadata(research_dataset)
             )

         return Response(data=research_dataset, status=status.HTTP_200_OK)
@@ -207,9 +201,7 @@ def files_list(self, request, pk=None):

     @action(detail=False, methods=["get"], url_path="identifiers")
     def get_all_identifiers(self, request):
-        self.queryset_search_params = self.service_class.get_queryset_search_params(
-            request
-        )
+        self.queryset_search_params = self.service_class.get_queryset_search_params(request)
         q = self.get_queryset().values("identifier")
         identifiers = [item["identifier"] for item in q]
         return Response(identifiers)
@@ -217,26 +209,18 @@
     @action(detail=False, methods=["get"], url_path="metadata_version_identifiers")
     def get_all_metadata_version_identifiers(self, request):
         # todo probably remove at some point
-        self.queryset_search_params = self.service_class.get_queryset_search_params(
-            request
-        )
+        self.queryset_search_params = self.service_class.get_queryset_search_params(request)
         q = self.get_queryset().values("research_dataset")
-        identifiers = [
-            item["research_dataset"]["metadata_version_identifier"] for item in q
-        ]
+        identifiers = [item["research_dataset"]["metadata_version_identifier"] for item in q]
         return Response(identifiers)

     @action(detail=False, methods=["get"], url_path="unique_preferred_identifiers")
     def get_all_unique_preferred_identifiers(self, request):
-        self.queryset_search_params = self.service_class.get_queryset_search_params(
-            request
-        )
+        self.queryset_search_params = self.service_class.get_queryset_search_params(request)

         if CS.get_boolean_query_param(request, "latest"):
             queryset = (
-                self.get_queryset()
-                .filter(next_dataset_version_id=None)
-                .values("research_dataset")
+                self.get_queryset().filter(next_dataset_version_id=None).values("research_dataset")
             )
         else:
             queryset = self.get_queryset().values("research_dataset")
@@ -254,19 +238,14 @@ def _search_using_dataset_identifiers(self):
         """
         lookup_value = self.kwargs.get(self.lookup_field, False)

-        if (
-            "preferred_identifier" in self.request.query_params
-            and self.request.method == "GET"
-        ):
+        if "preferred_identifier" in self.request.query_params and self.request.method == "GET":
             # search by preferred_identifier only for GET requests, while preferring:
             # - hits from att catalogs (assumed to be first created. improve logic if situation changes)
             # - first created (the first harvested occurrence, probably)
             # note: can't use get_object(), because get_object() will throw an error if there are multiple results
             obj = (
                 self.get_queryset()
-                .filter(
-                    research_dataset__contains={"preferred_identifier": lookup_value}
-                )
+                .filter(research_dataset__contains={"preferred_identifier": lookup_value})
                 .order_by("data_catalog_id", "date_created")
                 .first()
             )
@@ -280,17 +259,13 @@
             # services using this...
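# --- editor's aside, not part of the patch: the preferred_identifier search
# above resolves duplicates by ordering on (data_catalog_id, date_created) and
# taking the first hit. The same tie-break in plain Python; the record objects
# and their field names are assumptions for illustration:

from operator import attrgetter

def first_matching(records, preferred_identifier):
    hits = [r for r in records if r.preferred_identifier == preferred_identifier]
    return min(hits, key=attrgetter("data_catalog_id", "date_created"), default=None)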
            return super(DatasetViewSet, self).get_object(
                 search_params={
-                    "research_dataset__contains": {
-                        "metadata_version_identifier": lookup_value
-                    }
+                    "research_dataset__contains": {"metadata_version_identifier": lookup_value}
                 }
             )
         except Http404:
             pass

-        return super(DatasetViewSet, self).get_object(
-            search_params={"identifier": lookup_value}
-        )
+        return super(DatasetViewSet, self).get_object(search_params={"identifier": lookup_value})

     @action(detail=True, methods=["get"], url_path="redis")
     def redis_test(self, request, pk=None):  # pragma: no cover
@@ -321,12 +296,8 @@ def redis_test(self, request, pk=None):  # pragma: no cover
     def rabbitmq_test(self, request, pk=None):  # pragma: no cover
         if request.user.username != "metax":
             raise Http403()
-        rabbitmq.publish(
-            {"msg": "hello create"}, routing_key="create", exchange="datasets"
-        )
-        rabbitmq.publish(
-            {"msg": "hello update"}, routing_key="update", exchange="datasets"
-        )
+        rabbitmq.publish({"msg": "hello create"}, routing_key="create", exchange="datasets")
+        rabbitmq.publish({"msg": "hello update"}, routing_key="update", exchange="datasets")
         return Response(data={}, status=status.HTTP_200_OK)

     @action(
@@ -351,9 +322,7 @@ def update_cr_total_files_byte_sizes(self, request):
         # Update IDA CR total_files_byte_size field value without creating a new version
         # Skip CatalogRecord save since it prohibits changing the value of total_files_byte_size
         for cr in self.object.objects.filter(data_catalog_id__in=ida_catalog_ids):
-            cr.research_dataset["total_files_byte_size"] = sum(
-                f.byte_size for f in cr.files.all()
-            )
+            cr.research_dataset["total_files_byte_size"] = sum(f.byte_size for f in cr.files.all())
             cr.preserve_version = True
             super(Common, cr).save()
@@ -411,8 +380,8 @@ def list_datasets(self, request):
     def destroy_bulk(self, request, *args, **kwargs):
         return self.service_class.destroy_bulk(request)

-    @action(detail=False, methods=['post'], url_path="flush_password")
-    def flush_password(self, request): # pragma: no cover
+    @action(detail=False, methods=["post"], url_path="flush_password")
+    def flush_password(self, request):  # pragma: no cover
         """
         Set a password for flush api
         """
diff --git a/src/metax_api/api/rest/base/views/directory_view.py b/src/metax_api/api/rest/base/views/directory_view.py
index 53ebf0b3..587363c2 100755
--- a/src/metax_api/api/rest/base/views/directory_view.py
+++ b/src/metax_api/api/rest/base/views/directory_view.py
@@ -22,14 +22,14 @@ class DirectoryViewSet(CommonViewSet):
     serializer_class = DirectorySerializer
     object = Directory

-    select_related = ['parent_directory']
-    lookup_field_other = 'identifier'
+    select_related = ["parent_directory"]
+    lookup_field_other = "identifier"

     def get_api_name(self):
         """
         Overridden due to not being able to follow the common plural pattern...
         """
-        return 'directories'
+        return "directories"

     def list(self, request, *args, **kwargs):
         raise Http501()
@@ -57,31 +57,36 @@ def _get_directory_contents(self, request, identifier=None):
         A wrapper to call FS to collect and validate parameters from the request,
         and then call FS.get_directory_contents().
""" - paginate = CommonService.get_boolean_query_param(request, 'pagination') - path = request.query_params.get('path', None) - include_parent = CommonService.get_boolean_query_param(request, 'include_parent') - dirs_only = CommonService.get_boolean_query_param(request, 'directories_only') - recursive = CommonService.get_boolean_query_param(request, 'recursive') - max_depth = request.query_params.get('depth', 1) - project_identifier = request.query_params.get('project', None) - cr_identifier = request.query_params.get('cr_identifier', None) - not_cr_identifier = request.query_params.get('not_cr_identifier', None) - file_name = request.query_params.get('file_name') - directory_name = request.query_params.get('directory_name') + paginate = CommonService.get_boolean_query_param(request, "pagination") + path = request.query_params.get("path", None) + include_parent = CommonService.get_boolean_query_param(request, "include_parent") + dirs_only = CommonService.get_boolean_query_param(request, "directories_only") + recursive = CommonService.get_boolean_query_param(request, "recursive") + max_depth = request.query_params.get("depth", 1) + project_identifier = request.query_params.get("project", None) + cr_identifier = request.query_params.get("cr_identifier", None) + not_cr_identifier = request.query_params.get("not_cr_identifier", None) + file_name = request.query_params.get("file_name") + directory_name = request.query_params.get("directory_name") # max_depth can be an integer > 0, or * for everything. try: max_depth = int(max_depth) except ValueError: - if max_depth != '*': - raise Http400({ 'detail': ['value of depth must be an integer higher than 0, or *'] }) + if max_depth != "*": + raise Http400({"detail": ["value of depth must be an integer higher than 0, or *"]}) else: if max_depth <= 0: - raise Http400({ 'detail': ['value of depth must be higher than 0'] }) + raise Http400({"detail": ["value of depth must be higher than 0"]}) if cr_identifier and not_cr_identifier: - raise Http400({ 'detail': - ["there can only be one query parameter of 'cr_identifier' and 'not_cr_identifier'"] }) + raise Http400( + { + "detail": [ + "there can only be one query parameter of 'cr_identifier' and 'not_cr_identifier'" + ] + } + ) files_and_dirs = FileService.get_directory_contents( identifier=identifier, @@ -96,7 +101,7 @@ def _get_directory_contents(self, request, identifier=None): file_name=file_name, directory_name=directory_name, paginate=paginate, - request=request + request=request, ) if paginate: @@ -104,14 +109,14 @@ def _get_directory_contents(self, request, identifier=None): return Response(files_and_dirs) - @action(detail=True, methods=['get'], url_path="files") + @action(detail=True, methods=["get"], url_path="files") def get_files(self, request, pk=None): """ Return a list of child files and directories of a directory. 
""" return self._get_directory_contents(request, identifier=pk) - @action(detail=False, methods=['get'], url_path="files") + @action(detail=False, methods=["get"], url_path="files") def get_files_by_path(self, request): """ Return a list of child files and directories of a directory, queried @@ -119,17 +124,17 @@ def get_files_by_path(self, request): """ errors = defaultdict(list) - if 'project' not in request.query_params: - errors['detail'].append('project is a required query parameter') - if 'path' not in request.query_params: - errors['detail'].append('path is a required query parameter') + if "project" not in request.query_params: + errors["detail"].append("project is a required query parameter") + if "path" not in request.query_params: + errors["detail"].append("path is a required query parameter") if errors: raise Http400(errors) return self._get_directory_contents(request) - @action(detail=False, methods=['get'], url_path="root") + @action(detail=False, methods=["get"], url_path="root") def get_project_root_directory(self, request): """ Return root directory for a project. This is useful when starting @@ -138,18 +143,18 @@ def get_project_root_directory(self, request): Example: GET /directories/root?project=projext_x """ - if 'project' not in request.query_params: - raise Http400('project is a required query parameter') + if "project" not in request.query_params: + raise Http400("project is a required query parameter") if not request.user.is_service: - FileService.check_user_belongs_to_project(request, request.query_params['project']) + FileService.check_user_belongs_to_project(request, request.query_params["project"]) - root_dirs = FileService.get_project_root_directory(request.query_params['project']) + root_dirs = FileService.get_project_root_directory(request.query_params["project"]) return Response(root_dirs) - @action(detail=False, methods=['get'], url_path="update_byte_sizes_and_file_counts") - def update_byte_sizes_and_file_counts(self, request): # pragma: no cover + @action(detail=False, methods=["get"], url_path="update_byte_sizes_and_file_counts") + def update_byte_sizes_and_file_counts(self, request): # pragma: no cover """ Calculate byte sizes and file counts for all dirs in all projects. Intended to be called after importing test data. @@ -157,10 +162,14 @@ def update_byte_sizes_and_file_counts(self, request): # pragma: no cover If needed there should be no harm in calling this method again at any time in an attempt to correct mistakes in real data. 
""" - if request.user.username != 'metax': + if request.user.username != "metax": raise Http403 - for p in Directory.objects.all().distinct('project_identifier').values_list('project_identifier', flat=True): + for p in ( + Directory.objects.all() + .distinct("project_identifier") + .values_list("project_identifier", flat=True) + ): FileService.calculate_project_directory_byte_sizes_and_file_counts(p) return Response() diff --git a/src/metax_api/api/rest/base/views/file_storage_view.py b/src/metax_api/api/rest/base/views/file_storage_view.py index ff2b57c8..c2fcf663 100755 --- a/src/metax_api/api/rest/base/views/file_storage_view.py +++ b/src/metax_api/api/rest/base/views/file_storage_view.py @@ -20,7 +20,7 @@ class FileStorageViewSet(CommonViewSet): serializer_class = FileStorageSerializer object = FileStorage - lookup_field = 'pk' + lookup_field = "pk" def __init__(self, *args, **kwargs): self.set_json_schema(__file__) @@ -35,7 +35,8 @@ def get_object(self): # was not a pk - try identifier lookup_value = self.kwargs.pop(self.lookup_field) return super(FileStorageViewSet, self).get_object( - search_params={'file_storage_json__contains': {'identifier': lookup_value}}) + search_params={"file_storage_json__contains": {"identifier": lookup_value}} + ) def set_json_schema(self, view_file): - self.json_schema = CS.get_json_schema(path.dirname(view_file) + '/../schemas', 'file') + self.json_schema = CS.get_json_schema(path.dirname(view_file) + "/../schemas", "file") diff --git a/src/metax_api/api/rest/base/views/file_view.py b/src/metax_api/api/rest/base/views/file_view.py index d093b241..32e3b891 100755 --- a/src/metax_api/api/rest/base/views/file_view.py +++ b/src/metax_api/api/rest/base/views/file_view.py @@ -29,7 +29,7 @@ # i.e. /rest/v6/files, but must NOT end in / # or: /rest/files, but must NOT end in / -RE_PATTERN_FILES_CREATE = re.compile(r'^/rest/(v\d/)?files(?!/)') +RE_PATTERN_FILES_CREATE = re.compile(r"^/rest/(v\d/)?files(?!/)") # none of the methods in this class use atomic requests by default! see method dispatch() @@ -38,12 +38,12 @@ class FileViewSet(CommonViewSet): serializer_class = FileSerializer object = File - select_related = ['file_storage', 'parent_directory'] + select_related = ["file_storage", "parent_directory"] - lookup_field = 'pk' + lookup_field = "pk" # allow search by external identifier (urn, or whatever string in practice) as well - lookup_field_other = 'identifier' + lookup_field_other = "identifier" # customized create_bulk which handles both directories and files in the same # bulk_create request. @@ -55,9 +55,9 @@ def __init__(self, *args, **kwargs): def _use_transaction(self, request): # todo add checking of ?atomic parameter too? - if CommonService.get_boolean_query_param(self.request, 'dryrun'): + if CommonService.get_boolean_query_param(self.request, "dryrun"): return True - elif request.method == 'POST' and RE_PATTERN_FILES_CREATE.match(request.META['PATH_INFO']): + elif request.method == "POST" and RE_PATTERN_FILES_CREATE.match(request.META["PATH_INFO"]): # for POST /files only (creating), do not use a transaction ! 
return False

        return True

@@ -78,10 +78,10 @@ def dispatch(self, request, **kwargs):
        """
        if self._use_transaction(request):
            with transaction.atomic():
-                _logger.debug('Note: Request in transaction')
+                _logger.debug("Note: Request in transaction")
                return super().dispatch(request, **kwargs)
        else:
-            _logger.debug('Note: Request not in transaction')
+            _logger.debug("Note: Request not in transaction")
            return super().dispatch(request, **kwargs)

    def list(self, request, *args, **kwargs):
@@ -89,20 +89,20 @@
        if not request.user.is_service:
            # end users can only retrieve their own files
            user_projects = AuthService.get_user_projects(request)
-            self.queryset_search_params['project_identifier__in'] = user_projects
+            self.queryset_search_params["project_identifier__in"] = user_projects

        return super().list(request, *args, **kwargs)

    def update(self, request, *args, **kwargs):
-        #This has to be checked before updating common info
+        # This has to be checked before updating common info
        if not isinstance(self.request.data, dict):
-            raise Http400('request message body must be a single json object')
+            raise Http400("request message body must be a single json object")

        return super().update(request, *args, **kwargs)

    def partial_update(self, request, *args, **kwargs):
-        #This has to be checked before updating common info
+        # This has to be checked before updating common info
        if not isinstance(self.request.data, dict):
-            raise Http400('request message body must be a single json object')
+            raise Http400("request message body must be a single json object")

        return super().partial_update(request, *args, **kwargs)

@@ -115,15 +115,15 @@ def get_queryset(self):
        checksum:value --> checksum_value
        checksum --> checksum_algorithm/checked/value
        """
-        if 'fields' in self.request.query_params:
+        if "fields" in self.request.query_params:
            self.fields = []
-            for field in self.request.query_params['fields'].split(','):
-                field_names = field.split(':')
-                if 'checksum' == field_names[0] and len(field_names) > 1:
-                    self.fields.append(f'{field_names[0]}_{field_names[1]}')
+            for field in self.request.query_params["fields"].split(","):
+                field_names = field.split(":")
+                if "checksum" == field_names[0] and len(field_names) > 1:
+                    self.fields.append(f"{field_names[0]}_{field_names[1]}")

-                elif 'checksum' == field_names[0]:
-                    self.fields.extend([f'checksum_{k}' for k in FileSerializer.checksum_fields])
+                elif "checksum" == field_names[0]:
+                    self.fields.extend([f"checksum_{k}" for k in FileSerializer.checksum_fields])
                else:
                    self.fields.append(field_names[0])

@@ -137,9 +137,9 @@ def get_object(self, search_params=None):
        """
        obj = super().get_object(search_params)
        if self.request.user.is_service:
-            allowed_projects = CommonService.get_list_query_param(self.request, 'allowed_projects')
+            allowed_projects = CommonService.get_list_query_param(self.request, "allowed_projects")
            if allowed_projects is not None and obj.project_identifier not in allowed_projects:
-                raise Http403('You do not have permission to update this file')
+                raise Http403("You do not have permission to update this file")

        return obj

@@ -163,7 +163,7 @@ def partial_update_bulk(self, request, *args, **kwargs):

        return super().partial_update_bulk(request, *args, **kwargs)

-    @action(detail=False, methods=['post'], url_path="datasets")
+    @action(detail=False, methods=["post"], url_path="datasets")
    def 
datasets(self, request): requests to query parameters, so using POST instead is more guaranteed to work. """ - keysonly = CommonService.get_boolean_query_param(request, 'keysonly') - detailed = CommonService.get_boolean_query_param(request, 'detailed') + keysonly = CommonService.get_boolean_query_param(request, "keysonly") + detailed = CommonService.get_boolean_query_param(request, "detailed") params = request.query_params if not params.keys(): - return FileService.get_identifiers(request.data, 'noparams', True) + return FileService.get_identifiers(request.data, "noparams", True) - if 'keys' in params.keys(): - if params['keys'] in ['files', 'datasets']: - return FileService.get_identifiers(request.data, params['keys'], keysonly) + if "keys" in params.keys(): + if params["keys"] in ["files", "datasets"]: + return FileService.get_identifiers(request.data, params["keys"], keysonly) - if detailed: # This can be removed as soon as front can listen to ?keys=files which returns the same - return FileService.get_identifiers(request.data, 'files', False) + if ( + detailed + ): # This can be removed as soon as front can listen to ?keys=files which returns the same + return FileService.get_identifiers(request.data, "files", False) - raise Http403({ 'detail': [ 'Invalid parameters' ]}) + raise Http403({"detail": ["Invalid parameters"]}) - @action(detail=False, methods=['post'], url_path="restore") + @action(detail=False, methods=["post"], url_path="restore") def restore_files(self, request): """ Restore removed files. @@ -210,30 +212,30 @@ def destroy(self, request, pk, **kwargs): def destroy_bulk(self, request, *args, **kwargs): return FileService.destroy_bulk(request.data) - @action(detail=True, methods=['get', 'post', 'put', 'delete'], url_path='xml') + @action(detail=True, methods=["get", "post", "put", "delete"], url_path="xml") def xml_handler(self, request, pk=None): file = self.get_object() - if request.method == 'GET': + if request.method == "GET": return self._get_xml(request, file) else: - if 'namespace' not in request.query_params: - raise Http400('namespace is a required query parameter') + if "namespace" not in request.query_params: + raise Http400("namespace is a required query parameter") - if request.method == 'PUT': + if request.method == "PUT": return self._update_xml(request, file) - elif request.method == 'POST': + elif request.method == "POST": return self._create_xml(request, file) - elif request.method == 'DELETE': + elif request.method == "DELETE": return self._delete_xml(request, file) else: raise Http404 def _get_xml(self, request, file): - if 'namespace' in request.query_params: + if "namespace" in request.query_params: # get single requested xml metadata by namespace try: - xml_metadata = file.xmlmetadata_set.get(namespace=request.query_params['namespace']) + xml_metadata = file.xmlmetadata_set.get(namespace=request.query_params["namespace"]) except XmlMetadata.DoesNotExist: raise Http404 request.accepted_renderer = XMLRenderer() @@ -241,18 +243,20 @@ def _get_xml(self, request, file): else: # return list of namespaces of xml metadatas associated with the file - xml_namespaces = file.xmlmetadata_set.all().values_list('namespace', flat=True) + xml_namespaces = file.xmlmetadata_set.all().values_list("namespace", flat=True) request.accepted_renderer = JSONRenderer() - return Response(data=[ ns for ns in xml_namespaces ], status=status.HTTP_200_OK) + return Response(data=[ns for ns in xml_namespaces], status=status.HTTP_200_OK) def _create_xml(self, request, file): try: - 
file.xmlmetadata_set.get(namespace=request.query_params['namespace']) + file.xmlmetadata_set.get(namespace=request.query_params["namespace"]) except XmlMetadata.DoesNotExist: # good - create for the first time pass else: - raise Http400('xml metadata with namespace %s already exists' % request.query_params['namespace']) + raise Http400( + "xml metadata with namespace %s already exists" % request.query_params["namespace"] + ) new_xml_metadata = self._xml_request_to_dict_data(request, file) serializer = XmlMetadataSerializer(data=new_xml_metadata) @@ -268,7 +272,7 @@ def _create_xml(self, request, file): def _update_xml(self, request, file): try: - xml_metadata = file.xmlmetadata_set.get(namespace=request.query_params['namespace']) + xml_metadata = file.xmlmetadata_set.get(namespace=request.query_params["namespace"]) except XmlMetadata.DoesNotExist: raise Http404 @@ -285,7 +289,7 @@ def _update_xml(self, request, file): def _delete_xml(self, request, file): try: - xml_metadata = file.xmlmetadata_set.get(namespace=request.query_params['namespace']) + xml_metadata = file.xmlmetadata_set.get(namespace=request.query_params["namespace"]) except XmlMetadata.DoesNotExist: raise Http404 xml_metadata.delete() @@ -298,14 +302,16 @@ def _xml_request_to_dict_data(self, request, file): """ common_info = CommonService.update_common_info(request, return_only=True) new_xml_metadata = { - 'file': file.id, - 'xml': request.data, - 'namespace': request.query_params['namespace'] + "file": file.id, + "xml": request.data, + "namespace": request.query_params["namespace"], } new_xml_metadata.update(common_info) return new_xml_metadata - @action(detail=False, methods=['post'], url_path="flush_project") - def flush_project(self, request): # pragma: no cover + @action(detail=False, methods=["post"], url_path="flush_project") + def flush_project(self, request): # pragma: no cover # todo remove api when comfortable - raise ValidationError({ 'detail': ['API has been moved to RPC API: /rpc/files/flush_project'] }) + raise ValidationError( + {"detail": ["API has been moved to RPC API: /rpc/files/flush_project"]} + ) diff --git a/src/metax_api/api/rest/base/views/schema_view.py b/src/metax_api/api/rest/base/views/schema_view.py index 7c480aa6..0afed1f7 100755 --- a/src/metax_api/api/rest/base/views/schema_view.py +++ b/src/metax_api/api/rest/base/views/schema_view.py @@ -16,13 +16,13 @@ class SchemaViewSet(viewsets.ReadOnlyModelViewSet): filter_backends = () authentication_classes = () permission_classes = (ServicePermissions,) - api_type = 'rest' + api_type = "rest" def list(self, request, *args, **kwargs): return SchemaService.get_all_schemas() def retrieve(self, request, *args, **kwargs): - return SchemaService.get_schema_content(kwargs.get('pk')) + return SchemaService.get_schema_content(kwargs.get("pk")) def get_queryset(self): return self.list(None) @@ -31,4 +31,4 @@ def get_api_name(self): """ Does not inherit from common... 
""" - return 'schemas' + return "schemas" diff --git a/src/metax_api/api/rest/v2/router.py b/src/metax_api/api/rest/v2/router.py index 97ed1999..a0ea1d9a 100755 --- a/src/metax_api/api/rest/v2/router.py +++ b/src/metax_api/api/rest/v2/router.py @@ -36,7 +36,6 @@ class CustomRouterV2(CustomRouter): - def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -48,61 +47,67 @@ def __init__(self, *args, **kwargs): # - retrieve all file user metadata # - update file user metadata in bulk-manner - self.routes.append(Route( - url=r'^{prefix}/{lookup}/files/user_metadata{trailing_slash}$', - mapping={ - 'get': 'files_user_metadata_list', - 'put': 'files_user_metadata_update', - 'patch': 'files_user_metadata_update', - }, - name='{basename}-files-user-metadata-list', - detail=True, - initkwargs={'suffix': 'FilesUserMetadataList'} - )) + self.routes.append( + Route( + url=r"^{prefix}/{lookup}/files/user_metadata{trailing_slash}$", + mapping={ + "get": "files_user_metadata_list", + "put": "files_user_metadata_update", + "patch": "files_user_metadata_update", + }, + name="{basename}-files-user-metadata-list", + detail=True, + initkwargs={"suffix": "FilesUserMetadataList"}, + ) + ) # - retrieve single dataset file technical metadata - self.routes.append(Route( - url=r'^{prefix}/{lookup}/files/(?P.+){trailing_slash}$', - mapping={ - 'get': 'files_retrieve', - }, - name='{basename}-files-retrieve', - detail=True, - initkwargs={'suffix': 'FilesRetrieve'} - )) + self.routes.append( + Route( + url=r"^{prefix}/{lookup}/files/(?P.+){trailing_slash}$", + mapping={ + "get": "files_retrieve", + }, + name="{basename}-files-retrieve", + detail=True, + initkwargs={"suffix": "FilesRetrieve"}, + ) + ) # - retrieve all dataset files technical metadata # - change files of a dataset - self.routes.append(Route( - url=r'^{prefix}/{lookup}/files{trailing_slash}$', - mapping={ - 'get': 'files_list', - 'post': 'files_post', - }, - name='{basename}-files-list', - detail=True, - initkwargs={'suffix': 'FilesList'} - )) + self.routes.append( + Route( + url=r"^{prefix}/{lookup}/files{trailing_slash}$", + mapping={ + "get": "files_list", + "post": "files_post", + }, + name="{basename}-files-list", + detail=True, + initkwargs={"suffix": "FilesList"}, + ) + ) # v2 urls, but using v1 view classes, because nothing changes router_v1 = CustomRouter(trailing_slash=False) -router_v1.register(r'apierrors/?', ApiErrorViewSet) -router_v1.register(r'contracts/?', ContractViewSet) -router_v1.register(r'directories/?', DirectoryViewSet) -router_v1.register(r'files/?', FileViewSet) -router_v1.register(r'filestorages/?', FileStorageViewSet) -router_v1.register(r'schemas/?', SchemaViewSet) +router_v1.register(r"apierrors/?", ApiErrorViewSet) +router_v1.register(r"contracts/?", ContractViewSet) +router_v1.register(r"directories/?", DirectoryViewSet) +router_v1.register(r"files/?", FileViewSet) +router_v1.register(r"filestorages/?", FileStorageViewSet) +router_v1.register(r"schemas/?", SchemaViewSet) # v2 urls, using v2 view classes with changes router_v2 = CustomRouterV2(trailing_slash=False) -router_v2.register(r'datasets/?', DatasetViewSet) -router_v2.register(r'datacatalogs/?', DataCatalogViewSet) +router_v2.register(r"datasets/?", DatasetViewSet) +router_v2.register(r"datacatalogs/?", DataCatalogViewSet) router_v2.register( - r'datasets/(?P.+)/metadata_versions/(?P.+)/?', - DatasetViewSet + r"datasets/(?P.+)/metadata_versions/(?P.+)/?", + DatasetViewSet, ) api_urlpatterns = router_v1.urls + router_v2.urls diff --git 
a/src/metax_api/api/rest/v2/serializers/catalog_record_serializer.py b/src/metax_api/api/rest/v2/serializers/catalog_record_serializer.py index d89dc29d..f893a414 100755 --- a/src/metax_api/api/rest/v2/serializers/catalog_record_serializer.py +++ b/src/metax_api/api/rest/v2/serializers/catalog_record_serializer.py @@ -16,13 +16,16 @@ from metax_api.api.rest.base.serializers import CatalogRecordSerializer from metax_api.api.rest.base.serializers.catalog_record_serializer import DFT_CATALOG from metax_api.models import CatalogRecordV2 -from metax_api.services import CatalogRecordService as CRS, CommonService as CS, RedisCacheService as cache +from metax_api.services import ( + CatalogRecordService as CRS, + CommonService as CS, + RedisCacheService as cache, +) _logger = logging.getLogger(__name__) class CatalogRecordSerializerV2(CatalogRecordSerializer): - class Meta: # deepcopied, so that changes in this model don't affect # the V1 model @@ -31,55 +34,59 @@ class Meta: # define separately for inherited class, so that schemas are searched # from api/rest/v2/schemas, instead of api/rest/v1/schemas - _schemas_directory_path = path.join(path.dirname(path.dirname(__file__)), 'schemas') + _schemas_directory_path = path.join(path.dirname(path.dirname(__file__)), "schemas") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.Meta.model = CatalogRecordV2 self.Meta.fields += ( - 'draft_of', - 'next_draft', + "draft_of", + "next_draft", + ) + self.Meta.extra_kwargs.update( + { + "draft_of": {"required": False}, + "next_draft": {"required": False}, + } ) - self.Meta.extra_kwargs.update({ - 'draft_of': { 'required': False }, - 'next_draft': { 'required': False }, - }) def is_valid(self, raise_exception=False): - if CS.get_boolean_query_param(self.context['request'], 'draft') and not self.initial_data.get('data_catalog'): - self.initial_data['data_catalog'] = DFT_CATALOG - - self.initial_data.pop('draft_of', None) - self.initial_data.pop('editor', None) - self.initial_data.pop('next_draft', None) + if CS.get_boolean_query_param( + self.context["request"], "draft" + ) and not self.initial_data.get("data_catalog"): + self.initial_data["data_catalog"] = DFT_CATALOG + + self.initial_data.pop("draft_of", None) + self.initial_data.pop("editor", None) + self.initial_data.pop("next_draft", None) super().is_valid(raise_exception=raise_exception) def to_representation(self, instance): res = super().to_representation(instance) - if 'request' in self.context: + if "request" in self.context: - if CS.get_boolean_query_param(self.context['request'], 'include_user_metadata'): + if CS.get_boolean_query_param(self.context["request"], "include_user_metadata"): # keep user metadata and possible file_details that have been populated in super().to_representation() pass else: - res.get('research_dataset', {}).pop('files', None) - res.get('research_dataset', {}).pop('directories', None) + res.get("research_dataset", {}).pop("files", None) + res.get("research_dataset", {}).pop("directories", None) - if 'draft_of' in res: - if instance.user_is_privileged(instance.request or self.context['request']): - res['draft_of'] = instance.draft_of.identifiers_dict + if "draft_of" in res: + if instance.user_is_privileged(instance.request or self.context["request"]): + res["draft_of"] = instance.draft_of.identifiers_dict else: - del res['draft_of'] + del res["draft_of"] - if 'next_draft' in res: - if instance.user_is_privileged(instance.request or self.context['request']): - res['next_draft'] = 
instance.next_draft.identifiers_dict + if "next_draft" in res: + if instance.user_is_privileged(instance.request or self.context["request"]): + res["next_draft"] = instance.next_draft.identifiers_dict else: - del res['next_draft'] + del res["next_draft"] - res.pop('editor', None) + res.pop("editor", None) return res @@ -97,15 +104,17 @@ def validate_research_dataset_files(self, value): CRS.validate_reference_data(value, cache) - rd_files_schema = CS.get_json_schema(self._schemas_directory_path, 'dataset_files') + rd_files_schema = CS.get_json_schema(self._schemas_directory_path, "dataset_files") resolver = RefResolver( # at some point when jsonschema package is updated, probably need to switch to # using the below commented out parameter names instead # schema_path='file:{}'.format(path.dirname(path.dirname(__file__)) + '/schemas/dataset_files_schema.json'), # schema=rd_files_schema - base_uri='file:{}'.format(path.join(self._schemas_directory_path, 'dataset_files_schema.json')), - referrer=rd_files_schema + base_uri="file:{}".format( + path.join(self._schemas_directory_path, "dataset_files_schema.json") + ), + referrer=rd_files_schema, ) # for debugging, below may be useful @@ -114,27 +123,35 @@ def validate_research_dataset_files(self, value): validator = Draft4Validator(rd_files_schema, resolver=resolver, format_checker=None) if not value: - _logger.info('Validating files and/or directories with empty value. Nothing to validate.') + _logger.info( + "Validating files and/or directories with empty value. Nothing to validate." + ) return try: validator.validate(value) except JsonValidationError as e: - raise ValidationError({ 'detail': - ['%s. Json path: %s. Schema: %s' % (e.message, [p for p in e.path], e.schema)] - }) + raise ValidationError( + { + "detail": [ + "%s. Json path: %s. Schema: %s" % (e.message, [p for p in e.path], e.schema) + ] + } + ) def _set_dataset_schema(self): if self._validate_as_draft(): # drafts only exists for V2 records, otherwise normal rules apply - schema_prefix = 'dft' - self.json_schema = CS.get_json_schema(self._schemas_directory_path, 'dataset', schema_prefix) + schema_prefix = "dft" + self.json_schema = CS.get_json_schema( + self._schemas_directory_path, "dataset", schema_prefix + ) else: super()._set_dataset_schema() def _validate_as_draft(self): if self._operation_is_create and "request" in self.context: - return CS.get_boolean_query_param(self.context["request"], 'draft') + return CS.get_boolean_query_param(self.context["request"], "draft") if CS.request_is_create_operation(self.instance.request): return self.instance._save_as_draft() diff --git a/src/metax_api/api/rest/v2/serializers/data_catalog_serializer.py b/src/metax_api/api/rest/v2/serializers/data_catalog_serializer.py index 717de238..e0d2799a 100755 --- a/src/metax_api/api/rest/v2/serializers/data_catalog_serializer.py +++ b/src/metax_api/api/rest/v2/serializers/data_catalog_serializer.py @@ -12,25 +12,31 @@ class DataCatalogSerializerV2(DataCatalogSerializer): - def is_valid(self, raise_exception=False): """ Define this here that data catalogs are validated using JSON-schemas from V2 directory. 
""" super(DataCatalogSerializer, self).is_valid(raise_exception=raise_exception) - if 'catalog_json' in self.initial_data: + if "catalog_json" in self.initial_data: self._validate_dataset_schema() - if self.initial_data['catalog_json'].get('dataset_versioning', False) is True \ - and self.initial_data['catalog_json'].get('harvested', False) is True: - raise ValidationError({ - 'detail': ['versioning cannot be enabled in harvested catalogs.'] - }) + if ( + self.initial_data["catalog_json"].get("dataset_versioning", False) is True + and self.initial_data["catalog_json"].get("harvested", False) is True + ): + raise ValidationError( + {"detail": ["versioning cannot be enabled in harvested catalogs."]} + ) def _validate_dataset_schema(self): - rd_schema = self.initial_data['catalog_json'].get('research_dataset_schema', None) + rd_schema = self.initial_data["catalog_json"].get("research_dataset_schema", None) if not rd_schema: return - schema_path = '%s/../schemas/%s_dataset_schema.json' % (path.dirname(__file__), rd_schema) + schema_path = "%s/../schemas/%s_dataset_schema.json" % ( + path.dirname(__file__), + rd_schema, + ) if not path.isfile(schema_path): - raise ValidationError({'catalog_json': ['research dataset schema \'%s\' not found' % rd_schema]}) + raise ValidationError( + {"catalog_json": ["research dataset schema '%s' not found" % rd_schema]} + ) diff --git a/src/metax_api/api/rest/v2/views/dataset_view.py b/src/metax_api/api/rest/v2/views/dataset_view.py index d9f7bf95..84eb1776 100755 --- a/src/metax_api/api/rest/v2/views/dataset_view.py +++ b/src/metax_api/api/rest/v2/views/dataset_view.py @@ -22,9 +22,10 @@ _logger = logging.getLogger(__name__) -UNAUTHORIZED_TO_SEE_FILES_MSG = \ - 'You do not have permission to see this information because the dataset access type ' \ - 'is not open and you are not the owner of the catalog record.' +UNAUTHORIZED_TO_SEE_FILES_MSG = ( + "You do not have permission to see this information because the dataset access type " + "is not open and you are not the owner of the catalog record." +) class DatasetViewSet(DatasetViewSet): @@ -33,7 +34,7 @@ class DatasetViewSet(DatasetViewSet): serializer_class = CatalogRecordSerializerV2 object = CatalogRecordV2 - @action(detail=True, methods=['get'], url_path="projects") + @action(detail=True, methods=["get"], url_path="projects") def projects_list(self, request, pk=None): # note: checks permissions @@ -43,7 +44,10 @@ def projects_list(self, request, pk=None): raise Http403 projects = [ - p for p in cr.files.all().values_list('project_identifier', flat=True).distinct('project_identifier') + p + for p in cr.files.all() + .values_list("project_identifier", flat=True) + .distinct("project_identifier") ] return Response(data=projects, status=status.HTTP_200_OK) @@ -53,7 +57,7 @@ def files_retrieve(self, request, pk=None, file_pk=None): """ Retrieve technical metadata of a single file associated with a dataset. 
""" - _logger.debug('Retrieving metadata of single file: %r' % file_pk) + _logger.debug("Retrieving metadata of single file: %r" % file_pk) # note: checks permissions cr = self.get_object() @@ -62,19 +66,19 @@ def files_retrieve(self, request, pk=None, file_pk=None): raise Http403(UNAUTHORIZED_TO_SEE_FILES_MSG) try: - params = { 'pk': int(file_pk) } + params = {"pk": int(file_pk)} except ValueError: - params = { 'identifier': file_pk } + params = {"identifier": file_pk} - manager = 'objects' + manager = "objects" - if CS.get_boolean_query_param(request, 'removed_files'): - params['removed'] = True - manager = 'objects_unfiltered' + if CS.get_boolean_query_param(request, "removed_files"): + params["removed"] = True + manager = "objects_unfiltered" file_fields = [] - if 'file_fields' in request.query_params: - file_fields = request.query_params['file_fields'].split(',') + if "file_fields" in request.query_params: + file_fields = request.query_params["file_fields"].split(",") file_fields = LightFileSerializer.ls_field_list(file_fields) @@ -96,7 +100,7 @@ def files_list(self, request, pk=None): NOTE! Take a look at api/rest/v2/router.py to see how this method is mapped to HTTP verb """ params = {} - manager = 'objects' + manager = "objects" # TODO: This applies only to IDA files, not remote resources. # TODO: Should this apply also to remote resources? cr = self.get_object() @@ -104,13 +108,13 @@ def files_list(self, request, pk=None): if not cr.authorized_to_see_catalog_record_files(request): raise Http403(UNAUTHORIZED_TO_SEE_FILES_MSG) - if CS.get_boolean_query_param(request, 'removed_files'): - params['removed'] = True - manager = 'objects_unfiltered' + if CS.get_boolean_query_param(request, "removed_files"): + params["removed"] = True + manager = "objects_unfiltered" file_fields = [] - if 'file_fields' in request.query_params: - file_fields = request.query_params['file_fields'].split(',') + if "file_fields" in request.query_params: + file_fields = request.query_params["file_fields"].split(",") file_fields = LightFileSerializer.ls_field_list(file_fields) queryset = cr.files(manager=manager).filter(**params).values(*file_fields) @@ -126,7 +130,7 @@ def files_post(self, request, pk=None): NOTE! Take a look at api/rest/v2/router.py to see how this method is mapped to HTTP verb """ if not request.data: - raise Http400('No data received') + raise Http400("No data received") # note: checks permissions cr = self.get_object() @@ -149,7 +153,7 @@ def files_user_metadata_list(self, request, pk=None): data = {} - for object_type in ('files', 'directories'): + for object_type in ("files", "directories"): if object_type in cr.research_dataset: data[object_type] = cr.research_dataset[object_type] @@ -163,7 +167,7 @@ def files_user_metadata_update(self, request, pk=None): This API does not add or remove files! Only updates metadata. """ if not request.data: - raise Http400('No data received') + raise Http400("No data received") # note: checks permissions cr = self.get_object() @@ -172,7 +176,11 @@ def files_user_metadata_update(self, request, pk=None): return Response(data=None, status=status.HTTP_200_OK) - @action(detail=True, methods=['get'], url_path="files/(?P.+)/user_metadata") + @action( + detail=True, + methods=["get"], + url_path="files/(?P.+)/user_metadata", + ) def files_user_metadata_retrieve(self, request, pk=None, obj_identifier=None): """ Retrieve user-provided dataset-specific metadata for a file or a directory associated with a dataset. 
@@ -184,14 +192,14 @@ def files_user_metadata_retrieve(self, request, pk=None, obj_identifier=None): if not cr.authorized_to_see_catalog_record_files(request): raise Http403(UNAUTHORIZED_TO_SEE_FILES_MSG) - if CS.get_boolean_query_param(request, 'directory'): + if CS.get_boolean_query_param(request, "directory"): # search from directories only if special query parameter is given - object_type = 'directories' + object_type = "directories" else: - object_type = 'files' + object_type = "files" for obj in cr.research_dataset.get(object_type, []): - if obj['identifier'] == obj_identifier: + if obj["identifier"] == obj_identifier: return Response(data=obj, status=status.HTTP_200_OK) raise Http404 diff --git a/src/metax_api/api/rpc/base/router.py b/src/metax_api/api/rpc/base/router.py index 85f69faf..1e799af0 100755 --- a/src/metax_api/api/rpc/base/router.py +++ b/src/metax_api/api/rpc/base/router.py @@ -27,19 +27,18 @@ class CustomRouter(DefaultRouter): - def get_default_basename(self, viewset): """ When a viewset has no queryset set, or base_name is not passed to a router as the 3rd parameter, automatically determine base name. """ - return viewset.__class__.__name__.split('RPC')[0] + return viewset.__class__.__name__.split("RPC")[0] router = CustomRouter(trailing_slash=False) -router.register(r'datasets/?', DatasetRPC) -router.register(r'files/?', FileRPC) -router.register(r'statistics/?', StatisticRPC) -router.register(r'elasticsearchs/?', ElasticsearchRPC) +router.register(r"datasets/?", DatasetRPC) +router.register(r"files/?", FileRPC) +router.register(r"statistics/?", StatisticRPC) +router.register(r"elasticsearchs/?", ElasticsearchRPC) api_urlpatterns = router.urls diff --git a/src/metax_api/api/rpc/base/views/common_rpc.py b/src/metax_api/api/rpc/base/views/common_rpc.py index 5f8f638b..f87a26e1 100755 --- a/src/metax_api/api/rpc/base/views/common_rpc.py +++ b/src/metax_api/api/rpc/base/views/common_rpc.py @@ -23,7 +23,7 @@ class CommonRPC(CommonViewSet): common tricks, such as saving errors to /apierrors, request modifications, permission objects... """ - api_type = 'rpc' + api_type = "rpc" # serves no purpose, but satisfies the ViewSet basic requirements serializer_class = FileSerializer @@ -37,7 +37,7 @@ def get_api_name(self): Some views where the below formula does not produce a sensible result will inherit this and return a customized result. 
""" - return '%ss' % self.__class__.__name__.split('RPC')[0].lower() + return "%ss" % self.__class__.__name__.split("RPC")[0].lower() def create(self, request, *args, **kwargs): raise Http501() diff --git a/src/metax_api/api/rpc/base/views/dataset_rpc.py b/src/metax_api/api/rpc/base/views/dataset_rpc.py index bda3a494..2450c615 100755 --- a/src/metax_api/api/rpc/base/views/dataset_rpc.py +++ b/src/metax_api/api/rpc/base/views/dataset_rpc.py @@ -18,7 +18,11 @@ from metax_api.exceptions import Http400, Http403 from metax_api.models import CatalogRecord from metax_api.models.catalog_record import DataciteDOIUpdate -from metax_api.services.datacite_service import DataciteException, DataciteService, convert_cr_to_datacite_cr_json +from metax_api.services.datacite_service import ( + DataciteException, + DataciteService, + convert_cr_to_datacite_cr_json, +) from metax_api.utils import generate_doi_identifier, is_metax_generated_doi_identifier from .common_rpc import CommonRPC @@ -31,71 +35,83 @@ class DatasetRPC(CommonRPC): serializer_class = CatalogRecordSerializer object = CatalogRecord - @action(detail=False, methods=['get'], url_path="get_minimal_dataset_template") + @action(detail=False, methods=["get"], url_path="get_minimal_dataset_template") def get_minimal_dataset_template(self, request): - if request.query_params.get('type', None) not in ['service', 'enduser']: - raise Http400({ - 'detail': ['query param \'type\' missing or wrong. please specify ?type= as one of: service, enduser'] - }) - - with open('metax_api/exampledata/dataset_minimal.json', 'rb') as f: + if request.query_params.get("type", None) not in ["service", "enduser"]: + raise Http400( + { + "detail": [ + "query param 'type' missing or wrong. please specify ?type= as one of: service, enduser" + ] + } + ) + + with open("metax_api/exampledata/dataset_minimal.json", "rb") as f: example_ds = load(f) - example_ds['data_catalog'] = django_settings.END_USER_ALLOWED_DATA_CATALOGS[0] + example_ds["data_catalog"] = django_settings.END_USER_ALLOWED_DATA_CATALOGS[0] - if request.query_params['type'] == 'enduser': - example_ds.pop('metadata_provider_org', None) - example_ds.pop('metadata_provider_user', None) + if request.query_params["type"] == "enduser": + example_ds.pop("metadata_provider_org", None) + example_ds.pop("metadata_provider_user", None) return Response(example_ds) - @action(detail=False, methods=['post'], url_path="set_preservation_identifier") + @action(detail=False, methods=["post"], url_path="set_preservation_identifier") def set_preservation_identifier(self, request): - if not request.query_params.get('identifier', False): - raise Http400({ - 'detail': ['Query param \'identifier\' missing. Please specify ?identifier='] - }) + if not request.query_params.get("identifier", False): + raise Http400( + { + "detail": [ + "Query param 'identifier' missing. 
Please specify ?identifier=" + ] + } + ) try: - cr = self.object.objects.get(identifier=request.query_params['identifier']) + cr = self.object.objects.get(identifier=request.query_params["identifier"]) except self.object.DoesNotExist: raise Http404 if cr.preservation_identifier: # If cr preservation identifier already exists, make sure it also exists in Datacite - DataciteDOIUpdate(cr, cr.preservation_identifier, 'update')() + DataciteDOIUpdate(cr, cr.preservation_identifier, "update")() else: - pref_id = cr.research_dataset['preferred_identifier'] + pref_id = cr.research_dataset["preferred_identifier"] if is_metax_generated_doi_identifier(pref_id): # Copy metax generated doi identifier to preservation identifier. NOTE: This code should never be # reached since if pref id is a metax generated doi, it should have already been copied to preservation # identifier (ida dataset), but this is done just in case something has previously failed... - _logger.warning("Reached a code block in dataset_rpc set_preservation_identifier method, which should" - " not be reached.") + _logger.warning( + "Reached a code block in dataset_rpc set_preservation_identifier method, which should" + " not be reached." + ) cr.preservation_identifier = pref_id - action = 'update' + action = "update" else: # Generate a new DOI for the dataset. If pref id is a metax generated urn, use that urn's suffix as # the doi suffix. Otherwise generate completely new doi. - if pref_id.startswith('urn:nbn:fi:att:'): - cr.preservation_identifier = generate_doi_identifier(pref_id[len('urn:nbn:fi:att:'):]) + if pref_id.startswith("urn:nbn:fi:att:"): + cr.preservation_identifier = generate_doi_identifier( + pref_id[len("urn:nbn:fi:att:") :] + ) else: cr.preservation_identifier = generate_doi_identifier() - action = 'create' + action = "create" self._save_and_publish_dataset(cr, action) return Response(cr.preservation_identifier) - @action(detail=False, methods=['post'], url_path="change_cumulative_state") + @action(detail=False, methods=["post"], url_path="change_cumulative_state") def change_cumulative_state(self, request): - identifier = request.query_params.get('identifier', False) - state_value = request.query_params.get('cumulative_state', False) + identifier = request.query_params.get("identifier", False) + state_value = request.query_params.get("cumulative_state", False) if not identifier: - raise Http400('Query param \'identifier\' missing') + raise Http400("Query param 'identifier' missing") if not state_value: - raise Http400('Query param \'cumulative_state\' missing') + raise Http400("Query param 'cumulative_state' missing") try: cr = self.object.objects.get(identifier=identifier) @@ -103,73 +119,74 @@ def change_cumulative_state(self, request): raise Http404 if not cr.user_has_access(request): - raise Http403('You do not have permissions to modify this dataset') + raise Http403("You do not have permissions to modify this dataset") cr.request = request if cr.change_cumulative_state(state_value): # new version is created return_status = status.HTTP_200_OK - data = { 'new_version_created': self.get_serializer(cr).data['new_version_created'] } + data = {"new_version_created": self.get_serializer(cr).data["new_version_created"]} else: return_status = status.HTTP_204_NO_CONTENT data = None return Response(data=data, status=return_status) - @action(detail=False, methods=['post'], url_path="refresh_directory_content") + @action(detail=False, methods=["post"], url_path="refresh_directory_content") def refresh_directory_content(self, 
request): - cr_identifier = request.query_params.get('cr_identifier', False) - dir_identifier = request.query_params.get('dir_identifier', False) + cr_identifier = request.query_params.get("cr_identifier", False) + dir_identifier = request.query_params.get("dir_identifier", False) if not cr_identifier: - raise Http400('Query param \'cr_identifier\' missing.') + raise Http400("Query param 'cr_identifier' missing.") if not dir_identifier: - raise Http400('Query param \'dir_identifier\' missing.') + raise Http400("Query param 'dir_identifier' missing.") try: cr = self.object.objects.get(identifier=cr_identifier) except self.object.DoesNotExist: - raise Http404(f'CatalogRecord \'{cr_identifier}\' could not be found') + raise Http404(f"CatalogRecord '{cr_identifier}' could not be found") if not cr.user_has_access(request): - raise Http403('You do not have permissions to modify this dataset') + raise Http403("You do not have permissions to modify this dataset") cr.request = request new_version, n_added_files = cr.refresh_directory_content(dir_identifier) - data = { 'number_of_files_added': n_added_files } + data = {"number_of_files_added": n_added_files} if new_version: - data['new_version_created'] = self.get_serializer(cr).data['new_version_created'] + data["new_version_created"] = self.get_serializer(cr).data["new_version_created"] return Response(data=data, status=status.HTTP_200_OK) - @action(detail=False, methods=['post'], url_path="fix_deprecated") + @action(detail=False, methods=["post"], url_path="fix_deprecated") def fix_deprecated(self, request): - if not request.query_params.get('identifier', False): - raise Http400('Query param \'identifier\' missing. Please specify ?identifier=') + if not request.query_params.get("identifier", False): + raise Http400( + "Query param 'identifier' missing. 
Please specify ?identifier=" + ) try: - cr = self.object.objects.get(identifier=request.query_params['identifier']) + cr = self.object.objects.get(identifier=request.query_params["identifier"]) except self.object.DoesNotExist: raise Http404 if not cr.deprecated: - raise Http400('Requested catalog record is not deprecated') + raise Http400("Requested catalog record is not deprecated") cr.fix_deprecated() - data = { 'new_version_created': self.get_serializer(cr).data['new_version_created'] } + data = {"new_version_created": self.get_serializer(cr).data["new_version_created"]} return Response(data=data, status=status.HTTP_200_OK) def _save_and_publish_dataset(self, cr, action): try: - DataciteService().get_validated_datacite_json( - convert_cr_to_datacite_cr_json(cr), True) + DataciteService().get_validated_datacite_json(convert_cr_to_datacite_cr_json(cr), True) except DataciteException as e: raise Http400(str(e)) - super(CatalogRecord, cr).save(update_fields=['preservation_identifier']) + super(CatalogRecord, cr).save(update_fields=["preservation_identifier"]) DataciteDOIUpdate(cr, cr.preservation_identifier, action)() diff --git a/src/metax_api/api/rpc/base/views/elasticsearch_rpc.py b/src/metax_api/api/rpc/base/views/elasticsearch_rpc.py index bddcb072..c0a8e915 100755 --- a/src/metax_api/api/rpc/base/views/elasticsearch_rpc.py +++ b/src/metax_api/api/rpc/base/views/elasticsearch_rpc.py @@ -21,8 +21,7 @@ class ElasticsearchRPC(CommonRPC): - - @action(detail=False, methods=['get'], url_path="map_refdata") + @action(detail=False, methods=["get"], url_path="map_refdata") def map_refdata(self, request): if not isinstance(django_settings, dict): @@ -30,69 +29,69 @@ def map_refdata(self, request): connection_params = RDL.get_connection_parameters(settings) # returns scan object as well but that is not needed here - esclient = RDL.get_es_imports(settings['HOSTS'], connection_params)[0] + esclient = RDL.get_es_imports(settings["HOSTS"], connection_params)[0] if not self.request.query_params: return Response(status=status.HTTP_200_OK) - elif '_mapping' in self.request.query_params: + elif "_mapping" in self.request.query_params: try: res = esclient.indices.get_mapping() except Exception as e: - raise Http400(f'Error when accessing elasticsearch. {e}') + raise Http400(f"Error when accessing elasticsearch. {e}") return Response(data=res, status=status.HTTP_200_OK) params = {} for k, v in self.request.query_params.items(): # python dict.items() keeps order so the url is always the first one - if '_data/' in k: - splitted = k.split('/') + if "_data/" in k: + splitted = k.split("/") if len(splitted) < 3: - _logger.info('Bad url to elasticsearch proxy') + _logger.info("Bad url to elasticsearch proxy") return Response(data=None, status=status.HTTP_204_NO_CONTENT) idx = splitted[0] type = splitted[1] - if '?' in splitted[2]: - action, first_param = splitted[2].split('?') + if "?" 
in splitted[2]: + action, first_param = splitted[2].split("?") else: action, first_param = splitted[2], None - if action != '_search': - _logger.info('Bad action to elasticsearch proxy') + if action != "_search": + _logger.info("Bad action to elasticsearch proxy") return Response(data=None, status=status.HTTP_204_NO_CONTENT) if first_param: params[first_param] = v - elif k == 'pretty': - params[k] = 'true' if v else 'false' + elif k == "pretty": + params[k] = "true" if v else "false" - elif k == 'q': + elif k == "q": try: # new ES client separates filters with a space - v = v.replace('+AND+', ' AND ') - v = v.replace('+OR+', ' OR ') - if 'type:' not in v: - params[k] = v + f' AND type:{type}' + v = v.replace("+AND+", " AND ") + v = v.replace("+OR+", " OR ") + if "type:" not in v: + params[k] = v + f" AND type:{type}" else: params[k] = v except: - _logger.info('Elasticsearch proxy has missing type. This should not happen') + _logger.info("Elasticsearch proxy has missing type. This should not happen") return Response(data=None, status=status.HTTP_204_NO_CONTENT) else: params[k] = v - if 'q' not in params: - params['q'] = f'type:{type}' + if "q" not in params: + params["q"] = f"type:{type}" try: res = esclient.search(index=idx, params=params) except Exception as e: - raise Http400(f'Error when accessing elasticsearch. {e}') + raise Http400(f"Error when accessing elasticsearch. {e}") - return Response(data=res, status=status.HTTP_200_OK) \ No newline at end of file + return Response(data=res, status=status.HTTP_200_OK) diff --git a/src/metax_api/api/rpc/base/views/file_rpc.py b/src/metax_api/api/rpc/base/views/file_rpc.py index 65b887e2..eb8dd169 100755 --- a/src/metax_api/api/rpc/base/views/file_rpc.py +++ b/src/metax_api/api/rpc/base/views/file_rpc.py @@ -22,54 +22,55 @@ class FileRPC(CommonRPC): - - @action(detail=False, methods=['post'], url_path="delete_project") + @action(detail=False, methods=["post"], url_path="delete_project") def delete_project(self, request): """ Marks files deleted, deprecates related datasets and removes all directories. """ - if 'project_identifier' not in request.query_params: - raise Http400({ 'detail': ['required query parameter project_identifier missing'] }) + if "project_identifier" not in request.query_params: + raise Http400({"detail": ["required query parameter project_identifier missing"]}) - return FileService.delete_project(request.query_params['project_identifier']) + return FileService.delete_project(request.query_params["project_identifier"]) - @action(detail=False, methods=['post'], url_path="flush_project") - def flush_project(self, request): # pragma: no cover + @action(detail=False, methods=["post"], url_path="flush_project") + def flush_project(self, request): # pragma: no cover """ Permanently delete an entire project's files and directories. WARNING! Does not check file association with datasets! Not meant for active production use!! 
""" - if settings.ENV == 'production': - raise Http400({ 'detail': ['API currently allowed only in test environments'] }) + if settings.ENV == "production": + raise Http400({"detail": ["API currently allowed only in test environments"]}) - if 'project_identifier' not in request.query_params: - raise Http400({ 'detail': ['project_identifier is a required query parameter'] }) + if "project_identifier" not in request.query_params: + raise Http400({"detail": ["project_identifier is a required query parameter"]}) - project = request.query_params['project_identifier'] + project = request.query_params["project_identifier"] - sql_delete_cr_files = ''' + sql_delete_cr_files = """ delete from metax_api_catalogrecord_files where file_id in ( select id from metax_api_file where project_identifier = %s ) - ''' + """ - sql_delete_files = ''' + sql_delete_files = """ delete from metax_api_file where project_identifier = %s - ''' + """ - sql_delete_directories = ''' + sql_delete_directories = """ delete from metax_api_directory where project_identifier = %s - ''' + """ - _logger.info('Flushing project %s on the request of user: %s' % (project, request.user.username)) + _logger.info( + "Flushing project %s on the request of user: %s" % (project, request.user.username) + ) with connection.cursor() as cr: cr.execute(sql_delete_cr_files, [project]) cr.execute(sql_delete_files, [project]) cr.execute(sql_delete_directories, [project]) - _logger.info('Permanently deleted all files and directories from project %s' % project) + _logger.info("Permanently deleted all files and directories from project %s" % project) return Response(data=None, status=status.HTTP_204_NO_CONTENT) diff --git a/src/metax_api/api/rpc/base/views/statistic_rpc.py b/src/metax_api/api/rpc/base/views/statistic_rpc.py index e4176cfc..41e85f71 100755 --- a/src/metax_api/api/rpc/base/views/statistic_rpc.py +++ b/src/metax_api/api/rpc/base/views/statistic_rpc.py @@ -18,111 +18,121 @@ _logger = logging.getLogger(__name__) -date_re = re.compile( - r'^\d{4}-\d{2}$' -) +date_re = re.compile(r"^\d{4}-\d{2}$") -class StatisticRPC(CommonRPC): - @action(detail=False, methods=['get'], url_path='all_datasets_cumulative') +class StatisticRPC(CommonRPC): + @action(detail=False, methods=["get"], url_path="all_datasets_cumulative") def all_datasets_cumulative(self, request): - if not request.query_params.get('from_date', None) or not request.query_params.get('to_date', None): - raise Http400('from_date and to_date parameters are required') + if not request.query_params.get("from_date", None) or not request.query_params.get( + "to_date", None + ): + raise Http400("from_date and to_date parameters are required") str_params = { - 'from_date': request.query_params.get('from_date', None), - 'to_date': request.query_params.get('to_date', None), + "from_date": request.query_params.get("from_date", None), + "to_date": request.query_params.get("to_date", None), } - if not date_re.match(str_params['from_date']) or not date_re.match(str_params['to_date']): - raise Http400('date parameter format is \'YYYY-MM\'') + if not date_re.match(str_params["from_date"]) or not date_re.match(str_params["to_date"]): + raise Http400("date parameter format is 'YYYY-MM'") - params = { param: request.query_params.get(param, None) for param in str_params } + params = {param: request.query_params.get(param, None) for param in str_params} - for boolean_param in ['latest', 'legacy', 'removed']: + for boolean_param in ["latest", "legacy", "removed"]: if boolean_param in request.query_params: 
params[boolean_param] = CS.get_boolean_query_param(request, boolean_param) return Response(StatisticService.total_datasets(**params)) - @action(detail=False, methods=['get'], url_path='catalog_datasets_cumulative') + @action(detail=False, methods=["get"], url_path="catalog_datasets_cumulative") def catalog_datasets_cumulative(self, request): - if not request.query_params.get('from_date', None) or not request.query_params.get('to_date', None): - raise Http400('from_date and to_date parameters are required') + if not request.query_params.get("from_date", None) or not request.query_params.get( + "to_date", None + ): + raise Http400("from_date and to_date parameters are required") params = { - 'from_date': request.query_params.get('from_date', None), - 'to_date': request.query_params.get('to_date', None), - 'data_catalog': request.query_params.get('data_catalog', None), + "from_date": request.query_params.get("from_date", None), + "to_date": request.query_params.get("to_date", None), + "data_catalog": request.query_params.get("data_catalog", None), } return Response(StatisticService.total_data_catalog_datasets(**params)) - @action(detail=False, methods=['get'], url_path='count_datasets') + @action(detail=False, methods=["get"], url_path="count_datasets") def count_datasets(self, request): str_params = [ - 'access_type', - 'data_catalog', - 'from_date', - 'metadata_owner_org', - 'metadata_provider_org', - 'metadata_provider_user', - 'preservation_state', - 'to_date' + "access_type", + "data_catalog", + "from_date", + "metadata_owner_org", + "metadata_provider_org", + "metadata_provider_user", + "preservation_state", + "to_date", ] - params = { param: request.query_params.get(param, None) for param in str_params } + params = {param: request.query_params.get(param, None) for param in str_params} - for boolean_param in ['deprecated', 'harvested', 'latest', 'legacy', 'removed']: + for boolean_param in ["deprecated", "harvested", "latest", "legacy", "removed"]: if boolean_param in request.query_params: params[boolean_param] = CS.get_boolean_query_param(request, boolean_param) return Response(StatisticService.count_datasets(**params)) - @action(detail=False, methods=['get'], url_path='deprecated_datasets_cumulative') + @action(detail=False, methods=["get"], url_path="deprecated_datasets_cumulative") def deprecated_datasets_cumulative(self, request): - if not request.query_params.get('from_date', None) or not request.query_params.get('to_date', None): - raise Http400('from_date and to_date parameters are required') + if not request.query_params.get("from_date", None) or not request.query_params.get( + "to_date", None + ): + raise Http400("from_date and to_date parameters are required") params = { - 'from_date': request.query_params.get('from_date', None), - 'to_date': request.query_params.get('to_date', None), + "from_date": request.query_params.get("from_date", None), + "to_date": request.query_params.get("to_date", None), } return Response(StatisticService.deprecated_datasets_cumulative(**params)) - @action(detail=False, methods=['get'], url_path='end_user_datasets_cumulative') + @action(detail=False, methods=["get"], url_path="end_user_datasets_cumulative") def end_user_datasets_cumulative(self, request): - if not request.query_params.get('from_date', None) or not request.query_params.get('to_date', None): - raise Http400('from_date and to_date parameters are required') + if not request.query_params.get("from_date", None) or not request.query_params.get( + "to_date", None + ): + raise 
Http400("from_date and to_date parameters are required") params = { - 'from_date': request.query_params.get('from_date', None), - 'to_date': request.query_params.get('to_date', None), + "from_date": request.query_params.get("from_date", None), + "to_date": request.query_params.get("to_date", None), } return Response(StatisticService.total_end_user_datasets(**params)) - @action(detail=False, methods=['get'], url_path='harvested_datasets_cumulative') + @action(detail=False, methods=["get"], url_path="harvested_datasets_cumulative") def harvested_datasets_cumulative(self, request): - if not request.query_params.get('from_date', None) or not request.query_params.get('to_date', None): - raise Http400('from_date and to_date parameters are required') + if not request.query_params.get("from_date", None) or not request.query_params.get( + "to_date", None + ): + raise Http400("from_date and to_date parameters are required") params = { - 'from_date': request.query_params.get('from_date', None), - 'to_date': request.query_params.get('to_date', None), + "from_date": request.query_params.get("from_date", None), + "to_date": request.query_params.get("to_date", None), } return Response(StatisticService.total_harvested_datasets(**params)) - @action(detail=False, methods=['get'], url_path='organization_datasets_cumulative') + @action(detail=False, methods=["get"], url_path="organization_datasets_cumulative") def organization_datasets_cumulative(self, request): - if not request.query_params.get('from_date', None) or not request.query_params.get('to_date', None): - raise Http400('from_date and to_date parameters are required') + if not request.query_params.get("from_date", None) or not request.query_params.get( + "to_date", None + ): + raise Http400("from_date and to_date parameters are required") params = { - 'from_date': request.query_params.get('from_date', None), - 'to_date': request.query_params.get('to_date', None), - 'metadata_owner_org': request.query_params.get('metadata_owner_org', None), + "from_date": request.query_params.get("from_date", None), + "to_date": request.query_params.get("to_date", None), + "metadata_owner_org": request.query_params.get("metadata_owner_org", None), } return Response(StatisticService.total_organization_datasets(**params)) - @action(detail=False, methods=['get'], url_path='unused_files') + @action(detail=False, methods=["get"], url_path="unused_files") def unused_files(self, request): return Response(StatisticService.unused_files()) diff --git a/src/metax_api/api/rpc/v2/router.py b/src/metax_api/api/rpc/v2/router.py index 792edd95..859f1eb1 100755 --- a/src/metax_api/api/rpc/v2/router.py +++ b/src/metax_api/api/rpc/v2/router.py @@ -29,18 +29,17 @@ class CustomRouter(DefaultRouter): - def get_default_basename(self, viewset): """ When a viewset has no queryset set, or base_name is not passed to a router as the 3rd parameter, automatically determine base name. 
""" - return viewset.__class__.__name__.split('RPC')[0] + return viewset.__class__.__name__.split("RPC")[0] router = CustomRouter(trailing_slash=False) -router.register(r'datasets/?', DatasetRPC) -router.register(r'files/?', FileRPC) -router.register(r'statistics/?', StatisticRPC) +router.register(r"datasets/?", DatasetRPC) +router.register(r"files/?", FileRPC) +router.register(r"statistics/?", StatisticRPC) api_urlpatterns = router.urls diff --git a/src/metax_api/api/rpc/v2/views/dataset_rpc.py b/src/metax_api/api/rpc/v2/views/dataset_rpc.py index 752b7447..cbffd5c4 100755 --- a/src/metax_api/api/rpc/v2/views/dataset_rpc.py +++ b/src/metax_api/api/rpc/v2/views/dataset_rpc.py @@ -30,15 +30,17 @@ def get_object(self): RPC api does not handle parameters the exact same way as REST api, so need to re-define get_object() to work properly. """ - if not self.request.query_params.get('identifier', False): - raise Http400('Query param \'identifier\' missing. Please specify ?identifier=') + if not self.request.query_params.get("identifier", False): + raise Http400( + "Query param 'identifier' missing. Please specify ?identifier=" + ) params = {} try: - params['pk'] = int(self.request.query_params['identifier']) + params["pk"] = int(self.request.query_params["identifier"]) except ValueError: - params['identifier'] = self.request.query_params['identifier'] + params["identifier"] = self.request.query_params["identifier"] try: cr = self.object.objects.get(**params) @@ -51,13 +53,13 @@ def get_object(self): return cr - @action(detail=False, methods=['post'], url_path="change_cumulative_state") + @action(detail=False, methods=["post"], url_path="change_cumulative_state") def change_cumulative_state(self, request): - state_value = request.query_params.get('cumulative_state', False) + state_value = request.query_params.get("cumulative_state", False) if not state_value: - raise Http400('Query param \'cumulative_state\' missing') + raise Http400("Query param 'cumulative_state' missing") cr = self.get_object() @@ -67,7 +69,7 @@ def change_cumulative_state(self, request): return Response(status=status.HTTP_204_NO_CONTENT) - @action(detail=False, methods=['post'], url_path="create_draft") + @action(detail=False, methods=["post"], url_path="create_draft") def create_draft(self, request): cr = self.get_object() @@ -75,14 +77,11 @@ def create_draft(self, request): cr.create_draft() return Response( - data={ - 'id': cr.next_draft.id, - 'identifier': cr.next_draft.identifier - }, - status=status.HTTP_201_CREATED + data={"id": cr.next_draft.id, "identifier": cr.next_draft.identifier}, + status=status.HTTP_201_CREATED, ) - @action(detail=False, methods=['post'], url_path="create_new_version") + @action(detail=False, methods=["post"], url_path="create_new_version") def create_new_version(self, request): cr = self.get_object() @@ -91,13 +90,13 @@ def create_new_version(self, request): return Response( data={ - 'id': cr.next_dataset_version.id, - 'identifier': cr.next_dataset_version.identifier + "id": cr.next_dataset_version.id, + "identifier": cr.next_dataset_version.identifier, }, - status=status.HTTP_201_CREATED + status=status.HTTP_201_CREATED, ) - @action(detail=False, methods=['post'], url_path="publish_dataset") + @action(detail=False, methods=["post"], url_path="publish_dataset") def publish_dataset(self, request): cr = self.get_object() @@ -105,11 +104,11 @@ def publish_dataset(self, request): cr.publish_dataset() return Response( - data={ 'preferred_identifier': cr.preferred_identifier }, - 
status=status.HTTP_200_OK + data={"preferred_identifier": cr.preferred_identifier}, + status=status.HTTP_200_OK, ) - @action(detail=False, methods=['post'], url_path="merge_draft") + @action(detail=False, methods=["post"], url_path="merge_draft") def merge_draft(self, request): cr = self.get_object() @@ -118,17 +117,14 @@ def merge_draft(self, request): # the identifier points to the original published dataset. identifier_of_published = cr.merge_draft() - return Response( - data={ 'identifier': identifier_of_published }, - status=status.HTTP_200_OK - ) + return Response(data={"identifier": identifier_of_published}, status=status.HTTP_200_OK) # ensure some v1 api endpoints cant be called in v2 api - @action(detail=False, methods=['post'], url_path="refresh_directory_content") + @action(detail=False, methods=["post"], url_path="refresh_directory_content") def refresh_directory_content(self, request): raise Http501() - @action(detail=False, methods=['post'], url_path="fix_deprecated") + @action(detail=False, methods=["post"], url_path="fix_deprecated") def fix_deprecated(self, request): raise Http501() diff --git a/src/metax_api/checks.py b/src/metax_api/checks.py index 24e94bb4..0cf9468c 100755 --- a/src/metax_api/checks.py +++ b/src/metax_api/checks.py @@ -3,7 +3,9 @@ from watchman.decorators import check from metax_api.services.redis_cache_service import RedisClient -from metax_api.tasks.refdata.refdata_indexer.service.elasticsearch_service import ElasticSearchService +from metax_api.tasks.refdata.refdata_indexer.service.elasticsearch_service import ( + ElasticSearchService, +) logger = logging.getLogger(__name__) @@ -40,6 +42,4 @@ def redis_check(): return {"redis": [{"key: reference_data": {"ok": True}}]} except Exception as e: logger.error(e) - return { - "redis": {"ok": False, "error": str(e), "traceback": str(e.__traceback__)} - } + return {"redis": {"ok": False, "error": str(e), "traceback": str(e.__traceback__)}} diff --git a/src/metax_api/exceptions/http_exceptions.py b/src/metax_api/exceptions/http_exceptions.py index 050848af..443601cc 100755 --- a/src/metax_api/exceptions/http_exceptions.py +++ b/src/metax_api/exceptions/http_exceptions.py @@ -24,43 +24,50 @@ def __init__(self, detail=None, code=None): } """ -class MetaxAPIException(APIException): +class MetaxAPIException(APIException): def __init__(self, detail=None, code=None): """ Override so that exceptions can be used in a more simple manner such that the message always ends up in an array. 
""" if type(detail) is str: - detail = { 'detail': [detail] } + detail = {"detail": [detail]} super().__init__(detail=detail, code=code) + class Http400(MetaxAPIException): # bad request status_code = 400 + class Http401(MetaxAPIException): # unauthorized # note: request is missing authentication information, or it was wrong status_code = 401 + class Http403(MetaxAPIException): # forbidden # note: request user is correctly authenticated, but has no permission status_code = 403 + class Http412(MetaxAPIException): # precondition failed status_code = 412 + class Http500(MetaxAPIException): # internal server error status_code = 500 + class Http501(MetaxAPIException): # not implemented status_code = 501 + class Http503(MetaxAPIException): # service unavailable status_code = 503 diff --git a/src/metax_api/management/commands/first_time_setup.py b/src/metax_api/management/commands/first_time_setup.py index 6a28f634..de90affd 100644 --- a/src/metax_api/management/commands/first_time_setup.py +++ b/src/metax_api/management/commands/first_time_setup.py @@ -13,4 +13,4 @@ def handle(self, *args, **options): call_command("reload_refdata_cache") call_command("loaddata", "metax_api/tests/testdata/test_data.json") call_command("loadinitialdata") - logger.info("All first time setup commands completed successfully") \ No newline at end of file + logger.info("All first time setup commands completed successfully") diff --git a/src/metax_api/management/commands/fix_file_counts.py b/src/metax_api/management/commands/fix_file_counts.py index 8bdc7a53..1c59805e 100644 --- a/src/metax_api/management/commands/fix_file_counts.py +++ b/src/metax_api/management/commands/fix_file_counts.py @@ -6,12 +6,13 @@ logger = logging.getLogger(__name__) + class Command(BaseCommand): def handle(self, *args, **options): dirs_with_no_files = Directory.objects_unfiltered.all() dir_sum = dirs_with_no_files.count() logger.info(f"fix_file_counts command found {dir_sum} directories") - i=0 + i = 0 for dir in dirs_with_no_files: i += 1 try: @@ -19,4 +20,4 @@ def handle(self, *args, **options): except Exception as e: logger.error(f"can't fix filecount for directory {i}/{dir_sum}") logger.info(f"folder {i}/{dir_sum} has {dir.file_count} files after recalculation") - logger.info(f"fix_file_counts command executed successfully") \ No newline at end of file + logger.info(f"fix_file_counts command executed successfully") diff --git a/src/metax_api/management/commands/fix_file_counts_cr.py b/src/metax_api/management/commands/fix_file_counts_cr.py index 6768ffc7..d709378c 100644 --- a/src/metax_api/management/commands/fix_file_counts_cr.py +++ b/src/metax_api/management/commands/fix_file_counts_cr.py @@ -6,14 +6,17 @@ logger = logging.getLogger(__name__) + class Command(BaseCommand): def handle(self, *args, **options): CRS = CatalogRecord.objects.all() crs_sum = CRS.count() - logger.info(f"fix_file_counts command found {crs_sum} catalog records with file_count=0 and byte_size=0") + logger.info( + f"fix_file_counts command found {crs_sum} catalog records with file_count=0 and byte_size=0" + ) i = 1 for catalog_record in CRS: logger.info(f"Calculating {i}/{crs_sum} {catalog_record.identifier} ") catalog_record.calculate_directory_byte_sizes_and_file_counts() i += 1 - logger.info(f"fix_file_counts command executed successfully") \ No newline at end of file + logger.info(f"fix_file_counts command executed successfully") diff --git a/src/metax_api/management/commands/fix_file_counts_for_cr.py 
b/src/metax_api/management/commands/fix_file_counts_for_cr.py index 2409ae14..cba88c3b 100644 --- a/src/metax_api/management/commands/fix_file_counts_for_cr.py +++ b/src/metax_api/management/commands/fix_file_counts_for_cr.py @@ -6,10 +6,11 @@ logger = logging.getLogger(__name__) + class Command(BaseCommand): def handle(self, *args, **options): CRS = CatalogRecord.objects.all() for catalog_record in CRS: catalog_record.calculate_directory_byte_sizes_and_file_counts() logger.info(f"Calculating {catalog_record.identifier} ") - logger.info(f"fix_file_counts command executed successfully") \ No newline at end of file + logger.info(f"fix_file_counts command executed successfully") diff --git a/src/metax_api/management/commands/load_data_to_TTV.py b/src/metax_api/management/commands/load_data_to_TTV.py index a451fd9a..651d1ff6 100644 --- a/src/metax_api/management/commands/load_data_to_TTV.py +++ b/src/metax_api/management/commands/load_data_to_TTV.py @@ -20,29 +20,31 @@ class User: def __init__(self): self.is_service = True + class Request: def __init__(self, user): self.user = user self.query_params = [] - self.method = 'POST' + self.method = "POST" + class Command(BaseCommand): help = "Upload all existing data to TTV's RabbitMQ queue" def handle(self, *args, **options): - catalog_records = CatalogRecord.objects.filter(state='published') + catalog_records = CatalogRecord.objects.filter(state="published") aff_rows = 0 user = User() request = Request(user) - context = {'request': request} + context = {"request": request} for catalog_record in catalog_records: serializer = catalog_record.serializer_class cr_json = serializer(catalog_record, context=context).data - cr_json['data_catalog'] = {'catalog_json': catalog_record.data_catalog.catalog_json} + cr_json["data_catalog"] = {"catalog_json": catalog_record.data_catalog.catalog_json} - RabbitMQService.publish(cr_json, routing_key='create', exchange="TTV-datasets") + RabbitMQService.publish(cr_json, routing_key="create", exchange="TTV-datasets") aff_rows += 1 logger.info("All catalog records published to TTV exchange") diff --git a/src/metax_api/management/commands/loadinitialdata.py b/src/metax_api/management/commands/loadinitialdata.py index eb904c62..641022f2 100755 --- a/src/metax_api/management/commands/loadinitialdata.py +++ b/src/metax_api/management/commands/loadinitialdata.py @@ -16,6 +16,7 @@ logger = logging.getLogger(__name__) + class Command(BaseCommand): help = "Load initial data for Metax: Data catalogs, file storages." 
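# Both loaders in this command follow the same idempotent pattern: build the
# model instance, save, and treat IntegrityError as "already loaded", so the
# command can be rerun safely. A minimal sketch of that pattern, using the
# names visible in this file:
try:
    dc = DataCatalog(
        catalog_json=json_dc["catalog_json"],
        date_created=datetime.datetime.now(),
    )
    dc.save()
except IntegrityError:
    logger.error("datacatalog already exists in the database")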
@@ -25,7 +26,10 @@ def handle(self, *args, **options): data_catalogs = json.load(f) for json_dc in data_catalogs: try: - dc = DataCatalog(catalog_json=json_dc["catalog_json"], date_created=datetime.datetime.now()) + dc = DataCatalog( + catalog_json=json_dc["catalog_json"], + date_created=datetime.datetime.now(), + ) dc.catalog_record_services_create = json_dc["catalog_record_services_create"] dc.catalog_record_services_edit = json_dc["catalog_record_services_edit"] dc.catalog_record_services_read = json_dc["catalog_record_services_read"] @@ -38,7 +42,10 @@ def handle(self, *args, **options): storages = json.load(f) for fs in storages: try: - fs = FileStorage(file_storage_json=fs["file_storage_json"], date_created=datetime.datetime.now()) + fs = FileStorage( + file_storage_json=fs["file_storage_json"], + date_created=datetime.datetime.now(), + ) fs.save() except IntegrityError as e: logger.error("filestorage already exists in the database") diff --git a/src/metax_api/models/catalog_record.py b/src/metax_api/models/catalog_record.py index a9c27fa4..a57b87fa 100755 --- a/src/metax_api/models/catalog_record.py +++ b/src/metax_api/models/catalog_record.py @@ -37,17 +37,17 @@ from .directory import Directory from .file import File -READ_METHODS = ('GET', 'HEAD', 'OPTIONS') +READ_METHODS = ("GET", "HEAD", "OPTIONS") DEBUG = settings.DEBUG _logger = logging.getLogger(__name__) ACCESS_TYPES = { - 'open': 'http://uri.suomi.fi/codelist/fairdata/access_type/code/open', - 'login': 'http://uri.suomi.fi/codelist/fairdata/access_type/code/login', - 'permit': 'http://uri.suomi.fi/codelist/fairdata/access_type/code/permit', - 'embargo': 'http://uri.suomi.fi/codelist/fairdata/access_type/code/embargo', - 'restricted': 'http://uri.suomi.fi/codelist/fairdata/access_type/code/restricted' + "open": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "login": "http://uri.suomi.fi/codelist/fairdata/access_type/code/login", + "permit": "http://uri.suomi.fi/codelist/fairdata/access_type/code/permit", + "embargo": "http://uri.suomi.fi/codelist/fairdata/access_type/code/embargo", + "restricted": "http://uri.suomi.fi/codelist/fairdata/access_type/code/restricted", } @@ -57,6 +57,7 @@ PAS_CATALOG = settings.PAS_DATA_CATALOG_IDENTIFIER DFT_CATALOG = settings.DFT_DATA_CATALOG_IDENTIFIER + class DiscardRecord(Exception): pass @@ -73,7 +74,7 @@ class AlternateRecordSet(models.Model): id = models.BigAutoField(primary_key=True, editable=False) - def print_records(self): # pragma: no cover + def print_records(self): # pragma: no cover for r in self.records.all(): print(r.__repr__()) @@ -86,6 +87,7 @@ class DatasetVersionSet(models.Model): Note! Does not inherit from model Common, so does not have timestmap fields, and a delete is an actual delete. """ + id = models.BigAutoField(primary_key=True, editable=False) def get_listing(self): @@ -93,27 +95,31 @@ def get_listing(self): Return a list of record preferred_identifiers that belong in the same dataset version chain. Latest first. 
""" - records = self.records(manager='objects_unfiltered') \ - .filter(state=CatalogRecord.STATE_PUBLISHED) \ - .order_by('-date_created') + records = ( + self.records(manager="objects_unfiltered") + .filter(state=CatalogRecord.STATE_PUBLISHED) + .order_by("-date_created") + ) versions = [ { - 'identifier': r.identifier, - 'preferred_identifier': r.preferred_identifier, - 'removed': r.removed, - 'date_created': r.date_created.astimezone().isoformat(), - 'date_removed': r.date_removed.astimezone().isoformat() if r.date_removed else None + "identifier": r.identifier, + "preferred_identifier": r.preferred_identifier, + "removed": r.removed, + "date_created": r.date_created.astimezone().isoformat(), + "date_removed": r.date_removed.astimezone().isoformat() if r.date_removed else None, } for r in records ] # dont show the date_removed field at all if the value is None (record has not been removed) - versions = [{key: value for (key, value) in i.items() if value is not None} for i in versions] + versions = [ + {key: value for (key, value) in i.items() if value is not None} for i in versions + ] return versions - def print_records(self): # pragma: no cover + def print_records(self): # pragma: no cover for r in self.records.all(): print(r.__repr__()) @@ -125,65 +131,80 @@ class ResearchDatasetVersion(models.Model): metadata_version_identifier = models.CharField(max_length=200, unique=True) preferred_identifier = models.CharField(max_length=200) research_dataset = JSONField() - catalog_record = models.ForeignKey('CatalogRecord', on_delete=models.DO_NOTHING, - related_name='research_dataset_versions') + catalog_record = models.ForeignKey( + "CatalogRecord", + on_delete=models.DO_NOTHING, + related_name="research_dataset_versions", + ) class Meta: indexes = [ - models.Index(fields=['metadata_version_identifier']), + models.Index(fields=["metadata_version_identifier"]), ] def __str__(self): return self.__repr__() def __repr__(self): - return '<%s: %d, cr: %d, metadata_version_identifier: %s, stored_to_pas: %s>' \ - % ( - 'ResearchDatasetVersion', - self.id, - self.catalog_record_id, - self.metadata_version_identifier, - str(self.stored_to_pas), - ) + return "<%s: %d, cr: %d, metadata_version_identifier: %s, stored_to_pas: %s>" % ( + "ResearchDatasetVersion", + self.id, + self.catalog_record_id, + self.metadata_version_identifier, + str(self.stored_to_pas), + ) class CatalogRecordManager(CommonManager): - def get(self, *args, **kwargs): - if kwargs.get('using_dict', None): + if kwargs.get("using_dict", None): # for a simple "just get me the instance that equals this dict i have" search. # preferred_identifier is not a valid search key, since it wouldnt necessarily # work during an update (if preferred_identifier is being updated). # this is useful if during a request the url does not contain the identifier (bulk update), # and in generic operations where the type of object being handled is not known (also bulk operations). 
- row = kwargs.pop('using_dict') - if row.get('id', None): - kwargs['id'] = row['id'] - elif row.get('identifier', None): - kwargs['identifier'] = row['identifier'] - elif row.get('research_dataset', None) and row['research_dataset'].get('metadata_version_identifier', None): + row = kwargs.pop("using_dict") + if row.get("id", None): + kwargs["id"] = row["id"] + elif row.get("identifier", None): + kwargs["identifier"] = row["identifier"] + elif row.get("research_dataset", None) and row["research_dataset"].get( + "metadata_version_identifier", None + ): # todo probably remove at some point - kwargs['research_dataset__contains'] = { - 'metadata_version_identifier': row['research_dataset']['metadata_version_identifier'] + kwargs["research_dataset__contains"] = { + "metadata_version_identifier": row["research_dataset"][ + "metadata_version_identifier" + ] } else: raise ValidationError( - 'this operation requires an identifying key to be present: id, or identifier') + "this operation requires an identifying key to be present: id, or identifier" + ) return super(CatalogRecordManager, self).get(*args, **kwargs) - def get_id(self, metadata_version_identifier=None): # pragma: no cover + def get_id(self, metadata_version_identifier=None): # pragma: no cover """ Takes metadata_version_identifier, and returns the plain pk of the record. Useful for debugging """ if not metadata_version_identifier: - raise ValidationError('metadata_version_identifier is a required keyword argument') - cr = super(CatalogRecordManager, self).filter( - **{ 'research_dataset__contains': {'metadata_version_identifier': metadata_version_identifier } } - ).values('id').first() + raise ValidationError("metadata_version_identifier is a required keyword argument") + cr = ( + super(CatalogRecordManager, self) + .filter( + **{ + "research_dataset__contains": { + "metadata_version_identifier": metadata_version_identifier + } + } + ) + .values("id") + .first() + ) if not cr: raise Http404 - return cr['id'] + return cr["id"] class CatalogRecord(Common): @@ -207,23 +228,35 @@ class CatalogRecord(Common): PRESERVATION_STATE_IN_DISSEMINATION = 140 PRESERVATION_STATE_CHOICES = ( - (PRESERVATION_STATE_INITIALIZED, 'Initialized'), - (PRESERVATION_STATE_PROPOSED, 'Proposed for digital preservation'), - (PRESERVATION_STATE_TECHNICAL_METADATA_GENERATED, 'Technical metadata generated'), - (PRESERVATION_STATE_TECHNICAL_METADATA_GENERATED_FAILED, 'Technical metadata generation failed'), - (PRESERVATION_STATE_INVALID_METADATA, 'Invalid metadata'), - (PRESERVATION_STATE_METADATA_VALIDATION_FAILED, 'Metadata validation failed'), - (PRESERVATION_STATE_VALIDATED_METADATA_UPDATED, 'Validated metadata updated'), - (PRESERVATION_STATE_VALIDATING_METADATA, 'Validating metadata'), - (PRESERVATION_STATE_VALID_METADATA, 'Valid metadata'), - (PRESERVATION_STATE_METADATA_CONFIRMED, 'Metadata confirmed'), - (PRESERVATION_STATE_ACCEPTED_TO_PAS, 'Accepted to digital preservation'), - (PRESERVATION_STATE_IN_PACKAGING_SERVICE, 'in packaging service'), - (PRESERVATION_STATE_PACKAGING_FAILED, 'Packaging failed'), - (PRESERVATION_STATE_SIP_IN_INGESTION, 'SIP sent to ingestion in digital preservation service'), - (PRESERVATION_STATE_IN_PAS, 'in digital preservation'), - (PRESERVATION_STATE_REJECTED_FROM_PAS, 'Rejected in digital preservation service'), - (PRESERVATION_STATE_IN_DISSEMINATION, 'in dissemination'), + (PRESERVATION_STATE_INITIALIZED, "Initialized"), + (PRESERVATION_STATE_PROPOSED, "Proposed for digital preservation"), + ( + 
PRESERVATION_STATE_TECHNICAL_METADATA_GENERATED, + "Technical metadata generated", + ), + ( + PRESERVATION_STATE_TECHNICAL_METADATA_GENERATED_FAILED, + "Technical metadata generation failed", + ), + (PRESERVATION_STATE_INVALID_METADATA, "Invalid metadata"), + (PRESERVATION_STATE_METADATA_VALIDATION_FAILED, "Metadata validation failed"), + (PRESERVATION_STATE_VALIDATED_METADATA_UPDATED, "Validated metadata updated"), + (PRESERVATION_STATE_VALIDATING_METADATA, "Validating metadata"), + (PRESERVATION_STATE_VALID_METADATA, "Valid metadata"), + (PRESERVATION_STATE_METADATA_CONFIRMED, "Metadata confirmed"), + (PRESERVATION_STATE_ACCEPTED_TO_PAS, "Accepted to digital preservation"), + (PRESERVATION_STATE_IN_PACKAGING_SERVICE, "in packaging service"), + (PRESERVATION_STATE_PACKAGING_FAILED, "Packaging failed"), + ( + PRESERVATION_STATE_SIP_IN_INGESTION, + "SIP sent to ingestion in digital preservation service", + ), + (PRESERVATION_STATE_IN_PAS, "in digital preservation"), + ( + PRESERVATION_STATE_REJECTED_FROM_PAS, + "Rejected in digital preservation service", + ), + (PRESERVATION_STATE_IN_DISSEMINATION, "in dissemination"), ) CUMULATIVE_STATE_NO = 0 @@ -231,121 +264,191 @@ class CatalogRecord(Common): CUMULATIVE_STATE_CLOSED = 2 CUMULATIVE_STATE_CHOICES = ( - (CUMULATIVE_STATE_NO, 'no'), - (CUMULATIVE_STATE_YES, 'yes'), - (CUMULATIVE_STATE_CLOSED, 'closed') + (CUMULATIVE_STATE_NO, "no"), + (CUMULATIVE_STATE_YES, "yes"), + (CUMULATIVE_STATE_CLOSED, "closed"), ) - STATE_PUBLISHED = 'published' - STATE_DRAFT = 'draft' + STATE_PUBLISHED = "published" + STATE_DRAFT = "draft" - STATE_CHOICES = ( - (STATE_PUBLISHED, 'published'), - (STATE_DRAFT, 'draft') - ) + STATE_CHOICES = ((STATE_PUBLISHED, "published"), (STATE_DRAFT, "draft")) # MODEL FIELD DEFINITIONS # alternate_record_set = models.ForeignKey( - AlternateRecordSet, on_delete=models.SET_NULL, null=True, related_name='records', - help_text='Records which are duplicates of this record, but in another catalog.') + AlternateRecordSet, + on_delete=models.SET_NULL, + null=True, + related_name="records", + help_text="Records which are duplicates of this record, but in another catalog.", + ) - contract = models.ForeignKey(Contract, null=True, on_delete=models.DO_NOTHING, related_name='records') + contract = models.ForeignKey( + Contract, null=True, on_delete=models.DO_NOTHING, related_name="records" + ) - data_catalog = models.ForeignKey(DataCatalog, on_delete=models.DO_NOTHING, related_name='records') + data_catalog = models.ForeignKey( + DataCatalog, on_delete=models.DO_NOTHING, related_name="records" + ) state = models.CharField( choices=STATE_CHOICES, default=STATE_DRAFT, max_length=200, - help_text='Publishing state (published / draft) of the dataset.' 
+ help_text="Publishing state (published / draft) of the dataset.", ) dataset_group_edit = models.CharField( - max_length=200, blank=True, null=True, - help_text='Group which is allowed to edit the dataset in this catalog record.') + max_length=200, + blank=True, + null=True, + help_text="Group which is allowed to edit the dataset in this catalog record.", + ) deprecated = models.BooleanField( - default=False, help_text='Is True when files attached to a dataset have been deleted in IDA.') + default=False, + help_text="Is True when files attached to a dataset have been deleted in IDA.", + ) use_doi_for_published = models.BooleanField( default=None, blank=True, null=True, - help_text='Is True when "Use_DOI" field is checked in Qvain Light for draft.') + help_text='Is True when "Use_DOI" field is checked in Qvain Light for draft.', + ) date_deprecated = models.DateTimeField(null=True) - _directory_data = JSONField(null=True, help_text='Stores directory data related to browsing files and directories') + _directory_data = JSONField( + null=True, + help_text="Stores directory data related to browsing files and directories", + ) - files = models.ManyToManyField(File, related_query_name='record') + files = models.ManyToManyField(File, related_query_name="record") identifier = models.CharField(max_length=200, unique=True, null=False) mets_object_identifier = ArrayField(models.CharField(max_length=200), null=True) - metadata_owner_org = models.CharField(max_length=200, null=True, - help_text='Actually non-nullable, but is derived from field metadata_provider_org if omitted.') + metadata_owner_org = models.CharField( + max_length=200, + null=True, + help_text="Actually non-nullable, but is derived from field metadata_provider_org if omitted.", + ) - metadata_provider_org = models.CharField(max_length=200, null=False, help_text='Non-modifiable after creation') + metadata_provider_org = models.CharField( + max_length=200, null=False, help_text="Non-modifiable after creation" + ) - metadata_provider_user = models.CharField(max_length=200, null=False, help_text='Non-modifiable after creation') + metadata_provider_user = models.CharField( + max_length=200, null=False, help_text="Non-modifiable after creation" + ) - editor = JSONField(null=True, help_text='Editor specific fields, such as owner_id, modified, record_identifier') + editor = JSONField( + null=True, + help_text="Editor specific fields, such as owner_id, modified, record_identifier", + ) - preservation_dataset_version = models.OneToOneField('self', on_delete=models.DO_NOTHING, null=True, - related_name='preservation_dataset_origin_version', - help_text='Link between a PAS-stored dataset and the originating dataset.') + preservation_dataset_version = models.OneToOneField( + "self", + on_delete=models.DO_NOTHING, + null=True, + related_name="preservation_dataset_origin_version", + help_text="Link between a PAS-stored dataset and the originating dataset.", + ) preservation_description = models.CharField( - max_length=200, blank=True, null=True, help_text='Reason for accepting or rejecting PAS proposal.') + max_length=200, + blank=True, + null=True, + help_text="Reason for accepting or rejecting PAS proposal.", + ) preservation_reason_description = models.CharField( - max_length=200, blank=True, null=True, help_text='Reason for PAS proposal from the user.') + max_length=200, + blank=True, + null=True, + help_text="Reason for PAS proposal from the user.", + ) preservation_state = models.IntegerField( - choices=PRESERVATION_STATE_CHOICES, 
default=PRESERVATION_STATE_INITIALIZED, help_text='Record state in PAS.') + choices=PRESERVATION_STATE_CHOICES, + default=PRESERVATION_STATE_INITIALIZED, + help_text="Record state in PAS.", + ) - preservation_state_modified = models.DateTimeField(null=True, help_text='Date of last preservation state change.') + preservation_state_modified = models.DateTimeField( + null=True, help_text="Date of last preservation state change." + ) preservation_identifier = models.CharField(max_length=200, unique=True, null=True) research_dataset = JSONField() - next_draft = models.OneToOneField('self', on_delete=models.SET_NULL, null=True, - related_name='draft_of', - help_text='A draft of the next changes to be published on this dataset, in order to be able ' - 'to save progress, and continue later. Is created from a published dataset. ' - 'When the draft is published, changes are saved on top of the original dataset, ' - 'and the draft record is destroyed.') + next_draft = models.OneToOneField( + "self", + on_delete=models.SET_NULL, + null=True, + related_name="draft_of", + help_text="A draft of the next changes to be published on this dataset, in order to be able " + "to save progress, and continue later. Is created from a published dataset. " + "When the draft is published, changes are saved on top of the original dataset, " + "and the draft record is destroyed.", + ) - next_dataset_version = models.OneToOneField('self', on_delete=models.DO_NOTHING, null=True, - related_name='+') + next_dataset_version = models.OneToOneField( + "self", on_delete=models.DO_NOTHING, null=True, related_name="+" + ) - previous_dataset_version = models.OneToOneField('self', on_delete=models.DO_NOTHING, null=True, - related_name='+') + previous_dataset_version = models.OneToOneField( + "self", on_delete=models.DO_NOTHING, null=True, related_name="+" + ) dataset_version_set = models.ForeignKey( - DatasetVersionSet, on_delete=models.DO_NOTHING, null=True, related_name='records', - help_text='Records which are different dataset versions of each other.') + DatasetVersionSet, + on_delete=models.DO_NOTHING, + null=True, + related_name="records", + help_text="Records which are different dataset versions of each other.", + ) - cumulative_state = models.IntegerField(choices=CUMULATIVE_STATE_CHOICES, default=CUMULATIVE_STATE_NO) + cumulative_state = models.IntegerField( + choices=CUMULATIVE_STATE_CHOICES, default=CUMULATIVE_STATE_NO + ) - date_cumulation_started = models.DateTimeField(null=True, help_text='Date when cumulative_state was set to YES.') + date_cumulation_started = models.DateTimeField( + null=True, help_text="Date when cumulative_state was set to YES." + ) - date_cumulation_ended = models.DateTimeField(null=True, help_text='Date when cumulative_state was set to CLOSED.') + date_cumulation_ended = models.DateTimeField( + null=True, help_text="Date when cumulative_state was set to CLOSED." 
+ ) - date_last_cumulative_addition = models.DateTimeField(null=True, default=None, - help_text='Date of last file addition while actively cumulative.') + date_last_cumulative_addition = models.DateTimeField( + null=True, + default=None, + help_text="Date of last file addition while actively cumulative.", + ) - access_granter = JSONField(null=True, default=None, - help_text='Stores data of REMS user who is currently granting access to this dataset') + access_granter = JSONField( + null=True, + default=None, + help_text="Stores data of REMS user who is currently granting access to this dataset", + ) - rems_identifier = models.CharField(max_length=200, null=True, default=None, - help_text='Defines corresponding catalog item in REMS service') + rems_identifier = models.CharField( + max_length=200, + null=True, + default=None, + help_text="Defines corresponding catalog item in REMS service", + ) - api_meta = JSONField(null=True, default=dict, - help_text='Saves api related info about the dataset. E.g. api version') + api_meta = JSONField( + null=True, + default=dict, + help_text="Saves api related info about the dataset. E.g. api version", + ) # END OF MODEL FIELD DEFINITIONS # @@ -380,39 +483,40 @@ class CatalogRecord(Common): class Meta: indexes = [ - models.Index(fields=['data_catalog']), - models.Index(fields=['identifier']), + models.Index(fields=["data_catalog"]), + models.Index(fields=["identifier"]), ] - ordering = ['id'] + ordering = ["id"] def __init__(self, *args, **kwargs): super(CatalogRecord, self).__init__(*args, **kwargs) self.track_fields( - 'api_meta', - 'access_granter', - 'cumulative_state', - 'data_catalog_id', - 'date_deprecated', - 'deprecated', - 'identifier', - 'metadata_owner_org', - 'metadata_provider_org', - 'metadata_provider_user', - 'preservation_state', - 'preservation_identifier', - 'research_dataset', - 'research_dataset.files', - 'research_dataset.directories', - 'research_dataset.total_files_byte_size', - 'research_dataset.total_remote_resources_byte_size', - 'research_dataset.metadata_version_identifier', - 'research_dataset.preferred_identifier', + "api_meta", + "access_granter", + "cumulative_state", + "data_catalog_id", + "date_deprecated", + "deprecated", + "identifier", + "metadata_owner_org", + "metadata_provider_org", + "metadata_provider_user", + "preservation_state", + "preservation_identifier", + "research_dataset", + "research_dataset.files", + "research_dataset.directories", + "research_dataset.total_files_byte_size", + "research_dataset.total_remote_resources_byte_size", + "research_dataset.metadata_version_identifier", + "research_dataset.preferred_identifier", ) from metax_api.api.rest.base.serializers import CatalogRecordSerializer + self.serializer_class = CatalogRecordSerializer self.api_version = 1 - def print_files(self): # pragma: no cover + def print_files(self): # pragma: no cover for f in self.files.all(): print(f) @@ -421,23 +525,29 @@ def user_has_access(self, request): In the future, will probably be more involved checking... 
""" if request.user.is_service: - if request.method == 'GET': + if request.method == "GET": return True - if not self._check_catalog_permissions(self.data_catalog.catalog_record_group_edit, - self.data_catalog.catalog_record_services_edit, request): + if not self._check_catalog_permissions( + self.data_catalog.catalog_record_group_edit, + self.data_catalog.catalog_record_services_edit, + request, + ): return False return True elif request.method in READ_METHODS: - if request.user.username is None: # unauthenticated user + if request.user.username is None: # unauthenticated user if self.state == self.STATE_PUBLISHED: return True else: raise Http404 - else: # enduser + else: # enduser if self.state == self.STATE_PUBLISHED: return True - elif self.state == self.STATE_DRAFT and self.metadata_provider_user == request.user.username: + elif ( + self.state == self.STATE_DRAFT + and self.metadata_provider_user == request.user.username + ): return True else: raise Http404 @@ -447,13 +557,15 @@ def user_has_access(self, request): def user_is_owner(self, request): if self.state == self.STATE_DRAFT and self.metadata_provider_user != request.user.username: - _logger.debug('404 due to state == draft and metadata_provider_user != request.user.username') - _logger.debug('metadata_provider_user = %s', self.metadata_provider_user) - _logger.debug('request.user.username = %s', request.user.username) + _logger.debug( + "404 due to state == draft and metadata_provider_user != request.user.username" + ) + _logger.debug("metadata_provider_user = %s", self.metadata_provider_user) + _logger.debug("request.user.username = %s", request.user.username) raise Http404 - if self.editor and 'owner_id' in self.editor: - return request.user.username == self.editor['owner_id'] + if self.editor and "owner_id" in self.editor: + return request.user.username == self.editor["owner_id"] elif self.metadata_provider_user: return request.user.username == self.metadata_provider_user @@ -469,9 +581,12 @@ def user_is_privileged(self, request): :return: """ if request.user.is_service: - if request.method == 'GET': - if not self._check_catalog_permissions(self.data_catalog.catalog_record_group_read, - self.data_catalog.catalog_record_services_read, request): + if request.method == "GET": + if not self._check_catalog_permissions( + self.data_catalog.catalog_record_group_read, + self.data_catalog.catalog_record_services_read, + request, + ): return False else: return True @@ -497,64 +612,85 @@ def _check_catalog_permissions(self, catalog_groups, catalog_services, request=N if request: self.request = request - if not self.request: # pragma: no cover + if not self.request: # pragma: no cover # should only only happen when setting up test cases - assert executing_test_case(), 'only permitted when setting up testing conditions' + assert executing_test_case(), "only permitted when setting up testing conditions" return True if self.request.user.is_service: if catalog_services: - allowed_services = [i.lower() for i in catalog_services.split(',')] + allowed_services = [i.lower() for i in catalog_services.split(",")] from metax_api.services import AuthService - return AuthService.check_services_against_allowed_services(self.request, allowed_services) + + return AuthService.check_services_against_allowed_services( + self.request, allowed_services + ) return False elif not self.request.user.is_service: if catalog_groups: - allowed_groups = catalog_groups.split(',') + allowed_groups = catalog_groups.split(",") from metax_api.services import AuthService 
+ return AuthService.check_user_groups_against_groups(self.request, allowed_groups) return True _logger.info( - 'Catalog {} is not belonging to any service or group '.format(self.data_catalog.catalog_json['identifier']) + "Catalog {} is not belonging to any service or group ".format( + self.data_catalog.catalog_json["identifier"] + ) ) return False def _access_type_is_open(self): from metax_api.services import CatalogRecordService as CRS - return CRS.get_research_dataset_access_type(self.research_dataset) == ACCESS_TYPES['open'] + + return CRS.get_research_dataset_access_type(self.research_dataset) == ACCESS_TYPES["open"] def _access_type_is_login(self): from metax_api.services import CatalogRecordService as CRS - return CRS.get_research_dataset_access_type(self.research_dataset) == ACCESS_TYPES['login'] + + return CRS.get_research_dataset_access_type(self.research_dataset) == ACCESS_TYPES["login"] def _access_type_is_embargo(self): from metax_api.services import CatalogRecordService as CRS - return CRS.get_research_dataset_access_type(self.research_dataset) == ACCESS_TYPES['embargo'] + + return ( + CRS.get_research_dataset_access_type(self.research_dataset) == ACCESS_TYPES["embargo"] + ) def _access_type_is_permit(self): from metax_api.services import CatalogRecordService as CRS - return CRS.get_research_dataset_access_type(self.research_dataset) == ACCESS_TYPES['permit'] + + return CRS.get_research_dataset_access_type(self.research_dataset) == ACCESS_TYPES["permit"] def _access_type_was_permit(self): from metax_api.services import CatalogRecordService as CRS - return CRS.get_research_dataset_access_type(self._initial_data['research_dataset']) == ACCESS_TYPES['permit'] + + return ( + CRS.get_research_dataset_access_type(self._initial_data["research_dataset"]) + == ACCESS_TYPES["permit"] + ) def _embargo_is_available(self): - if not self.research_dataset.get('access_rights', {}).get('available', False): + if not self.research_dataset.get("access_rights", {}).get("available", False): return False try: return get_tz_aware_now_without_micros() >= parse_timestamp_string_to_tz_aware_datetime( - self.research_dataset.get('access_rights', {}).get('available', {})) + self.research_dataset.get("access_rights", {}).get("available", {}) + ) except Exception as e: _logger.error(e) return False def authorized_to_see_catalog_record_files(self, request): - return self.user_is_privileged(request) or self._access_type_is_open() or self._access_type_is_login() or \ - (self._access_type_is_embargo() and self._embargo_is_available()) + return ( + self.user_is_privileged(request) + or self._access_type_is_open() + or self._access_type_is_login() + or (self._access_type_is_embargo() and self._embargo_is_available()) + ) def save(self, *args, **kwargs): """ @@ -562,7 +698,7 @@ def save(self, *args, **kwargs): unknown keyword arguments. 
""" if self._operation_is_create(): - self._pre_create_operations(pid_type=kwargs.pop('pid_type', None)) + self._pre_create_operations(pid_type=kwargs.pop("pid_type", None)) super(CatalogRecord, self).save(*args, **kwargs) self._post_create_operations() else: @@ -582,42 +718,52 @@ def _process_file_changes(self, file_changes, new_record_id, old_record_id): """ actual_files_changed = False - files_to_add = file_changes['files_to_add'] - files_to_remove = file_changes['files_to_remove'] - files_to_keep = file_changes['files_to_keep'] - dirs_to_add_by_project = file_changes['dirs_to_add_by_project'] - dirs_to_remove_by_project = file_changes['dirs_to_remove_by_project'] - dirs_to_keep_by_project = file_changes['dirs_to_keep_by_project'] - - if files_to_add or files_to_remove or files_to_keep or dirs_to_add_by_project or \ - dirs_to_remove_by_project or dirs_to_keep_by_project: + files_to_add = file_changes["files_to_add"] + files_to_remove = file_changes["files_to_remove"] + files_to_keep = file_changes["files_to_keep"] + dirs_to_add_by_project = file_changes["dirs_to_add_by_project"] + dirs_to_remove_by_project = file_changes["dirs_to_remove_by_project"] + dirs_to_keep_by_project = file_changes["dirs_to_keep_by_project"] + + if ( + files_to_add + or files_to_remove + or files_to_keep + or dirs_to_add_by_project + or dirs_to_remove_by_project + or dirs_to_keep_by_project + ): # note: files_to_keep and dirs_to_keep_by_project are also included because we # want to create new version on some cumulative_state changes. if DEBUG: - _logger.debug('Detected the following file changes:') + _logger.debug("Detected the following file changes:") if files_to_keep: # sql to copy single files from the previous version to the new version. only copy those # files which have been listed in research_dataset.files - sql_copy_files_from_prev_version = ''' + sql_copy_files_from_prev_version = """ insert into metax_api_catalogrecord_files (catalogrecord_id, file_id) select %s as catalogrecord_id, file_id from metax_api_catalogrecord_files as cr_f inner join metax_api_file as f on f.id = cr_f.file_id where catalogrecord_id = %s and file_id in %s - ''' - sql_params_copy_files = [new_record_id, old_record_id, tuple(files_to_keep)] + """ + sql_params_copy_files = [ + new_record_id, + old_record_id, + tuple(files_to_keep), + ] if DEBUG: - _logger.debug('File ids to keep: %s' % files_to_keep) + _logger.debug("File ids to keep: %s" % files_to_keep) if dirs_to_keep_by_project: # sql top copy files from entire directories. only copy files from the upper level dirs found # by processing research_dataset.directories. 
- sql_copy_dirs_from_prev_version = ''' + sql_copy_dirs_from_prev_version = """ insert into metax_api_catalogrecord_files (catalogrecord_id, file_id) select %s as catalogrecord_id, file_id from metax_api_catalogrecord_files as cr_f @@ -631,21 +777,22 @@ def _process_file_changes(self, file_changes, new_record_id, old_record_id): metax_api_catalogrecord_files cr_f where catalogrecord_id = %s ) - ''' + """ sql_params_copy_dirs = [new_record_id, old_record_id] copy_dirs_sql = [] for project, dir_paths in dirs_to_keep_by_project.items(): for dir_path in dir_paths: - copy_dirs_sql.append("(f.project_identifier = %s and f.file_path like (%s || '/%%'))") + copy_dirs_sql.append( + "(f.project_identifier = %s and f.file_path like (%s || '/%%'))" + ) sql_params_copy_dirs.extend([project, dir_path]) sql_params_copy_dirs.extend([new_record_id]) sql_copy_dirs_from_prev_version = sql_copy_dirs_from_prev_version.replace( - 'COMPARE_PROJECT_AND_FILE_PATHS', - ' or '.join(copy_dirs_sql) + "COMPARE_PROJECT_AND_FILE_PATHS", " or ".join(copy_dirs_sql) ) # ^ generates: # and ( @@ -657,17 +804,17 @@ def _process_file_changes(self, file_changes, new_record_id, old_record_id): # ) if DEBUG: - _logger.debug('Directory paths to keep, by project:') + _logger.debug("Directory paths to keep, by project:") for project, dir_paths in dirs_to_keep_by_project.items(): - _logger.debug('\tProject: %s' % project) + _logger.debug("\tProject: %s" % project) for dir_path in dir_paths: - _logger.debug('\t\t%s' % dir_path) + _logger.debug("\t\t%s" % dir_path) if dirs_to_add_by_project: # sql to add new files by directory path that were not previously included. # also takes care of "path is already included by another dir, but i want to check if there # are new files to add in there" - sql_select_and_insert_files_by_dir_path = ''' + sql_select_and_insert_files_by_dir_path = """ insert into metax_api_catalogrecord_files (catalogrecord_id, file_id) select %s as catalogrecord_id, f.id from metax_api_file as f @@ -680,36 +827,39 @@ def _process_file_changes(self, file_changes, new_record_id, old_record_id): metax_api_catalogrecord_files cr_f where catalogrecord_id = %s ) - ''' + """ sql_params_insert_dirs = [new_record_id] add_dirs_sql = [] for project, dir_paths in dirs_to_add_by_project.items(): for dir_path in dir_paths: - add_dirs_sql.append("(f.project_identifier = %s and f.file_path like (%s || '/%%'))") + add_dirs_sql.append( + "(f.project_identifier = %s and f.file_path like (%s || '/%%'))" + ) sql_params_insert_dirs.extend([project, dir_path]) - sql_select_and_insert_files_by_dir_path = sql_select_and_insert_files_by_dir_path.replace( - 'COMPARE_PROJECT_AND_FILE_PATHS', - ' or '.join(add_dirs_sql) + sql_select_and_insert_files_by_dir_path = ( + sql_select_and_insert_files_by_dir_path.replace( + "COMPARE_PROJECT_AND_FILE_PATHS", " or ".join(add_dirs_sql) + ) ) sql_params_insert_dirs.extend([new_record_id]) if DEBUG: - _logger.debug('Directory paths to add, by project:') + _logger.debug("Directory paths to add, by project:") for project, dir_paths in dirs_to_add_by_project.items(): - _logger.debug('\tProject: %s' % project) + _logger.debug("\tProject: %s" % project) for dir_path in dir_paths: - _logger.debug('\t\t%s' % dir_path) + _logger.debug("\t\t%s" % dir_path) if files_to_add: # sql to add any new singular files which were not covered by any directory path # being added. 
also takes care of "path is already included by another dir, # but this file did not necessarily exist yet at that time, so add it in case # its a new file" - sql_insert_single_files = ''' + sql_insert_single_files = """ insert into metax_api_catalogrecord_files (catalogrecord_id, file_id) select %s as catalogrecord_id, f.id from metax_api_file as f @@ -720,13 +870,17 @@ def _process_file_changes(self, file_changes, new_record_id, old_record_id): metax_api_catalogrecord_files cr_f where catalogrecord_id = %s ) - ''' - sql_params_insert_single = [new_record_id, tuple(files_to_add), new_record_id] + """ + sql_params_insert_single = [ + new_record_id, + tuple(files_to_add), + new_record_id, + ] if DEBUG: - _logger.debug('File ids to add: %s' % files_to_add) + _logger.debug("File ids to add: %s" % files_to_add) - sql_detect_files_changed = ''' + sql_detect_files_changed = """ select exists( select a.file_id from metax_api_catalogrecord_files a where a.catalogrecord_id = %s except @@ -738,12 +892,12 @@ def _process_file_changes(self, file_changes, new_record_id, old_record_id): except select b.file_id from metax_api_catalogrecord_files b where b.catalogrecord_id = %s ) as compare_old_to_new - ''' + """ sql_params_files_changed = [ new_record_id, old_record_id, old_record_id, - new_record_id + new_record_id, ] with connection.cursor() as cr: @@ -765,7 +919,9 @@ def _process_file_changes(self, file_changes, new_record_id, old_record_id): actual_files_changed = any(v[0] for v in cr.fetchall()) if DEBUG: - _logger.debug('Actual files changed during version change: %s' % str(actual_files_changed)) + _logger.debug( + "Actual files changed during version change: %s" % str(actual_files_changed) + ) else: # no files to specifically add or remove - do nothing. shouldnt even be here. the new record # created will be discarded @@ -785,14 +941,15 @@ def _find_file_changes(self): # not allowing file/dir updates via rest api for deprecated datasets will (hopefully) make # the versioning more robust if self.deprecated and any( - file_description_changes['files']['removed'] or - file_description_changes['files']['added'] or - file_description_changes['directories']['removed'] or - file_description_changes['directories']['added']): + file_description_changes["files"]["removed"] + or file_description_changes["files"]["added"] + or file_description_changes["directories"]["removed"] + or file_description_changes["directories"]["added"] + ): raise Http400( - 'Cannot add or remove files/directories from deprecated dataset. ' - 'Please use API /rpc/fix_deprecated to fix deprecation and to allow file modifications.' + "Cannot add or remove files/directories from deprecated dataset. " + "Please use API /rpc/fix_deprecated to fix deprecation and to allow file modifications." 
) # after copying files from the previous version to the new version, these arrays hold any new @@ -820,146 +977,186 @@ def _find_file_changes(self): # permissions for changed files: no need to retrieve related project identifiers again changed_projects = defaultdict(set) - self._find_new_dirs_to_add(file_description_changes, - dirs_to_add_by_project, - dirs_to_remove_by_project, - dirs_to_keep_by_project) + self._find_new_dirs_to_add( + file_description_changes, + dirs_to_add_by_project, + dirs_to_remove_by_project, + dirs_to_keep_by_project, + ) - self._find_new_files_to_add(file_description_changes, - files_to_add, - files_to_remove, - files_to_keep, - changed_projects) + self._find_new_files_to_add( + file_description_changes, + files_to_add, + files_to_remove, + files_to_keep, + changed_projects, + ) # involved projects from single files (research_dataset.files) were accumulated # in the previous method, but for dirs, its just handy to get the keys from below # variables... for project_identifier in dirs_to_add_by_project.keys(): - changed_projects['files_added'].add(project_identifier) + changed_projects["files_added"].add(project_identifier) for project_identifier in dirs_to_remove_by_project.keys(): - changed_projects['files_removed'].add(project_identifier) + changed_projects["files_removed"].add(project_identifier) return { - 'files_to_add': files_to_add, - 'files_to_remove': files_to_remove, - 'files_to_keep': files_to_keep, - 'dirs_to_add_by_project': dirs_to_add_by_project, - 'dirs_to_remove_by_project': dirs_to_remove_by_project, - 'dirs_to_keep_by_project': dirs_to_keep_by_project, - 'changed_projects': changed_projects, + "files_to_add": files_to_add, + "files_to_remove": files_to_remove, + "files_to_keep": files_to_keep, + "dirs_to_add_by_project": dirs_to_add_by_project, + "dirs_to_remove_by_project": dirs_to_remove_by_project, + "dirs_to_keep_by_project": dirs_to_keep_by_project, + "changed_projects": changed_projects, } - def _find_new_dirs_to_add(self, file_description_changes, dirs_to_add_by_project, dirs_to_remove_by_project, - dirs_to_keep_by_project): + def _find_new_dirs_to_add( + self, + file_description_changes, + dirs_to_add_by_project, + dirs_to_remove_by_project, + dirs_to_keep_by_project, + ): """ Based on changes in research_metadata.directories (parameter file_description_changes), find out which directories should be kept when copying files from the previous version to the new version, and which new directories should be added. 
""" - assert 'directories' in file_description_changes + assert "directories" in file_description_changes - dir_identifiers = list(file_description_changes['directories']['added']) +\ - list(file_description_changes['directories']['removed']) + dir_identifiers = list(file_description_changes["directories"]["added"]) + list( + file_description_changes["directories"]["removed"] + ) - dir_details = Directory.objects.filter(identifier__in=dir_identifiers) \ - .values('project_identifier', 'identifier', 'directory_path') + dir_details = Directory.objects.filter(identifier__in=dir_identifiers).values( + "project_identifier", "identifier", "directory_path" + ) # skip deprecated datasets, since there might be deleted directories if len(dir_identifiers) != len(dir_details) and not self.deprecated: - existig_dirs = set( d['identifier'] for d in dir_details ) - missing_identifiers = [ d for d in dir_identifiers if d not in existig_dirs ] - raise ValidationError({'detail': ['the following directory identifiers were not found:\n%s' - % '\n'.join(missing_identifiers) ]}) + existig_dirs = set(d["identifier"] for d in dir_details) + missing_identifiers = [d for d in dir_identifiers if d not in existig_dirs] + raise ValidationError( + { + "detail": [ + "the following directory identifiers were not found:\n%s" + % "\n".join(missing_identifiers) + ] + } + ) for dr in dir_details: - if dr['identifier'] in file_description_changes['directories']['added']: + if dr["identifier"] in file_description_changes["directories"]["added"]: # include all new dirs found, to add files from an entirely new directory, # or to search an already existing directory for new files (and sub dirs). # as a result the actual set of files may or may not change. - dirs_to_add_by_project[dr['project_identifier']].append(dr['directory_path']) + dirs_to_add_by_project[dr["project_identifier"]].append(dr["directory_path"]) - elif dr['identifier'] in file_description_changes['directories']['removed']: - if not self._path_included_in_previous_metadata_version(dr['project_identifier'], dr['directory_path']): + elif dr["identifier"] in file_description_changes["directories"]["removed"]: + if not self._path_included_in_previous_metadata_version( + dr["project_identifier"], dr["directory_path"] + ): # only remove dirs that are not included by other directory paths. # as a result the actual set of files may change, if the path is not included # in the new directory additions - dirs_to_remove_by_project[dr['project_identifier']].append(dr['directory_path']) + dirs_to_remove_by_project[dr["project_identifier"]].append(dr["directory_path"]) # when keeping directories when copying, only the top level dirs are required top_dirs_by_project = self._get_top_level_parent_dirs_by_project( - file_description_changes['directories']['keep']) + file_description_changes["directories"]["keep"] + ) for project, dirs in top_dirs_by_project.items(): dirs_to_keep_by_project[project] = dirs - def _find_new_files_to_add(self, file_description_changes, files_to_add, files_to_remove, files_to_keep, - changed_projects): + def _find_new_files_to_add( + self, + file_description_changes, + files_to_add, + files_to_remove, + files_to_keep, + changed_projects, + ): """ Based on changes in research_metadata.files (parameter file_description_changes), find out which files should be kept when copying files from the previous version to the new version, and which new files should be added. 
""" - assert 'files' in file_description_changes + assert "files" in file_description_changes - add_and_keep_ids = list(file_description_changes['files']['added']) \ - + list(file_description_changes['files']['keep']) + add_and_keep_ids = list(file_description_changes["files"]["added"]) + list( + file_description_changes["files"]["keep"] + ) - add_and_keep = File.objects.filter(identifier__in=add_and_keep_ids) \ - .values('id', 'project_identifier', 'identifier', 'file_path') + add_and_keep = File.objects.filter(identifier__in=add_and_keep_ids).values( + "id", "project_identifier", "identifier", "file_path" + ) - removed_ids = list(file_description_changes['files']['removed']) + removed_ids = list(file_description_changes["files"]["removed"]) - removed = File.objects_unfiltered.filter(identifier__in=removed_ids) \ - .values('id', 'project_identifier', 'identifier', 'file_path') + removed = File.objects_unfiltered.filter(identifier__in=removed_ids).values( + "id", "project_identifier", "identifier", "file_path" + ) file_details = add_and_keep | removed if len(add_and_keep_ids) + len(removed_ids) != len(file_details) and not self.deprecated: - existig_files = set( f['identifier'] for f in file_details ) - missing_identifiers = [ f for f in add_and_keep_ids if f not in existig_files ] - missing_identifiers += [ f for f in removed_ids if f not in existig_files ] - raise ValidationError({'detail': ['the following file identifiers were not found:\n%s' - % '\n'.join(missing_identifiers) ]}) + existig_files = set(f["identifier"] for f in file_details) + missing_identifiers = [f for f in add_and_keep_ids if f not in existig_files] + missing_identifiers += [f for f in removed_ids if f not in existig_files] + raise ValidationError( + { + "detail": [ + "the following file identifiers were not found:\n%s" + % "\n".join(missing_identifiers) + ] + } + ) for f in file_details: - if f['identifier'] in file_description_changes['files']['added']: + if f["identifier"] in file_description_changes["files"]["added"]: # include all new files even if it already included by another directory's path, # to check later that it is not a file that was created later. # as a result the actual set of files may or may not change. - files_to_add.append(f['id']) - changed_projects['files_added'].add(f['project_identifier']) - elif f['identifier'] in file_description_changes['files']['removed']: - if not self._path_included_in_previous_metadata_version(f['project_identifier'], f['file_path']): + files_to_add.append(f["id"]) + changed_projects["files_added"].add(f["project_identifier"]) + elif f["identifier"] in file_description_changes["files"]["removed"]: + if not self._path_included_in_previous_metadata_version( + f["project_identifier"], f["file_path"] + ): # file is being removed. 
# path is not included by other directories in the previous version, # which means the actual set of files may change, if the path is not included # in the new directory additions - files_to_remove.append(f['id']) - changed_projects['files_removed'].add(f['project_identifier']) - elif f['identifier'] in file_description_changes['files']['keep']: - files_to_keep.append(f['id']) + files_to_remove.append(f["id"]) + changed_projects["files_removed"].add(f["project_identifier"]) + elif f["identifier"] in file_description_changes["files"]["keep"]: + files_to_keep.append(f["id"]) def _path_included_in_previous_metadata_version(self, project, path): """ Check if a path in a specific project is already included in the path of another directory included in the PREVIOUS VERSION dataset selected directories. """ - if not hasattr(self, '_previous_highest_level_dirs_by_project'): - if 'directories' not in self._initial_data['research_dataset']: + if not hasattr(self, "_previous_highest_level_dirs_by_project"): + if "directories" not in self._initial_data["research_dataset"]: return False dir_identifiers = [ - d['identifier'] for d in self._initial_data['research_dataset']['directories'] + d["identifier"] for d in self._initial_data["research_dataset"]["directories"] ] - self._previous_highest_level_dirs_by_project = self._get_top_level_parent_dirs_by_project(dir_identifiers) + self._previous_highest_level_dirs_by_project = ( + self._get_top_level_parent_dirs_by_project(dir_identifiers) + ) return any( - True for dr_path in self._previous_highest_level_dirs_by_project.get(project, []) - if path != dr_path and path.startswith('%s/' % dr_path) + True + for dr_path in self._previous_highest_level_dirs_by_project.get(project, []) + if path != dr_path and path.startswith("%s/" % dr_path) ) def delete(self, *args, **kwargs): if self.state == self.STATE_DRAFT: - _logger.info('Deleting draft dataset %s permanently' % self.identifier) + _logger.info("Deleting draft dataset %s permanently" % self.identifier) if self.previous_dataset_version: self.previous_dataset_version.next_dataset_version = None @@ -972,24 +1169,25 @@ def delete(self, *args, **kwargs): elif self.state == self.STATE_PUBLISHED: if self.has_alternate_records(): self._remove_from_alternate_record_set() - if is_metax_generated_doi_identifier(self.research_dataset['preferred_identifier']): - self.add_post_request_callable(DataciteDOIUpdate(self, self.research_dataset['preferred_identifier'], - 'delete')) + if is_metax_generated_doi_identifier(self.research_dataset["preferred_identifier"]): + self.add_post_request_callable( + DataciteDOIUpdate(self, self.research_dataset["preferred_identifier"], "delete") + ) - if self._dataset_has_rems_managed_access() and settings.REMS['ENABLED']: - self._pre_rems_deletion('dataset deletion') - super().save(update_fields=['rems_identifier', 'access_granter']) + if self._dataset_has_rems_managed_access() and settings.REMS["ENABLED"]: + self._pre_rems_deletion("dataset deletion") + super().save(update_fields=["rems_identifier", "access_granter"]) - self.add_post_request_callable(RabbitMQPublishRecord(self, 'delete')) + self.add_post_request_callable(RabbitMQPublishRecord(self, "delete")) log_args = { - 'event': 'dataset_deleted', - 'user_id': self.user_modified, - 'catalogrecord': { - 'identifier': self.identifier, - 'preferred_identifier': self.preferred_identifier, - 'data_catalog': self.data_catalog.catalog_json['identifier'], - } + "event": "dataset_deleted", + "user_id": self.user_modified, + "catalogrecord": { + 
"identifier": self.identifier, + "preferred_identifier": self.preferred_identifier, + "data_catalog": self.data_catalog.catalog_json["identifier"], + }, } if self.catalog_is_legacy(): # delete permanently instead of only marking as 'removed' @@ -998,8 +1196,8 @@ def delete(self, *args, **kwargs): return crid else: super().remove(*args, **kwargs) - log_args['catalogrecord']['date_removed'] = datetime_to_str(self.date_removed) - log_args['catalogrecord']['date_modified'] = datetime_to_str(self.date_modified) + log_args["catalogrecord"]["date_removed"] = datetime_to_str(self.date_removed) + log_args["catalogrecord"]["date_modified"] = datetime_to_str(self.date_modified) self.add_post_request_callable(DelayedLog(**log_args)) return self.id @@ -1008,26 +1206,28 @@ def deprecate(self, timestamp=None): self.deprecated = True self.date_deprecated = self.date_modified = timestamp or get_tz_aware_now_without_micros() - if self._dataset_has_rems_managed_access() and settings.REMS['ENABLED']: - self._pre_rems_deletion('dataset deprecation') - super().save(update_fields=['rems_identifier', 'access_granter']) + if self._dataset_has_rems_managed_access() and settings.REMS["ENABLED"]: + self._pre_rems_deletion("dataset deprecation") + super().save(update_fields=["rems_identifier", "access_granter"]) - super().save(update_fields=['deprecated', 'date_deprecated', 'date_modified']) - self.add_post_request_callable(DelayedLog( - event='dataset_deprecated', - catalogrecord={ - 'identifier': self.identifier, - 'date_deprecated': datetime_to_str(self.date_deprecated), - } - )) + super().save(update_fields=["deprecated", "date_deprecated", "date_modified"]) + self.add_post_request_callable( + DelayedLog( + event="dataset_deprecated", + catalogrecord={ + "identifier": self.identifier, + "date_deprecated": datetime_to_str(self.date_deprecated), + }, + ) + ) @property def identifiers_dict(self): try: return { - 'id': self.id, - 'identifier': self.identifier, - 'preferred_identifier': self.research_dataset['preferred_identifier'], + "id": self.id, + "identifier": self.identifier, + "preferred_identifier": self.research_dataset["preferred_identifier"], } except: return {} @@ -1035,14 +1235,14 @@ def identifiers_dict(self): @property def preferred_identifier(self): try: - return self.research_dataset['preferred_identifier'] + return self.research_dataset["preferred_identifier"] except: return None @property def metadata_version_identifier(self): try: - return self.research_dataset['metadata_version_identifier'] + return self.research_dataset["metadata_version_identifier"] except: return None @@ -1053,32 +1253,38 @@ def preservation_dataset_origin_version_exists(self): attribute does not exist on the instance at all, until the field has a proper value. Django weirdness? 
""" - return hasattr(self, 'preservation_dataset_origin_version') + return hasattr(self, "preservation_dataset_origin_version") def catalog_versions_datasets(self): - return self.data_catalog.catalog_json.get('dataset_versioning', False) is True + return self.data_catalog.catalog_json.get("dataset_versioning", False) is True def catalog_is_harvested(self): - return self.data_catalog.catalog_json.get('harvested', False) is True + return self.data_catalog.catalog_json.get("harvested", False) is True def catalog_is_legacy(self): - return self.data_catalog.catalog_json['identifier'] in LEGACY_CATALOGS + return self.data_catalog.catalog_json["identifier"] in LEGACY_CATALOGS def catalog_is_ida(self, data=None): if data: - return DataCatalog.objects.get(pk=data['data_catalog_id']).catalog_json['identifier'] == IDA_CATALOG - return self.data_catalog.catalog_json['identifier'] == IDA_CATALOG + return ( + DataCatalog.objects.get(pk=data["data_catalog_id"]).catalog_json["identifier"] + == IDA_CATALOG + ) + return self.data_catalog.catalog_json["identifier"] == IDA_CATALOG def catalog_is_att(self, data=None): if data: - return DataCatalog.objects.get(pk=data['data_catalog_id']).catalog_json['identifier'] == ATT_CATALOG - return self.data_catalog.catalog_json['identifier'] == ATT_CATALOG + return ( + DataCatalog.objects.get(pk=data["data_catalog_id"]).catalog_json["identifier"] + == ATT_CATALOG + ) + return self.data_catalog.catalog_json["identifier"] == ATT_CATALOG def catalog_is_pas(self): - return self.data_catalog.catalog_json['identifier'] == PAS_CATALOG + return self.data_catalog.catalog_json["identifier"] == PAS_CATALOG def catalog_is_dft(self): - return self.data_catalog.catalog_json['identifier'] == DFT_CATALOG + return self.data_catalog.catalog_json["identifier"] == DFT_CATALOG def is_published(self): return self.state == self.STATE_PUBLISHED @@ -1092,21 +1298,23 @@ def _save_as_draft(self): def _generate_issued_date(self): if not (self.catalog_is_harvested()): - if 'issued' not in self.research_dataset: + if "issued" not in self.research_dataset: current_time = get_tz_aware_now_without_micros() - self.research_dataset['issued'] = datetime_to_str(current_time)[0:10] + self.research_dataset["issued"] = datetime_to_str(current_time)[0:10] def get_metadata_version_listing(self): entries = [] for entry in self.research_dataset_versions.all(): - entries.append({ - 'id': entry.id, - 'date_created': entry.date_created, - 'metadata_version_identifier': entry.metadata_version_identifier, - }) + entries.append( + { + "id": entry.id, + "date_created": entry.date_created, + "metadata_version_identifier": entry.metadata_version_identifier, + } + ) if entry.stored_to_pas: # dont include null values - entries[-1]['stored_to_pas'] = entry.stored_to_pas + entries[-1]["stored_to_pas"] = entry.stored_to_pas return entries def _get_user_info_for_rems(self): @@ -1116,23 +1324,23 @@ def _get_user_info_for_rems(self): if self.request.user.is_service: # use constant keys for easier validation user_info = { - 'userid': self.access_granter.get('userid'), - 'name': self.access_granter.get('name'), - 'email': self.access_granter.get('email') + "userid": self.access_granter.get("userid"), + "name": self.access_granter.get("name"), + "email": self.access_granter.get("email"), } else: # end user api user_info = { - 'userid': self.request.user.token.get('CSCUserName'), - 'name': self.request.user.token.get('displayName'), - 'email': self.request.user.token.get('email') + "userid": self.request.user.token.get("CSCUserName"), 
+ "name": self.request.user.token.get("displayName"), + "email": self.request.user.token.get("email"), } if any([v is None for v in user_info.values()]): - raise Http400('Could not find the needed user information for REMS') + raise Http400("Could not find the needed user information for REMS") if not all([isinstance(v, str) for v in user_info.values()]): - raise Http400('user information fields must be string') + raise Http400("user information fields must be string") return user_info @@ -1140,32 +1348,38 @@ def _validate_for_rems(self): """ Ensures that all necessary information for REMS access """ - if self._access_type_is_permit() and not self.research_dataset['access_rights'].get('license', False): - raise Http400('You must define license for dataset in order to make it REMS manageable') + if self._access_type_is_permit() and not self.research_dataset["access_rights"].get( + "license", False + ): + raise Http400("You must define license for dataset in order to make it REMS manageable") if self.request.user.is_service and not self.access_granter: - raise Http400('Missing access_granter') + raise Http400("Missing access_granter") def _assert_api_version(self): if not self.api_meta: # This should be possible only for test data - _logger.warning(f'no api_meta found for {self.identifier}') + _logger.warning(f"no api_meta found for {self.identifier}") return - if not self.api_meta['version'] == self.api_version: - raise Http400('Please use the correct api version to edit this dataset') + if not self.api_meta["version"] == self.api_version: + raise Http400("Please use the correct api version to edit this dataset") def _set_api_version(self): # TODO: Can possibly be deleted when v1 api is removed from use and all # datasets have been migrated to v2 - self.api_meta['version'] = self.api_version + self.api_meta["version"] = self.api_version def _pre_create_operations(self, pid_type=None): - if not self._check_catalog_permissions(self.data_catalog.catalog_record_group_create, - self.data_catalog.catalog_record_services_create): - raise Http403({ 'detail': [ 'You are not permitted to create datasets in this data catalog.' ]}) + if not self._check_catalog_permissions( + self.data_catalog.catalog_record_group_create, + self.data_catalog.catalog_record_services_create, + ): + raise Http403( + {"detail": ["You are not permitted to create datasets in this data catalog."]} + ) - self.research_dataset['metadata_version_identifier'] = generate_uuid_identifier() + self.research_dataset["metadata_version_identifier"] = generate_uuid_identifier() self.identifier = generate_uuid_identifier() if self.catalog_is_pas(): @@ -1180,56 +1394,63 @@ def _pre_create_operations(self, pid_type=None): pass elif self.catalog_is_legacy(): - if 'preferred_identifier' not in self.research_dataset: - raise ValidationError({ - 'detail': [ - 'Selected catalog %s is a legacy catalog. Preferred identifiers are not ' - 'automatically generated for datasets stored in legacy catalogs, nor is ' - 'their uniqueness enforced. Please provide a value for dataset field ' - 'preferred_identifier.' - % self.data_catalog.catalog_json['identifier'] - ] - }) + if "preferred_identifier" not in self.research_dataset: + raise ValidationError( + { + "detail": [ + "Selected catalog %s is a legacy catalog. Preferred identifiers are not " + "automatically generated for datasets stored in legacy catalogs, nor is " + "their uniqueness enforced. Please provide a value for dataset field " + "preferred_identifier." 
% self.data_catalog.catalog_json["identifier"] + ] + } + ) _logger.info( - 'Catalog %s is a legacy catalog - not generating pid' - % self.data_catalog.catalog_json['identifier'] + "Catalog %s is a legacy catalog - not generating pid" + % self.data_catalog.catalog_json["identifier"] ) elif self._save_as_draft(): self.state = self.STATE_DRAFT - self.research_dataset['preferred_identifier'] = 'draft:%s' % self.identifier + self.research_dataset["preferred_identifier"] = "draft:%s" % self.identifier if self._get_preferred_identifier_type_from_request() == IdentifierType.DOI: self.use_doi_for_published = True else: self.use_doi_for_published = False else: if pref_id_type == IdentifierType.URN: - self.research_dataset['preferred_identifier'] = generate_uuid_identifier(urn_prefix=True) + self.research_dataset["preferred_identifier"] = generate_uuid_identifier( + urn_prefix=True + ) elif pref_id_type == IdentifierType.DOI: if not (self.catalog_is_ida() or self.catalog_is_pas()): raise Http400("Cannot create DOI for other than datasets in IDA or PAS catalog") - _logger.debug('pref_id_type == %s, generating doi' % pref_id_type) + _logger.debug("pref_id_type == %s, generating doi" % pref_id_type) doi_id = generate_doi_identifier() - self.research_dataset['preferred_identifier'] = doi_id + self.research_dataset["preferred_identifier"] = doi_id self.preservation_identifier = doi_id else: - _logger.debug("Identifier type not specified in the request. Using URN identifier for pref id") - self.research_dataset['preferred_identifier'] = generate_uuid_identifier(urn_prefix=True) + _logger.debug( + "Identifier type not specified in the request. Using URN identifier for pref id" + ) + self.research_dataset["preferred_identifier"] = generate_uuid_identifier( + urn_prefix=True + ) if not self.metadata_owner_org: # field metadata_owner_org is optional, but must be set. in case it is omitted, # derive from metadata_provider_org. self.metadata_owner_org = self.metadata_provider_org - if 'remote_resources' in self.research_dataset: + if "remote_resources" in self.research_dataset: self._calculate_total_remote_resources_byte_size() if self.cumulative_state == self.CUMULATIVE_STATE_CLOSED: - raise Http400('Cannot create cumulative dataset with state closed') + raise Http400("Cannot create cumulative dataset with state closed") elif self.cumulative_state == self.CUMULATIVE_STATE_YES: if self.preservation_state > self.PRESERVATION_STATE_INITIALIZED: - raise Http400('Dataset cannot be cumulative if it is in PAS process') + raise Http400("Dataset cannot be cumulative if it is in PAS process") self.date_cumulation_started = self.date_created @@ -1238,12 +1459,12 @@ def _pre_create_operations(self, pid_type=None): self._set_api_version() def _post_create_operations(self): - if 'files' in self.research_dataset or 'directories' in self.research_dataset: + if "files" in self.research_dataset or "directories" in self.research_dataset: # files must be added after the record itself has been created, to be able # to insert into a many2many relation. 
            self.files.add(*self._get_dataset_selected_file_ids())
            self._calculate_total_files_byte_size()
-            super().save(update_fields=['research_dataset']) # save byte size calculation
+            super().save(update_fields=["research_dataset"])  # save byte size calculation
            self.calculate_directory_byte_sizes_and_file_counts()

        if self.cumulative_state == self.CUMULATIVE_STATE_YES:
@@ -1265,138 +1486,153 @@ def _post_create_operations(self):
            dvs.save()
            dvs.records.add(self)

-        if (get_identifier_type(self.preferred_identifier) == IdentifierType.DOI or
-                self.use_doi_for_published is True):
+        if (
+            get_identifier_type(self.preferred_identifier) == IdentifierType.DOI
+            or self.use_doi_for_published is True
+        ):
            self._validate_cr_against_datacite_schema()
-            if is_metax_generated_doi_identifier(self.research_dataset['preferred_identifier']):
-                self.add_post_request_callable(DataciteDOIUpdate(self,
-                    self.research_dataset['preferred_identifier'], 'create'))
+            if is_metax_generated_doi_identifier(self.research_dataset["preferred_identifier"]):
+                self.add_post_request_callable(
+                    DataciteDOIUpdate(self, self.research_dataset["preferred_identifier"], "create")
+                )

-        if self._dataset_has_rems_managed_access() and settings.REMS['ENABLED']:
+        if self._dataset_has_rems_managed_access() and settings.REMS["ENABLED"]:
            self._pre_rems_creation()
-            super().save(update_fields=['rems_identifier', 'access_granter'])
+            super().save(update_fields=["rems_identifier", "access_granter"])

        super().save()

-        self.add_post_request_callable(RabbitMQPublishRecord(self, 'create'))
+        self.add_post_request_callable(RabbitMQPublishRecord(self, "create"))

        _logger.info(
-            'Created a new <CatalogRecord id: %d, identifier: %s, preferred_identifier: %s>'
-            % (self.id, self.identifier, self.preferred_identifier)
+            "Created a new <CatalogRecord id: %d, identifier: %s, preferred_identifier: %s>"
+            % (self.id, self.identifier, self.preferred_identifier)
        )

        log_args = {
-            'catalogrecord': {
-                'identifier': self.identifier,
-                'preferred_identifier': self.preferred_identifier,
-                'data_catalog': self.data_catalog.catalog_json['identifier'],
-                'date_created': datetime_to_str(self.date_created),
-                'metadata_owner_org': self.metadata_owner_org,
+            "catalogrecord": {
+                "identifier": self.identifier,
+                "preferred_identifier": self.preferred_identifier,
+                "data_catalog": self.data_catalog.catalog_json["identifier"],
+                "date_created": datetime_to_str(self.date_created),
+                "metadata_owner_org": self.metadata_owner_org,
            },
-            'user_id': self.user_created or self.service_created,
+            "user_id": self.user_created or self.service_created,
        }

        if self.previous_dataset_version:
-            log_args['event'] = 'dataset_version_created'
-            log_args['catalogrecord']['previous_version_preferred_identifier'] \
-                = self.previous_dataset_version.preferred_identifier
+            log_args["event"] = "dataset_version_created"
+            log_args["catalogrecord"][
+                "previous_version_preferred_identifier"
+            ] = self.previous_dataset_version.preferred_identifier
        else:
-            log_args['event'] = 'dataset_created'
+            log_args["event"] = "dataset_created"

        self.add_post_request_callable(DelayedLog(**log_args))

    def _pre_update_operations(self):
-        if not self._check_catalog_permissions(self.data_catalog.catalog_record_group_edit,
-                self.data_catalog.catalog_record_services_edit):
-            raise Http403({ 'detail': [ 'You are not permitted to edit datasets in this data catalog.'
]}) + if not self._check_catalog_permissions( + self.data_catalog.catalog_record_group_edit, + self.data_catalog.catalog_record_services_edit, + ): + raise Http403( + {"detail": ["You are not permitted to edit datasets in this data catalog."]} + ) - if self.field_changed('api_meta'): - self.api_meta = self._initial_data['api_meta'] + if self.field_changed("api_meta"): + self.api_meta = self._initial_data["api_meta"] # possibly raises 400 self._assert_api_version() - if self.field_changed('identifier'): + if self.field_changed("identifier"): # read-only - self.identifier = self._initial_data['identifier'] + self.identifier = self._initial_data["identifier"] - if self.field_changed('research_dataset.metadata_version_identifier'): + if self.field_changed("research_dataset.metadata_version_identifier"): # read-only - self.research_dataset['metadata_version_identifier'] = \ - self._initial_data['research_dataset']['metadata_version_identifier'] + self.research_dataset["metadata_version_identifier"] = self._initial_data[ + "research_dataset" + ]["metadata_version_identifier"] - if self.field_changed('research_dataset.preferred_identifier'): + if self.field_changed("research_dataset.preferred_identifier"): if not (self.catalog_is_harvested() or self.catalog_is_legacy()): - raise Http400("Cannot change preferred_identifier in datasets in non-harvested catalogs") + raise Http400( + "Cannot change preferred_identifier in datasets in non-harvested catalogs" + ) - if self.field_changed('research_dataset.total_files_byte_size'): + if self.field_changed("research_dataset.total_files_byte_size"): # read-only - if 'total_files_byte_size' in self._initial_data['research_dataset']: - self.research_dataset['total_files_byte_size'] = \ - self._initial_data['research_dataset']['total_files_byte_size'] + if "total_files_byte_size" in self._initial_data["research_dataset"]: + self.research_dataset["total_files_byte_size"] = self._initial_data[ + "research_dataset" + ]["total_files_byte_size"] else: - self.research_dataset.pop('total_files_byte_size') + self.research_dataset.pop("total_files_byte_size") - if self.field_changed('research_dataset.total_remote_resources_byte_size'): + if self.field_changed("research_dataset.total_remote_resources_byte_size"): # read-only - if 'total_remote_resources_byte_size' in self._initial_data['research_dataset']: - self.research_dataset['total_remote_resources_byte_size'] = \ - self._initial_data['research_dataset']['total_remote_resources_byte_size'] + if "total_remote_resources_byte_size" in self._initial_data["research_dataset"]: + self.research_dataset["total_remote_resources_byte_size"] = self._initial_data[ + "research_dataset" + ]["total_remote_resources_byte_size"] else: - self.research_dataset.pop('total_remote_resources_byte_size') + self.research_dataset.pop("total_remote_resources_byte_size") - if self.field_changed('preservation_state'): + if self.field_changed("preservation_state"): if self.cumulative_state == self.CUMULATIVE_STATE_YES: - raise Http400('Changing preservation state is not allowed while dataset cumulation is active') + raise Http400( + "Changing preservation state is not allowed while dataset cumulation is active" + ) self._handle_preservation_state_changed() - if self.field_changed('deprecated') and self._initial_data['deprecated'] is True: + if self.field_changed("deprecated") and self._initial_data["deprecated"] is True: raise Http400("Cannot change dataset deprecation state from true to false") - if self.field_changed('date_deprecated') and 
self._initial_data['date_deprecated']: + if self.field_changed("date_deprecated") and self._initial_data["date_deprecated"]: raise Http400("Cannot change dataset deprecation date when it has been once set") - if self.field_changed('preservation_identifier'): - self.preservation_identifier = self._initial_data['preservation_identifier'] + if self.field_changed("preservation_identifier"): + self.preservation_identifier = self._initial_data["preservation_identifier"] if not self.metadata_owner_org: # can not be updated to null - self.metadata_owner_org = self._initial_data['metadata_owner_org'] + self.metadata_owner_org = self._initial_data["metadata_owner_org"] - if self.field_changed('metadata_provider_org'): + if self.field_changed("metadata_provider_org"): # read-only after creating - self.metadata_provider_org = self._initial_data['metadata_provider_org'] + self.metadata_provider_org = self._initial_data["metadata_provider_org"] - if self.field_changed('metadata_provider_user'): + if self.field_changed("metadata_provider_user"): # read-only after creating - self.metadata_provider_user = self._initial_data['metadata_provider_user'] + self.metadata_provider_user = self._initial_data["metadata_provider_user"] - if self.field_changed('data_catalog_id'): + if self.field_changed("data_catalog_id"): if self.catalog_is_att(self._initial_data) and self.catalog_is_ida(): - self.research_dataset.pop('remote_resources') - self.research_dataset.pop('total_remote_resources_byte_size') + self.research_dataset.pop("remote_resources") + self.research_dataset.pop("total_remote_resources_byte_size") self._handle_metadata_versioning() - if settings.REMS['ENABLED']: + if settings.REMS["ENABLED"]: self._pre_update_handle_rems() - if self.field_changed('research_dataset'): + if self.field_changed("research_dataset"): if self.preservation_state in ( - self.PRESERVATION_STATE_INVALID_METADATA, # 40 - self.PRESERVATION_STATE_METADATA_VALIDATION_FAILED, # 50 - self.PRESERVATION_STATE_VALID_METADATA): # 70 + self.PRESERVATION_STATE_INVALID_METADATA, # 40 + self.PRESERVATION_STATE_METADATA_VALIDATION_FAILED, # 50 + self.PRESERVATION_STATE_VALID_METADATA, + ): # 70 # notifies the user in Hallintaliittyma that the metadata needs to be re-validated - self.preservation_state = self.PRESERVATION_STATE_VALIDATED_METADATA_UPDATED # 60 + self.preservation_state = self.PRESERVATION_STATE_VALIDATED_METADATA_UPDATED # 60 self.preservation_state_modified = self.date_modified self.update_datacite = True else: self.update_datacite = False - if self.field_changed('cumulative_state'): + if self.field_changed("cumulative_state"): raise Http400( "Cannot change cumulative state using REST API. " "use API /rpc/datasets/change_cumulative_state to change cumulative state." 
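
The read-only and permission guards in these hunks rely on the per-field change
tracking set up in __init__ via track_fields(): field_changed() compares a
snapshot taken at load time against the current value, including dotted paths
like "research_dataset.preferred_identifier", and _initial_data holds the
snapshots. A minimal sketch of that mechanism, with assumed, simplified
internals; the real implementation lives in models/common.py and is not shown
in this patch:

    import copy

    class TrackedFieldsSketch:
        def track_fields(self, *fields):
            # snapshot each tracked top-level field once; dotted paths such as
            # "research_dataset.preferred_identifier" share their parent's snapshot
            self._initial_data = {}
            for field in fields:
                parent = field.split(".", 1)[0]
                if parent not in self._initial_data:
                    self._initial_data[parent] = copy.deepcopy(getattr(self, parent, None))

        def field_changed(self, field):
            # walk both the snapshot and the current value along the dotted path
            parts = field.split(".")
            initial = self._initial_data.get(parts[0])
            current = getattr(self, parts[0], None)
            for key in parts[1:]:
                initial = initial.get(key) if isinstance(initial, dict) else None
                current = current.get(key) if isinstance(current, dict) else None
            return initial != current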
@@ -1405,13 +1641,14 @@ def _pre_update_operations(self): if self.catalog_is_pas(): actual_files_changed, _ = self._files_changed() if actual_files_changed: - _logger.info('File changes detected in PAS catalog dataset - aborting') - raise Http400({ 'detail': ['File changes not permitted in PAS catalog' ]}) + _logger.info("File changes detected in PAS catalog dataset - aborting") + raise Http400({"detail": ["File changes not permitted in PAS catalog"]}) - if self.catalog_versions_datasets() and \ - (not self.preserve_version or self.cumulative_state == self.CUMULATIVE_STATE_YES): + if self.catalog_versions_datasets() and ( + not self.preserve_version or self.cumulative_state == self.CUMULATIVE_STATE_YES + ): - if not self.field_changed('research_dataset'): + if not self.field_changed("research_dataset"): # proceed directly to updating current record without any extra measures... return @@ -1419,12 +1656,16 @@ def _pre_update_operations(self): if actual_files_changed: - if self.preservation_state > self.PRESERVATION_STATE_INITIALIZED: # state > 0 - raise Http400({ 'detail': [ - 'Changing files is not allowed when dataset is in PAS process. Current ' - 'preservation_state = %d. In order to alter associated files, change preservation_state ' - 'back to 0.' % self.preservation_state - ]}) + if self.preservation_state > self.PRESERVATION_STATE_INITIALIZED: # state > 0 + raise Http400( + { + "detail": [ + "Changing files is not allowed when dataset is in PAS process. Current " + "preservation_state = %d. In order to alter associated files, change preservation_state " + "back to 0." % self.preservation_state + ] + } + ) elif self._files_added_for_first_time(): # first update from 0 to n files should not create a dataset version. all later updates @@ -1435,10 +1676,10 @@ def _pre_update_operations(self): self.calculate_directory_byte_sizes_and_file_counts() elif self.cumulative_state == self.CUMULATIVE_STATE_YES: - if file_changes['files_to_remove'] or file_changes['dirs_to_remove_by_project']: + if file_changes["files_to_remove"] or file_changes["dirs_to_remove_by_project"]: raise Http400( - 'Cannot delete files or directories from cumulative dataset. ' - 'In order to remove files, close dataset cumulation.' + "Cannot delete files or directories from cumulative dataset. " + "In order to remove files, close dataset cumulation." ) self._handle_cumulative_file_addition(file_changes) @@ -1456,37 +1697,50 @@ def _pre_update_operations(self): changes = self._get_metadata_file_changes() - if any((changes['files']['added'], changes['files']['removed'], - changes['directories']['added'], changes['directories']['removed'])): - - raise ValidationError({ - 'detail': [ - 'currently trying to preserve version while making changes which may result in files ' - 'being changed is not supported.' - ] - }) + if any( + ( + changes["files"]["added"], + changes["files"]["removed"], + changes["directories"]["added"], + changes["directories"]["removed"], + ) + ): + + raise ValidationError( + { + "detail": [ + "currently trying to preserve version while making changes which may result in files " + "being changed is not supported." 
+ ] + } + ) - if self.catalog_is_harvested() and self.field_changed('research_dataset.preferred_identifier'): + if self.catalog_is_harvested() and self.field_changed( + "research_dataset.preferred_identifier" + ): self._handle_preferred_identifier_changed() def _post_update_operations(self): - if get_identifier_type(self.preferred_identifier) == IdentifierType.DOI and \ - self.update_datacite: + if ( + get_identifier_type(self.preferred_identifier) == IdentifierType.DOI + and self.update_datacite + ): self._validate_cr_against_datacite_schema() - if is_metax_generated_doi_identifier(self.research_dataset['preferred_identifier']): - self.add_post_request_callable(DataciteDOIUpdate(self, - self.research_dataset['preferred_identifier'], 'update')) + if is_metax_generated_doi_identifier(self.research_dataset["preferred_identifier"]): + self.add_post_request_callable( + DataciteDOIUpdate(self, self.research_dataset["preferred_identifier"], "update") + ) - self.add_post_request_callable(RabbitMQPublishRecord(self, 'update')) + self.add_post_request_callable(RabbitMQPublishRecord(self, "update")) log_args = { - 'event': 'dataset_updated', - 'user_id': self.user_modified or self.service_modified, - 'catalogrecord': { - 'identifier': self.identifier, - 'preferred_identifier': self.preferred_identifier, - 'data_catalog': self.data_catalog.catalog_json['identifier'], - 'date_modified': datetime_to_str(self.date_modified), + "event": "dataset_updated", + "user_id": self.user_modified or self.service_modified, + "catalogrecord": { + "identifier": self.identifier, + "preferred_identifier": self.preferred_identifier, + "data_catalog": self.data_catalog.catalog_json["identifier"], + "date_modified": datetime_to_str(self.date_modified), }, } @@ -1498,9 +1752,11 @@ def _validate_cr_against_datacite_schema(self): DataciteService, convert_cr_to_datacite_cr_json, ) + try: DataciteService().get_validated_datacite_json( - convert_cr_to_datacite_cr_json(self), True) + convert_cr_to_datacite_cr_json(self), True + ) except DataciteException as e: raise Http400(str(e)) @@ -1510,30 +1766,35 @@ def _pre_update_handle_rems(self): if self._dataset_has_rems_managed_access(): self._pre_rems_creation() else: - self._pre_rems_deletion(reason='access type change') + self._pre_rems_deletion(reason="access type change") elif self._dataset_license_changed() and self._dataset_has_rems_managed_access(): if self._dataset_has_license(): self.add_post_request_callable( - REMSUpdate(self, 'update', rems_id=self.rems_identifier, reason='license change') + REMSUpdate( + self, + "update", + rems_id=self.rems_identifier, + reason="license change", + ) ) self.rems_identifier = generate_uuid_identifier() # make sure that access_granter is not changed during license update - self.access_granter = self._initial_data['access_granter'] + self.access_granter = self._initial_data["access_granter"] else: - self._pre_rems_deletion(reason='license deletion') + self._pre_rems_deletion(reason="license deletion") - elif self.field_changed('access_granter'): + elif self.field_changed("access_granter"): # do not allow access_granter changes if no real REMS changes occur - self.access_granter = self._initial_data['access_granter'] + self.access_granter = self._initial_data["access_granter"] def _handle_cumulative_file_addition(self, file_changes): """ This method adds files to dataset only if they are explicitly mentioned in research_dataset. Changes in already included directories are not checked. 
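        An illustrative shape for the file_changes argument (key names are
        taken from the surrounding code; the values are made-up examples):

            file_changes = {
                "files_to_add": [101, 102],  # db ids of File rows
                "dirs_to_add_by_project": {"project_x": ["/data/new_dir"]},
            }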
""" - sql_select_and_insert_files_by_dir_path = ''' + sql_select_and_insert_files_by_dir_path = """ insert into metax_api_catalogrecord_files (catalogrecord_id, file_id) select %s as catalogrecord_id, f.id from metax_api_file as f @@ -1547,22 +1808,23 @@ def _handle_cumulative_file_addition(self, file_changes): where catalogrecord_id = %s ) returning id - ''' + """ sql_params_insert_dirs = [self.id] add_dirs_sql = [] - for project, dir_paths in file_changes['dirs_to_add_by_project'].items(): + for project, dir_paths in file_changes["dirs_to_add_by_project"].items(): for dir_path in dir_paths: - add_dirs_sql.append("(f.project_identifier = %s and f.file_path like (%s || '/%%'))") + add_dirs_sql.append( + "(f.project_identifier = %s and f.file_path like (%s || '/%%'))" + ) sql_params_insert_dirs.extend([project, dir_path]) sql_select_and_insert_files_by_dir_path = sql_select_and_insert_files_by_dir_path.replace( - 'COMPARE_PROJECT_AND_FILE_PATHS', - ' or '.join(add_dirs_sql) + "COMPARE_PROJECT_AND_FILE_PATHS", " or ".join(add_dirs_sql) ) sql_params_insert_dirs.extend([self.id]) - sql_insert_single_files = ''' + sql_insert_single_files = """ insert into metax_api_catalogrecord_files (catalogrecord_id, file_id) select %s as catalogrecord_id, f.id from metax_api_file as f @@ -1574,20 +1836,24 @@ def _handle_cumulative_file_addition(self, file_changes): where catalogrecord_id = %s ) returning id - ''' - sql_params_insert_single = [self.id, tuple(file_changes['files_to_add']), self.id] + """ + sql_params_insert_single = [ + self.id, + tuple(file_changes["files_to_add"]), + self.id, + ] with connection.cursor() as cr: n_files_added = 0 - if file_changes['files_to_add']: + if file_changes["files_to_add"]: cr.execute(sql_insert_single_files, sql_params_insert_single) n_files_added += cr.rowcount - if file_changes['dirs_to_add_by_project']: + if file_changes["dirs_to_add_by_project"]: cr.execute(sql_select_and_insert_files_by_dir_path, sql_params_insert_dirs) n_files_added += cr.rowcount - _logger.info('Added %d files to cumulative dataset %s' % (n_files_added, self.identifier)) + _logger.info("Added %d files to cumulative dataset %s" % (n_files_added, self.identifier)) self._calculate_total_files_byte_size() self.calculate_directory_byte_sizes_and_file_counts() @@ -1598,7 +1864,7 @@ def _files_added_for_first_time(self): """ Find out if this update is the first time files are being added/changed since the dataset's creation. 
""" - if self.files(manager='objects_unfiltered').exists(): + if self.files(manager="objects_unfiltered").exists(): # current version already has files return False @@ -1609,9 +1875,12 @@ def _files_added_for_first_time(self): return False metadata_versions_with_files_exist = ResearchDatasetVersion.objects.filter( - Q(Q(research_dataset__files__isnull=False) | Q(research_dataset__directories__isnull=False)), - catalog_record_id=self.id) \ - .exists() + Q( + Q(research_dataset__files__isnull=False) + | Q(research_dataset__directories__isnull=False) + ), + catalog_record_id=self.id, + ).exists() # metadata_versions_with_files_exist == True implies this "0 to n" update without # creating a new dataset version already occurred once @@ -1626,7 +1895,7 @@ def _pre_rems_creation(self): user_info = self._get_user_info_for_rems() self.access_granter = user_info self.rems_identifier = generate_uuid_identifier() - self.add_post_request_callable(REMSUpdate(self, 'create', user_info=user_info)) + self.add_post_request_callable(REMSUpdate(self, "create", user_info=user_info)) def _pre_rems_deletion(self, reason): """ @@ -1634,7 +1903,7 @@ def _pre_rems_deletion(self, reason): corresponding REMS entity. """ self.add_post_request_callable( - REMSUpdate(self, 'close', rems_id=self.rems_identifier, reason=reason) + REMSUpdate(self, "close", rems_id=self.rems_identifier, reason=reason) ) self.rems_identifier = None self.access_granter = None @@ -1655,7 +1924,9 @@ def _dataset_rems_changed(self): """ Check if dataset is updated so that REMS needs to be updated. """ - return self.catalog_is_ida() and (self._dataset_rems_access_type_changed() or self._dataset_license_changed()) + return self.catalog_is_ida() and ( + self._dataset_rems_access_type_changed() or self._dataset_license_changed() + ) def _dataset_has_license(self): """ @@ -1671,24 +1942,27 @@ def _dataset_license_changed(self): """ from metax_api.services import CatalogRecordService as CRS - return CRS.get_research_dataset_license_url(self.research_dataset) != \ - CRS.get_research_dataset_license_url(self._initial_data['research_dataset']) + return CRS.get_research_dataset_license_url( + self.research_dataset + ) != CRS.get_research_dataset_license_url(self._initial_data["research_dataset"]) def _calculate_total_files_byte_size(self): rd = self.research_dataset - if 'files' in rd or 'directories' in rd: - rd['total_files_byte_size'] = self.files.aggregate(Sum('byte_size'))['byte_size__sum'] or 0 + if "files" in rd or "directories" in rd: + rd["total_files_byte_size"] = ( + self.files.aggregate(Sum("byte_size"))["byte_size__sum"] or 0 + ) else: - rd['total_files_byte_size'] = 0 + rd["total_files_byte_size"] = 0 def _calculate_total_remote_resources_byte_size(self): rd = self.research_dataset - if 'remote_resources' in rd: - rd['total_remote_resources_byte_size'] = sum( - rr['byte_size'] for rr in rd['remote_resources'] if 'byte_size' in rr + if "remote_resources" in rd: + rd["total_remote_resources_byte_size"] = sum( + rr["byte_size"] for rr in rd["remote_resources"] if "byte_size" in rr ) else: - rd['total_remote_resources_byte_size'] = 0 + rd["total_remote_resources_byte_size"] = 0 def _get_dataset_selected_file_ids(self): """ @@ -1696,14 +1970,19 @@ def _get_dataset_selected_file_ids(self): of all unique individual files currently in the db. 
""" file_ids = [] - file_changes = { 'changed_projects': defaultdict(set) } + file_changes = {"changed_projects": defaultdict(set)} - if 'files' in self.research_dataset: - file_ids.extend(self._get_file_ids_from_file_list(self.research_dataset['files'], file_changes)) + if "files" in self.research_dataset: + file_ids.extend( + self._get_file_ids_from_file_list(self.research_dataset["files"], file_changes) + ) - if 'directories' in self.research_dataset: - file_ids.extend(self._get_file_ids_from_dir_list(self.research_dataset['directories'], - file_ids, file_changes)) + if "directories" in self.research_dataset: + file_ids.extend( + self._get_file_ids_from_dir_list( + self.research_dataset["directories"], file_ids, file_changes + ) + ) self._check_changed_files_permissions(file_changes) @@ -1719,20 +1998,28 @@ def _get_file_ids_from_file_list(self, file_list, file_changes): if not file_list: return [] - file_pids = [ f['identifier'] for f in file_list ] + file_pids = [f["identifier"] for f in file_list] - files = File.objects.filter(identifier__in=file_pids).values('id', 'identifier', 'project_identifier') + files = File.objects.filter(identifier__in=file_pids).values( + "id", "identifier", "project_identifier" + ) file_ids = [] for f in files: - file_ids.append(f['id']) - file_changes['changed_projects']['files_added'].add(f['project_identifier']) + file_ids.append(f["id"]) + file_changes["changed_projects"]["files_added"].add(f["project_identifier"]) if len(file_ids) != len(file_pids): - missing_identifiers = [ pid for pid in file_pids if pid not in set(f['identifier'] for f in files)] - raise ValidationError({ 'detail': [ - 'some requested files were not found. file identifiers not found:\n%s' - % '\n'.join(missing_identifiers) - ]}) + missing_identifiers = [ + pid for pid in file_pids if pid not in set(f["identifier"] for f in files) + ] + raise ValidationError( + { + "detail": [ + "some requested files were not found. 
file identifiers not found:\n%s" + % "\n".join(missing_identifiers) + ] + } + ) return file_ids @@ -1748,7 +2035,7 @@ def _get_file_ids_from_dir_list(self, dirs_list, ignore_files, file_changes): if not dirs_list: return [] - dir_identifiers = [ d['identifier'] for d in dirs_list ] + dir_identifiers = [d["identifier"] for d in dirs_list] highest_level_dirs_by_project = self._get_top_level_parent_dirs_by_project(dir_identifiers) @@ -1759,11 +2046,11 @@ def _get_file_ids_from_dir_list(self, dirs_list, ignore_files, file_changes): for dir_path in dir_paths: files = files | File.objects.filter( project_identifier=project_identifier, - file_path__startswith='%s/' % dir_path + file_path__startswith="%s/" % dir_path, ) - file_changes['changed_projects']['files_added'].add(project_identifier) + file_changes["changed_projects"]["files_added"].add(project_identifier) - return files.exclude(id__in=ignore_files).values_list('id', flat=True) + return files.exclude(id__in=ignore_files).values_list("id", flat=True) def _get_top_level_parent_dirs_by_project(self, dir_identifiers): """ @@ -1776,23 +2063,31 @@ def _get_top_level_parent_dirs_by_project(self, dir_identifiers): if not dir_identifiers: return {} - dirs = Directory.objects.filter(identifier__in=dir_identifiers) \ - .values('project_identifier', 'directory_path', 'identifier') \ - .order_by('project_identifier', 'directory_path') + dirs = ( + Directory.objects.filter(identifier__in=dir_identifiers) + .values("project_identifier", "directory_path", "identifier") + .order_by("project_identifier", "directory_path") + ) # skip deprecated datasets, since there might be deleted directories if len(dirs) != len(dir_identifiers) and not self.deprecated: - missing_identifiers = [ pid for pid in dir_identifiers if pid not in set(d['identifier'] for d in dirs)] - raise ValidationError({ 'detail': [ - 'some requested directories were not found. directory identifiers not found:\n%s' - % '\n'.join(missing_identifiers) - ]}) + missing_identifiers = [ + pid for pid in dir_identifiers if pid not in set(d["identifier"] for d in dirs) + ] + raise ValidationError( + { + "detail": [ + "some requested directories were not found. directory identifiers not found:\n%s" + % "\n".join(missing_identifiers) + ] + } + ) # group directory paths by project dirs_by_project = defaultdict(list) for dr in dirs: - dirs_by_project[dr['project_identifier']].append(dr['directory_path']) + dirs_by_project[dr["project_identifier"]].append(dr["directory_path"]) top_level_dirs_by_project = defaultdict(list) @@ -1800,14 +2095,16 @@ def _get_top_level_parent_dirs_by_project(self, dir_identifiers): for path in dir_paths: - dir_is_root = [ p.startswith('%s/' % path) for p in dir_paths if p != path ] + dir_is_root = [p.startswith("%s/" % path) for p in dir_paths if p != path] if all(dir_is_root): # found the root dir. disregard all the rest of the paths, if there were any. top_level_dirs_by_project[proj] = [path] break else: - path_contained_by_other_paths = [ path.startswith('%s/' % p) for p in dir_paths if p != path ] + path_contained_by_other_paths = [ + path.startswith("%s/" % p) for p in dir_paths if p != path + ] if any(path_contained_by_other_paths): # a child of at least one other path. 
no need to include it in the list @@ -1821,7 +2118,7 @@ def _get_top_level_parent_dirs_by_project(self, dir_identifiers): def _files_changed(self): file_changes = self._find_file_changes() - if not file_changes['changed_projects']: + if not file_changes["changed_projects"]: # no changes in directory or file entries were detected. return False, None elif self._files_added_for_first_time(): @@ -1853,20 +2150,24 @@ def _create_temp_record(self, file_changes): temp_record = self._create_new_dataset_version_template() - actual_files_changed = self._process_file_changes(file_changes, temp_record.id, self.id) + actual_files_changed = self._process_file_changes( + file_changes, temp_record.id, self.id + ) if actual_files_changed: if self.cumulative_state == self.CUMULATIVE_STATE_YES: - _logger.info('Files changed, but dataset is cumulative') + _logger.info("Files changed, but dataset is cumulative") raise DiscardRecord() elif self.catalog_is_pas(): - raise Http400('Cannot change files in a dataset in PAS catalog.') + raise Http400("Cannot change files in a dataset in PAS catalog.") self._new_version = temp_record else: - _logger.debug('no real file changes detected, discarding the temporary record...') + _logger.debug( + "no real file changes detected, discarding the temporary record..." + ) raise DiscardRecord() except DiscardRecord: # rolled back @@ -1889,58 +2190,68 @@ def _create_new_dataset_version_template(self): new_version_template.previous_dataset_version = None new_version_template.dataset_version_set = None new_version_template.identifier = generate_uuid_identifier() - new_version_template.research_dataset['metadata_version_identifier'] = generate_uuid_identifier() + new_version_template.research_dataset[ + "metadata_version_identifier" + ] = generate_uuid_identifier() new_version_template.preservation_identifier = None - new_version_template.api_meta['version'] = self.api_version + new_version_template.api_meta["version"] = self.api_version super(Common, new_version_template).save() return new_version_template def _check_changed_files_permissions(self, file_changes): - ''' + """ Ensure user belongs to projects of all changed files and dirs. Raises 403 on error. - ''' - if not self.request: # pragma: no cover + """ + if not self.request: # pragma: no cover # when files associated with a dataset have been changed, the user should be # always known, i.e. the http request object is present. if its not, the code # is not being executed as a result of a api request. in that case, only allow # proceeding when the code is executed for testing: the code is being called directly # from a test case, to set up test conditions etc. 
- assert executing_test_case(), 'only permitted when setting up testing conditions' + assert executing_test_case(), "only permitted when setting up testing conditions" return - projects_added = file_changes['changed_projects'].get('files_added', set()) - projects_removed = file_changes['changed_projects'].get('files_removed', set()) + projects_added = file_changes["changed_projects"].get("files_added", set()) + projects_removed = file_changes["changed_projects"].get("files_removed", set()) project_changes = projects_added.union(projects_removed) from metax_api.services import CommonService - allowed_projects = CommonService.get_list_query_param(self.request, 'allowed_projects') + + allowed_projects = CommonService.get_list_query_param(self.request, "allowed_projects") if allowed_projects is not None: if not all(p in allowed_projects for p in project_changes): - raise Http403({'detail': [ - 'Unable to update dataset %s. You do not have permissions to all of the files and directories.' - % self.identifier - ]}) + raise Http403( + { + "detail": [ + "Unable to update dataset %s. You do not have permissions to all of the files and directories." + % self.identifier + ] + } + ) if self.request.user.is_service: # assumed the service knows what it is doing return from metax_api.services import AuthService + user_projects = AuthService.get_user_projects(self.request) - invalid_project_perms = [ proj for proj in project_changes if proj not in user_projects ] + invalid_project_perms = [proj for proj in project_changes if proj not in user_projects] if invalid_project_perms: - raise Http403({ - 'detail': [ - 'Unable to add files to dataset. You are lacking project membership in the following projects: %s' - % ', '.join(invalid_project_perms) - ] - }) + raise Http403( + { + "detail": [ + "Unable to add files to dataset. You are lacking project membership in the following projects: %s" + % ", ".join(invalid_project_perms) + ] + } + ) def _handle_metadata_versioning(self): if not self.research_dataset_versions.exists(): @@ -1948,20 +2259,22 @@ def _handle_metadata_versioning(self): # when the first new version is created, first add the initial version. first_rdv = ResearchDatasetVersion( date_created=self.date_created, - metadata_version_identifier=self._initial_data['research_dataset']['metadata_version_identifier'], + metadata_version_identifier=self._initial_data["research_dataset"][ + "metadata_version_identifier" + ], preferred_identifier=self.preferred_identifier, - research_dataset=self._initial_data['research_dataset'], + research_dataset=self._initial_data["research_dataset"], catalog_record=self, ) first_rdv.save() # create and add the new metadata version - self.research_dataset['metadata_version_identifier'] = generate_uuid_identifier() + self.research_dataset["metadata_version_identifier"] = generate_uuid_identifier() new_rdv = ResearchDatasetVersion( date_created=self.date_modified, - metadata_version_identifier=self.research_dataset['metadata_version_identifier'], + metadata_version_identifier=self.research_dataset["metadata_version_identifier"], preferred_identifier=self.preferred_identifier, research_dataset=self.research_dataset, catalog_record=self, @@ -1972,18 +2285,22 @@ def _create_new_dataset_version(self): """ Create a new dataset version of the record who calls this method. 
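        In outline (a sketch of the steps visible below; only the identifier
        generation and the version link are shown):

            new_version = self._new_version  # template built in an earlier step
            new_version.research_dataset["metadata_version_identifier"] = ...  # new uuid
            new_version.research_dataset["preferred_identifier"] = ...         # new URN or DOI
            old_version.next_dataset_version = new_version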
""" - assert hasattr(self, '_new_version'), 'self._new_version should have been set in a previous step' + assert hasattr( + self, "_new_version" + ), "self._new_version should have been set in a previous step" old_version = self if old_version.next_dataset_version_id: - raise ValidationError({ 'detail': ['Changing files in old dataset versions is not permitted.'] }) + raise ValidationError( + {"detail": ["Changing files in old dataset versions is not permitted."]} + ) _logger.info( - 'Files changed during CatalogRecord update. Creating new dataset version ' - 'from old CatalogRecord having identifier %s' % old_version.identifier + "Files changed during CatalogRecord update. Creating new dataset version " + "from old CatalogRecord having identifier %s" % old_version.identifier ) _logger.debug( - 'Old CR metadata version identifier: %s' % old_version.metadata_version_identifier + "Old CR metadata version identifier: %s" % old_version.metadata_version_identifier ) new_version = self._new_version @@ -2010,29 +2327,35 @@ def _create_new_dataset_version(self): # contains the new field data from the request. this effectively transfers # the changes to the new dataset version. new_version.research_dataset = deepcopy(old_version.research_dataset) - new_version.research_dataset['metadata_version_identifier'] = generate_uuid_identifier() + new_version.research_dataset["metadata_version_identifier"] = generate_uuid_identifier() # This effectively means one cannot change identifier type for new catalog record versions - pref_id_type = get_identifier_type(old_version.research_dataset['preferred_identifier']) + pref_id_type = get_identifier_type(old_version.research_dataset["preferred_identifier"]) if pref_id_type == IdentifierType.URN: - new_version.research_dataset['preferred_identifier'] = generate_uuid_identifier(urn_prefix=True) + new_version.research_dataset["preferred_identifier"] = generate_uuid_identifier( + urn_prefix=True + ) elif pref_id_type == IdentifierType.DOI: doi_id = generate_doi_identifier() - new_version.research_dataset['preferred_identifier'] = doi_id + new_version.research_dataset["preferred_identifier"] = doi_id if self.catalog_is_ida(): new_version.preservation_identifier = doi_id else: - _logger.debug("This code should never be reached. Using URN identifier for the new version pref id") - self.research_dataset['preferred_identifier'] = generate_uuid_identifier(urn_prefix=True) + _logger.debug( + "This code should never be reached. Using URN identifier for the new version pref id" + ) + self.research_dataset["preferred_identifier"] = generate_uuid_identifier( + urn_prefix=True + ) new_version._calculate_total_files_byte_size() - if 'remote_resources' in new_version.research_dataset: + if "remote_resources" in new_version.research_dataset: new_version._calculate_total_remote_resources_byte_size() # nothing must change in the now old version of research_dataset, so copy # from _initial_data so that super().save() does not change it later. - old_version.research_dataset = deepcopy(old_version._initial_data['research_dataset']) + old_version.research_dataset = deepcopy(old_version._initial_data["research_dataset"]) old_version.next_dataset_version = new_version if new_version.editor: @@ -2041,30 +2364,36 @@ def _create_new_dataset_version(self): # they see as relevant. 
we also dont want null values in there
             old_editor = deepcopy(new_version.editor)
             new_version.editor = {}
-            if 'owner_id' in old_editor:
-                new_version.editor['owner_id'] = old_editor['owner_id']
-            if 'creator_id' in old_editor:
-                new_version.editor['creator_id'] = old_editor['creator_id']
-            if 'identifier' in old_editor:
+            if "owner_id" in old_editor:
+                new_version.editor["owner_id"] = old_editor["owner_id"]
+            if "creator_id" in old_editor:
+                new_version.editor["creator_id"] = old_editor["creator_id"]
+            if "identifier" in old_editor:
                 # todo this probably does not make sense... ?
-                new_version.editor['identifier'] = old_editor['identifier']
+                new_version.editor["identifier"] = old_editor["identifier"]

         super(Common, new_version).save()

         new_version.calculate_directory_byte_sizes_and_file_counts()

-        if is_metax_generated_doi_identifier(self.research_dataset['preferred_identifier']):
-            self.add_post_request_callable(DataciteDOIUpdate(new_version,
-                new_version.research_dataset['preferred_identifier'],
-                'create'))
+        if is_metax_generated_doi_identifier(self.research_dataset["preferred_identifier"]):
+            self.add_post_request_callable(
+                DataciteDOIUpdate(
+                    new_version,
+                    new_version.research_dataset["preferred_identifier"],
+                    "create",
+                )
+            )

-        new_version.add_post_request_callable(RabbitMQPublishRecord(new_version, 'create'))
+        new_version.add_post_request_callable(RabbitMQPublishRecord(new_version, "create"))

         old_version.new_dataset_version_created = new_version.identifiers_dict
-        old_version.new_dataset_version_created['version_type'] = 'dataset'
+        old_version.new_dataset_version_created["version_type"] = "dataset"

-        _logger.info('New dataset version created, identifier %s' % new_version.identifier)
-        _logger.debug('New dataset version preferred identifer %s' % new_version.preferred_identifier)
+        _logger.info("New dataset version created, identifier %s" % new_version.identifier)
+        _logger.debug(
+            "New dataset version preferred identifier %s" % new_version.preferred_identifier
+        )

     def _get_metadata_file_changes(self):
         """
@@ -2074,28 +2403,32 @@ def _get_metadata_file_changes(self):

        Note: set removes duplicates. It is assumed that file listings do not include
        duplicate files.
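        A worked example of the set arithmetic below (identifiers made up):

            initial_files  = {"a", "b"}   # from _initial_data
            received_files = {"b", "c"}   # from the incoming update
            # keep: {"b"}, removed: {"a"}, added: {"c"}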
""" - if not self._field_is_loaded('research_dataset'): + if not self._field_is_loaded("research_dataset"): return {} - if not self._field_initial_value_loaded('research_dataset'): # pragma: no cover - self._raise_field_not_tracked_error('research_dataset.files') + if not self._field_initial_value_loaded("research_dataset"): # pragma: no cover + self._raise_field_not_tracked_error("research_dataset.files") changes = {} - initial_files = set( f['identifier'] for f in self._initial_data['research_dataset'].get('files', []) ) - received_files = set( f['identifier'] for f in self.research_dataset.get('files', []) ) - changes['files'] = { - 'keep': initial_files.intersection(received_files), - 'removed': initial_files.difference(received_files), - 'added': received_files.difference(initial_files), + initial_files = set( + f["identifier"] for f in self._initial_data["research_dataset"].get("files", []) + ) + received_files = set(f["identifier"] for f in self.research_dataset.get("files", [])) + changes["files"] = { + "keep": initial_files.intersection(received_files), + "removed": initial_files.difference(received_files), + "added": received_files.difference(initial_files), } - initial_dirs = set(dr['identifier'] for dr in self._initial_data['research_dataset'].get('directories', [])) - received_dirs = set( dr['identifier'] for dr in self.research_dataset.get('directories', []) ) - changes['directories'] = { - 'keep': initial_dirs.intersection(received_dirs), - 'removed': initial_dirs.difference(received_dirs), - 'added': received_dirs.difference(initial_dirs), + initial_dirs = set( + dr["identifier"] for dr in self._initial_data["research_dataset"].get("directories", []) + ) + received_dirs = set(dr["identifier"] for dr in self.research_dataset.get("directories", [])) + changes["directories"] = { + "keep": initial_dirs.intersection(received_dirs), + "removed": initial_dirs.difference(received_dirs), + "added": received_dirs.difference(initial_dirs), } return changes @@ -2104,18 +2437,20 @@ def calculate_directory_byte_sizes_and_file_counts(self): """ Calculate directory byte_sizes and file_counts for all dirs selected for this cr. 
""" - _logger.info('Calculating directory byte_sizes and file_counts...') + _logger.info("Calculating directory byte_sizes and file_counts...") dir_identifiers = file_dir_identifiers = [] - if self.research_dataset.get('directories', None): - dir_identifiers = [d['identifier'] for d in self.research_dataset['directories']] + if self.research_dataset.get("directories", None): + dir_identifiers = [d["identifier"] for d in self.research_dataset["directories"]] file_dir_identifiers = [] - if self.research_dataset.get('files', None): + if self.research_dataset.get("files", None): try: - file_dir_identifiers = [File.objects.get(identifier=f['identifier']).parent_directory.identifier - for f in self.research_dataset['files']] + file_dir_identifiers = [ + File.objects.get(identifier=f["identifier"]).parent_directory.identifier + for f in self.research_dataset["files"] + ] except Exception as e: _logger.error(e) @@ -2132,7 +2467,9 @@ def calculate_directory_byte_sizes_and_file_counts(self): directory_data = {} for project_identifier, dir_paths in highest_level_dirs_by_project.items(): - dirs = Directory.objects.filter(project_identifier=project_identifier, directory_path__in=dir_paths) + dirs = Directory.objects.filter( + project_identifier=project_identifier, directory_path__in=dir_paths + ) for dr in dirs: dr.calculate_byte_size_and_file_count_for_cr(self.id, directory_data) @@ -2140,11 +2477,12 @@ def calculate_directory_byte_sizes_and_file_counts(self): self.get_dirs_by_parent(dirs, directory_data) self._directory_data = directory_data - super(Common, self).save(update_fields=['_directory_data']) + super(Common, self).save(update_fields=["_directory_data"]) def get_dirs_by_parent(self, ids, directory_data): - dirs = Directory.objects.filter(id__in=ids, parent_directory_id__isnull=False) \ - .values_list('id', 'parent_directory_id') + dirs = Directory.objects.filter(id__in=ids, parent_directory_id__isnull=False).values_list( + "id", "parent_directory_id" + ) ids = [] if dirs: @@ -2177,17 +2515,20 @@ def _handle_preservation_state_changed(self): """ self.preservation_state_modified = self.date_modified - old_value = self._initial_data['preservation_state'] + old_value = self._initial_data["preservation_state"] new_value = self.preservation_state - _logger.info('preservation_state changed from %d to %d' % (old_value, new_value)) + _logger.info("preservation_state changed from %d to %d" % (old_value, new_value)) - if not self.preservation_dataset_origin_version_exists() and self.preservation_dataset_version is None \ - and self.catalog_is_pas(): + if ( + not self.preservation_dataset_origin_version_exists() + and self.preservation_dataset_version is None + and self.catalog_is_pas() + ): # dataset was created directly into PAS catalog. do nothing, no rules # are enforced (for now) _logger.info( - 'preservation_state changed from %d to %d. (native PAS catalog dataset)' + "preservation_state changed from %d to %d. (native PAS catalog dataset)" % (old_value, new_value) ) return @@ -2196,109 +2537,133 @@ def _handle_preservation_state_changed(self): if new_value == 0 and self.catalog_is_pas(): - _logger.info('Tried to set preservation_state to 0 on a PAS dataset. Aborting') + _logger.info("Tried to set preservation_state to 0 on a PAS dataset. Aborting") - raise Http400({ 'detail': [ - 'Can\'t set preservation_state to 0 on a PAS version. Set to %d or %d in order to conclude PAS process.' 
- % (self.PRESERVATION_STATE_IN_PAS, self.PRESERVATION_STATE_REJECTED_FROM_PAS) - ]}) + raise Http400( + { + "detail": [ + "Can't set preservation_state to 0 on a PAS version. Set to %d or %d in order to conclude PAS process." + % ( + self.PRESERVATION_STATE_IN_PAS, + self.PRESERVATION_STATE_REJECTED_FROM_PAS, + ) + ] + } + ) elif new_value <= self.PRESERVATION_STATE_ACCEPTED_TO_PAS and self.catalog_is_pas(): - raise Http400({ 'detail': [ - 'preservation_state values in PAS catalog should be over 80 (accepted to PAS)' - ]}) + raise Http400( + { + "detail": [ + "preservation_state values in PAS catalog should be over 80 (accepted to PAS)" + ] + } + ) elif new_value > self.PRESERVATION_STATE_ACCEPTED_TO_PAS and not self.catalog_is_pas(): - raise Http400({ 'detail': [ - 'Maximum value of preservation_state in a non-PAS catalog is 80 (accepted to PAS)' - ]}) + raise Http400( + { + "detail": [ + "Maximum value of preservation_state in a non-PAS catalog is 80 (accepted to PAS)" + ] + } + ) elif new_value == self.PRESERVATION_STATE_ACCEPTED_TO_PAS: if self.catalog_is_pas(): - raise Http400({ 'detail': [ - 'Dataset is already in PAS catalog' - ]}) + raise Http400({"detail": ["Dataset is already in PAS catalog"]}) elif self.preservation_dataset_version: - raise Http400({ 'detail': [ - 'Dataset already has a PAS version. Identifier: %s' % self.preservation_dataset_version.identifier - ]}) + raise Http400( + { + "detail": [ + "Dataset already has a PAS version. Identifier: %s" + % self.preservation_dataset_version.identifier + ] + } + ) else: self._create_pas_version(self) - _logger.info('Resetting preservation_state of original dataset to 0') + _logger.info("Resetting preservation_state of original dataset to 0") self.preservation_state = self.PRESERVATION_STATE_INITIALIZED self.preservation_description = None self.preservation_reason_description = None - elif new_value in (self.PRESERVATION_STATE_IN_PAS, self.PRESERVATION_STATE_REJECTED_FROM_PAS): - _logger.info('PAS-process concluded') + elif new_value in ( + self.PRESERVATION_STATE_IN_PAS, + self.PRESERVATION_STATE_REJECTED_FROM_PAS, + ): + _logger.info("PAS-process concluded") else: - _logger.debug('preservation_state change not handled for these values') + _logger.debug("preservation_state change not handled for these values") def _create_pas_version(self, origin_version): """ Create PAS version to PAS catalog and add related links. """ - _logger.info('Creating new PAS dataset version...') + _logger.info("Creating new PAS dataset version...") if origin_version.preservation_dataset_version_id: - msg = 'Dataset already has a PAS version. Identifier of PAS dataset: %s' \ + msg = ( + "Dataset already has a PAS version. 
Identifier of PAS dataset: %s" % origin_version.preservation_dataset_version.identifier + ) _logger.info(msg) - raise Http400({ 'detail': [msg] }) + raise Http400({"detail": [msg]}) try: - pas_catalog = DataCatalog.objects.only('id').get( + pas_catalog = DataCatalog.objects.only("id").get( catalog_json__identifier=settings.PAS_DATA_CATALOG_IDENTIFIER ) except DataCatalog.DoesNotExist: - msg = 'PAS catalog %s does not exist' % settings.PAS_DATA_CATALOG_IDENTIFIER + msg = "PAS catalog %s does not exist" % settings.PAS_DATA_CATALOG_IDENTIFIER _logger.info(msg) - raise Http400({ 'detail': [msg] }) + raise Http400({"detail": [msg]}) research_dataset = deepcopy(origin_version.research_dataset) - research_dataset.pop('preferred_identifier', None) - research_dataset.pop('metadata_version_identifier', None) + research_dataset.pop("preferred_identifier", None) + research_dataset.pop("metadata_version_identifier", None) params = { - 'data_catalog': pas_catalog, - 'research_dataset': research_dataset, - 'contract': origin_version.contract, - 'date_created': origin_version.date_modified, - 'service_created': origin_version.service_modified, - 'user_created': origin_version.user_modified, - 'metadata_owner_org': origin_version.metadata_owner_org, - 'metadata_provider_org': origin_version.metadata_provider_org, - 'metadata_provider_user': origin_version.metadata_provider_user, - 'preservation_state': origin_version.preservation_state, - 'preservation_description': origin_version.preservation_description, - 'preservation_reason_description': origin_version.preservation_reason_description, - 'preservation_dataset_origin_version': origin_version, + "data_catalog": pas_catalog, + "research_dataset": research_dataset, + "contract": origin_version.contract, + "date_created": origin_version.date_modified, + "service_created": origin_version.service_modified, + "user_created": origin_version.user_modified, + "metadata_owner_org": origin_version.metadata_owner_org, + "metadata_provider_org": origin_version.metadata_provider_org, + "metadata_provider_user": origin_version.metadata_provider_user, + "preservation_state": origin_version.preservation_state, + "preservation_description": origin_version.preservation_description, + "preservation_reason_description": origin_version.preservation_reason_description, + "preservation_dataset_origin_version": origin_version, } # add information about other identifiers for this dataset other_identifiers_info_origin = { - 'notation': origin_version.preferred_identifier, - 'type': { - 'identifier': get_identifier_type(origin_version.preferred_identifier).value, - } + "notation": origin_version.preferred_identifier, + "type": { + "identifier": get_identifier_type(origin_version.preferred_identifier).value, + }, } try: - params['research_dataset']['other_identifier'].append(other_identifiers_info_origin) + params["research_dataset"]["other_identifier"].append(other_identifiers_info_origin) except KeyError: - params['research_dataset']['other_identifier'] = [other_identifiers_info_origin] + params["research_dataset"]["other_identifier"] = [other_identifiers_info_origin] # validate/populate fields according to reference data from metax_api.services import CatalogRecordService as CRS, RedisCacheService as cache - CRS.validate_reference_data(params['research_dataset'], cache) + + CRS.validate_reference_data(params["research_dataset"], cache) # finally create the pas copy dataset pas_version = self.__class__(**params) @@ -2308,32 +2673,32 @@ def _create_pas_version(self, 
origin_version): # ensure pas dataset contains exactly the same files as origin dataset. clear the result # that was achieved by calling save(), which processed research_dataset.files and research_dataset.directories pas_version.files.clear() - pas_version.files.add(*origin_version.files.filter().values_list('id', flat=True)) + pas_version.files.add(*origin_version.files.filter().values_list("id", flat=True)) # link origin_version and pas copy origin_version.preservation_dataset_version = pas_version origin_version.new_dataset_version_created = pas_version.identifiers_dict - origin_version.new_dataset_version_created['version_type'] = 'pas' + origin_version.new_dataset_version_created["version_type"] = "pas" # add information about other identifiers for origin_dataset other_identifiers_info_pas = { - 'notation': pas_version.preferred_identifier, - 'type': { - 'identifier': get_identifier_type(pas_version.preferred_identifier).value, - } + "notation": pas_version.preferred_identifier, + "type": { + "identifier": get_identifier_type(pas_version.preferred_identifier).value, + }, } try: - origin_version.research_dataset['other_identifier'].append(other_identifiers_info_pas) + origin_version.research_dataset["other_identifier"].append(other_identifiers_info_pas) except KeyError: - origin_version.research_dataset['other_identifier'] = [other_identifiers_info_pas] + origin_version.research_dataset["other_identifier"] = [other_identifiers_info_pas] # need to validate ref data again for origin_version CRS.validate_reference_data(origin_version.research_dataset, cache) - self.add_post_request_callable(RabbitMQPublishRecord(pas_version, 'create')) + self.add_post_request_callable(RabbitMQPublishRecord(pas_version, "create")) - _logger.info('PAS dataset version created with identifier: %s' % pas_version.identifier) + _logger.info("PAS dataset version created with identifier: %s" % pas_version.identifier) def _check_alternate_records(self): """ @@ -2350,10 +2715,12 @@ def _check_alternate_records(self): below makes only one query. We are only interested in alternate_record_set though, since fetching it now saves another query later when checking if it already exists. 
""" - return CatalogRecord.objects.select_related('data_catalog', 'alternate_record_set') \ - .filter(research_dataset__contains={ 'preferred_identifier': self.preferred_identifier }) \ - .exclude(Q(data_catalog__id=self.data_catalog_id) | Q(id=self.id)) \ + return ( + CatalogRecord.objects.select_related("data_catalog", "alternate_record_set") + .filter(research_dataset__contains={"preferred_identifier": self.preferred_identifier}) + .exclude(Q(data_catalog__id=self.data_catalog_id) | Q(id=self.id)) .first() + ) def _create_or_update_alternate_record_set(self, other_record): """ @@ -2370,8 +2737,14 @@ def _create_or_update_alternate_record_set(self, other_record): ars = AlternateRecordSet() ars.save() ars.records.add(self, other_record) - _logger.info('Creating new alternate_record_set for preferred_identifier: %s, with records: %s and %s' % - (self.preferred_identifier, self.metadata_version_identifier, other_record.metadata_version_identifier)) + _logger.info( + "Creating new alternate_record_set for preferred_identifier: %s, with records: %s and %s" + % ( + self.preferred_identifier, + self.metadata_version_identifier, + other_record.metadata_version_identifier, + ) + ) def _remove_from_alternate_record_set(self): """ @@ -2393,24 +2766,29 @@ def add_post_request_callable(self, callable): In case of drafts, skip other than logging """ if not self.is_published() and not isinstance(callable, DelayedLog): - _logger.debug(f'{self.identifier} is a draft, skipping non-logging post request callables') + _logger.debug( + f"{self.identifier} is a draft, skipping non-logging post request callables" + ) return from metax_api.services import CallableService + CallableService.add_post_request_callable(callable) def __repr__(self): - return '<%s: %d, removed: %s, data_catalog: %s, metadata_version_identifier: %s, ' \ - 'preferred_identifier: %s, file_count: %d >' \ + return ( + "<%s: %d, removed: %s, data_catalog: %s, metadata_version_identifier: %s, " + "preferred_identifier: %s, file_count: %d >" % ( - 'CatalogRecord', + "CatalogRecord", self.id, str(self.removed), - self.data_catalog.catalog_json['identifier'], + self.data_catalog.catalog_json["identifier"], self.metadata_version_identifier, self.preferred_identifier, self.files.count(), ) + ) def _get_preferred_identifier_type_from_request(self): """ @@ -2419,7 +2797,7 @@ def _get_preferred_identifier_type_from_request(self): :return: IdentifierType. Return None if parameter not given or is unrecognized. Calling code is then responsible for choosing which IdentifierType to use. 
""" - pid_type = self.request.query_params.get('pid_type', None) + pid_type = self.request.query_params.get("pid_type", None) if pid_type == IdentifierType.DOI.value: pid_type = IdentifierType.DOI elif pid_type == IdentifierType.URN.value: @@ -2435,22 +2813,23 @@ def change_cumulative_state(self, new_state): self._assert_api_version() if self.next_dataset_version: - raise Http400('Cannot change cumulative_state on old dataset version') + raise Http400("Cannot change cumulative_state on old dataset version") - cumulative_state_valid_values = [ choice[0] for choice in self.CUMULATIVE_STATE_CHOICES ] + cumulative_state_valid_values = [choice[0] for choice in self.CUMULATIVE_STATE_CHOICES] try: new_state = int(new_state) assert new_state in cumulative_state_valid_values except: raise Http400( - 'cumulative_state must be one of: %s' % ', '.join(str(x) for x in cumulative_state_valid_values) + "cumulative_state must be one of: %s" + % ", ".join(str(x) for x in cumulative_state_valid_values) ) - _logger.info('Changing cumulative_state from %d to %d' % (self.cumulative_state, new_state)) + _logger.info("Changing cumulative_state from %d to %d" % (self.cumulative_state, new_state)) if self.cumulative_state == new_state: - _logger.info('No change in cumulative_state') + _logger.info("No change in cumulative_state") return False self.date_modified = get_tz_aware_now_without_micros() @@ -2462,31 +2841,33 @@ def change_cumulative_state(self, new_state): if new_state == self.CUMULATIVE_STATE_NO: raise Http400( - 'Cumulative dataset cannot be set to non-cumulative dataset. ' - 'If you want to stop active cumulation, set cumulative status to closed.' + "Cumulative dataset cannot be set to non-cumulative dataset. " + "If you want to stop active cumulation, set cumulative status to closed." ) elif new_state == self.CUMULATIVE_STATE_CLOSED: if self.cumulative_state == self.CUMULATIVE_STATE_NO: - raise Http400('Cumulation cannot be closed for non-cumulative dataset') + raise Http400("Cumulation cannot be closed for non-cumulative dataset") self.date_cumulation_ended = self.date_modified self.cumulative_state = new_state - super().save(update_fields=[ - 'cumulative_state', - 'date_cumulation_ended', - 'date_modified' - ]) + super().save( + update_fields=[ + "cumulative_state", + "date_cumulation_ended", + "date_modified", + ] + ) elif new_state == self.CUMULATIVE_STATE_YES: if self.preservation_state > self.PRESERVATION_STATE_INITIALIZED: raise Http400( - 'Cumulative datasets are not allowed in PAS process. Change preservation_state ' - 'to 0 in order to change the dataset to cumulative.' + "Cumulative datasets are not allowed in PAS process. Change preservation_state " + "to 0 in order to change the dataset to cumulative." 
) new_version = self._create_new_dataset_version_template() @@ -2496,7 +2877,7 @@ def change_cumulative_state(self, new_state): super(Common, new_version).save() # add all files from previous version to new version - new_version.files.add(*self.files.values_list('id', flat=True)) + new_version.files.add(*self.files.values_list("id", flat=True)) self._new_version = new_version @@ -2504,7 +2885,7 @@ def change_cumulative_state(self, new_state): super().save() - self.add_post_request_callable(RabbitMQPublishRecord(self, 'update')) + self.add_post_request_callable(RabbitMQPublishRecord(self, "update")) return True if new_state == self.CUMULATIVE_STATE_YES else False @@ -2518,27 +2899,32 @@ def refresh_directory_content(self, dir_identifier): self._assert_api_version() if self.deprecated: - raise Http400('Cannot update files on deprecated dataset. ' - 'You can remove all deleted files from dataset using API /rpc/datasets/fix_deprecated.') + raise Http400( + "Cannot update files on deprecated dataset. " + "You can remove all deleted files from dataset using API /rpc/datasets/fix_deprecated." + ) try: dir = Directory.objects.get(identifier=dir_identifier) except Directory.DoesNotExist: - raise Http404(f'Directory \'{dir_identifier}\' could not be found') + raise Http404(f"Directory '{dir_identifier}' could not be found") - dir_identifiers = [ d['identifier'] for d in self.research_dataset['directories'] ] - base_paths = Directory.objects.filter(identifier__in=dir_identifiers).values_list('directory_path', flat=True) + dir_identifiers = [d["identifier"] for d in self.research_dataset["directories"]] + base_paths = Directory.objects.filter(identifier__in=dir_identifiers).values_list( + "directory_path", flat=True + ) - if dir.directory_path not in base_paths and \ - not any([ dir.directory_path.startswith(f'{p}/') for p in base_paths ]): - raise Http400(f'Directory \'{dir_identifier}\' is not included in this dataset') + if dir.directory_path not in base_paths and not any( + [dir.directory_path.startswith(f"{p}/") for p in base_paths] + ): + raise Http400(f"Directory '{dir_identifier}' is not included in this dataset") added_file_ids = self._find_new_files_added_to_dir(dir) if not added_file_ids: - _logger.info('no change in directory content') + _logger.info("no change in directory content") return (False, 0) - _logger.info(f'refreshing directory adds {len(added_file_ids)} files to dataset') + _logger.info(f"refreshing directory adds {len(added_file_ids)} files to dataset") self.date_modified = get_tz_aware_now_without_micros() self.service_modified = self.request.user.username if self.request.user.is_service else None @@ -2553,19 +2939,19 @@ def refresh_directory_content(self, dir_identifier): super(Common, new_version).save() # add all files from previous version in addition to new ones - new_version.files.add(*added_file_ids, *self.files.values_list('id', flat=True)) + new_version.files.add(*added_file_ids, *self.files.values_list("id", flat=True)) self._new_version = new_version self._create_new_dataset_version() super().save() - self.add_post_request_callable(RabbitMQPublishRecord(self, 'update')) + self.add_post_request_callable(RabbitMQPublishRecord(self, "update")) return (self.cumulative_state != self.CUMULATIVE_STATE_YES, len(added_file_ids)) def _find_new_files_added_to_dir(self, dir): - sql_insert_newly_frozen_files_by_dir_path = ''' + sql_insert_newly_frozen_files_by_dir_path = """ select f.id from metax_api_file as f where f.active = true and f.removed = false @@ -2576,12 +2962,16 @@ 
def _find_new_files_added_to_dir(self, dir):
                 metax_api_catalogrecord_files cr_f
                 where catalogrecord_id = %s
             )
-        '''
-        sql_params_insert_new_files = [dir.project_identifier, dir.directory_path, self.id]
+        """
+        sql_params_insert_new_files = [
+            dir.project_identifier,
+            dir.directory_path,
+            self.id,
+        ]

         with connection.cursor() as cr:
             cr.execute(sql_insert_newly_frozen_files_by_dir_path, sql_params_insert_new_files)
-            added_file_ids = [ v[0] for v in cr.fetchall() ]
+            added_file_ids = [v[0] for v in cr.fetchall()]

         return added_file_ids

@@ -2598,33 +2988,39 @@ def fix_deprecated(self):
         self._copy_undeleted_files_from_old_version()
         self._create_new_dataset_version()
         super().save()
-        self.add_post_request_callable(RabbitMQPublishRecord(self, 'update'))
+        self.add_post_request_callable(RabbitMQPublishRecord(self, "update"))

     def _fix_deprecated_research_dataset(self):
-        if self.research_dataset.get('files'):
-            pid_list = [ f['identifier'] for f in self.research_dataset['files'] ]
-            pid_list_fixed = File.objects.filter(identifier__in=pid_list).values_list('identifier', flat=True)
+        if self.research_dataset.get("files"):
+            pid_list = [f["identifier"] for f in self.research_dataset["files"]]
+            pid_list_fixed = File.objects.filter(identifier__in=pid_list).values_list(
+                "identifier", flat=True
+            )

             if len(pid_list_fixed) != len(pid_list):
-                self.research_dataset['files'] = [
-                    f for f in self.research_dataset['files'] if f['identifier'] in pid_list_fixed
+                self.research_dataset["files"] = [
+                    f for f in self.research_dataset["files"] if f["identifier"] in pid_list_fixed
                 ]

-            if not self.research_dataset['files']:
-                del self.research_dataset['files']
+            if not self.research_dataset["files"]:
+                del self.research_dataset["files"]

-        if self.research_dataset.get('directories'):
-            pid_list = [ d['identifier'] for d in self.research_dataset['directories'] ]
-            pid_list_fixed = Directory.objects.filter(identifier__in=pid_list).values_list('identifier', flat=True)
+        if self.research_dataset.get("directories"):
+            pid_list = [d["identifier"] for d in self.research_dataset["directories"]]
+            pid_list_fixed = Directory.objects.filter(identifier__in=pid_list).values_list(
+                "identifier", flat=True
+            )

             if len(pid_list_fixed) != len(pid_list):
-                self.research_dataset['directories'] = [
-                    d for d in self.research_dataset['directories'] if d['identifier'] in pid_list_fixed
+                self.research_dataset["directories"] = [
+                    d
+                    for d in self.research_dataset["directories"]
+                    if d["identifier"] in pid_list_fixed
                 ]

-            if not self.research_dataset['directories']:
-                del self.research_dataset['directories']
+            if not self.research_dataset["directories"]:
+                del self.research_dataset["directories"]

     def _copy_undeleted_files_from_old_version(self):
-        copy_undeleted_files_sql = '''
+        copy_undeleted_files_sql = """
             insert into metax_api_catalogrecord_files (catalogrecord_id, file_id)
             select %s as catalogrecord_id, file_id
             from metax_api_catalogrecord_files as cr_f
@@ -2637,18 +3033,22 @@ def _copy_undeleted_files_from_old_version(self):
                 where catalogrecord_id = %s
             )
             returning file_id
-        '''
-        sql_params_copy_undeleted = [self._new_version.id, self.id, self._new_version.id]
+        """
+        sql_params_copy_undeleted = [
+            self._new_version.id,
+            self.id,
+            self._new_version.id,
+        ]

         with connection.cursor() as cr:
             cr.execute(copy_undeleted_files_sql, sql_params_copy_undeleted)
             n_files_copied = cr.rowcount

         if DEBUG:
-            _logger.debug('Added %d files to dataset %s' % (n_files_copied, self._new_version.id))
+            _logger.debug("Added %d files to dataset %s" % (n_files_copied, self._new_version.id))


-class RabbitMQPublishRecord():
+class RabbitMQPublishRecord:
     """
     Callable object to be passed to CommonService.add_post_request_callable(callable).

@@ -2657,7 +3057,11 @@ class RabbitMQPublishRecord():
     """

     def __init__(self, cr, routing_key):
-        assert routing_key in ('create', 'update', 'delete'), 'invalid value for routing_key'
+        assert routing_key in (
+            "create",
+            "update",
+            "delete",
+        ), "invalid value for routing_key"
         self.cr = cr
         self.routing_key = routing_key

@@ -2668,16 +3072,16 @@ def __call__(self):
         from metax_api.services import RabbitMQService as rabbitmq

         _logger.info(
-            'Publishing CatalogRecord %s to RabbitMQ... routing_key: %s'
+            "Publishing CatalogRecord %s to RabbitMQ... routing_key: %s"
             % (self.cr.identifier, self.routing_key)
         )

-        if self.routing_key == 'delete':
-            cr_json = { 'identifier': self.cr.identifier }
+        if self.routing_key == "delete":
+            cr_json = {"identifier": self.cr.identifier}
         else:
             cr_json = self._to_json()
             # Send full data_catalog json
-            cr_json['data_catalog'] = {'catalog_json': self.cr.data_catalog.catalog_json}
+            cr_json["data_catalog"] = {"catalog_json": self.cr.data_catalog.catalog_json}

         try:
             for exchange in settings.RABBITMQ["EXCHANGES"]:
@@ -2685,17 +3089,17 @@ def __call__(self):
         except:
             # note: if we'd like to let the request be a success even if this operation fails,
             # we could simply not raise an exception here.
-            _logger.exception('Publishing rabbitmq message failed')
-            raise Http503({ 'detail': [
-                'failed to publish updates to rabbitmq. request is aborted.'
-            ]})
+            _logger.exception("Publishing rabbitmq message failed")
+            raise Http503(
+                {"detail": ["failed to publish updates to rabbitmq. request is aborted."]}
+            )

     def _to_json(self):
         serializer_class = self.cr.serializer_class
         return serializer_class(self.cr).data


-class REMSUpdate():
+class REMSUpdate:
     """
     Callable object to be passed to CommonService.add_post_request_callable(callable).

@@ -2705,11 +3109,12 @@ class REMSUpdate():

     def __init__(self, cr, action, **kwargs):
         from metax_api.services.rems_service import REMSService
-        assert action in ('close', 'create', 'update'), 'invalid value for action'
+
+        assert action in ("close", "create", "update"), "invalid value for action"
         self.cr = cr
-        self.user_info = kwargs.get('user_info')
-        self.reason = kwargs.get('reason')
-        self.rems_id = kwargs.get('rems_id')
+        self.user_info = kwargs.get("user_info")
+        self.reason = kwargs.get("reason")
+        self.rems_id = kwargs.get("rems_id")
         self.action = action

         self.rems = REMSService()

@@ -2723,26 +3128,24 @@ def __call__(self):
        it can be changed before saving and rems_service still knows what the resource identifier is.
         """
         _logger.info(
-            'Publishing CatalogRecord %s update to REMS... action: %s'
+            "Publishing CatalogRecord %s update to REMS... action: %s"
            % (self.cr.identifier, self.action)
         )

        try:
-            if self.action == 'create':
+            if self.action == "create":
                 self.rems.create_rems_entity(self.cr, self.user_info)
-            elif self.action == 'close':
+            elif self.action == "close":
                 self.rems.close_rems_entity(self.rems_id, self.reason)
-            elif self.action == 'update':
+            elif self.action == "update":
                 self.rems.update_rems_entity(self.cr, self.rems_id, self.reason)
        except Exception as e:
             _logger.error(e)
-            raise Http503({ 'detail': [
-                'failed to publish updates to rems. request is aborted.'
-            ]})
+            raise Http503({"detail": ["failed to publish updates to rems. request is aborted."]})


-class DataciteDOIUpdate():
+class DataciteDOIUpdate:
     """
     Callable object to be passed to CommonService.add_post_request_callable(callable).

@@ -2759,7 +3162,8 @@ def __init__(self, cr, doi_identifier, action):
         :param action:
         """
         from metax_api.services.datacite_service import DataciteService
-        assert action in ('create', 'update', 'delete'), 'invalid value for action'
+
+        assert action in ("create", "update", "delete"), "invalid value for action"
         assert is_metax_generated_doi_identifier(doi_identifier)
         self.cr = cr
         self.doi_identifier = doi_identifier
@@ -2772,44 +3176,50 @@ def __call__(self):

         Do not run for tests or in travis
         """
-        if hasattr(settings, 'DATACITE'):
-            if not settings.DATACITE.get('ETSIN_URL_TEMPLATE', None):
-                raise Exception('Missing configuration from settings for DATACITE: ETSIN_URL_TEMPLATE')
+        if hasattr(settings, "DATACITE"):
+            if not settings.DATACITE.get("ETSIN_URL_TEMPLATE", None):
+                raise Exception(
+                    "Missing configuration from settings for DATACITE: ETSIN_URL_TEMPLATE"
+                )
         else:
-            raise Exception('Missing configuration from settings: DATACITE')
+            raise Exception("Missing configuration from settings: DATACITE")

         doi = extract_doi_from_doi_identifier(self.doi_identifier)

         if doi is None:
             return

-        if self.action == 'create':
+        if self.action == "create":
             _logger.info(
-                'Publishing CatalogRecord {0} metadata and url to Datacite API using DOI {1}'.
-                format(self.cr.identifier, doi)
+                "Publishing CatalogRecord {0} metadata and url to Datacite API using DOI {1}".format(
+                    self.cr.identifier, doi
+                )
             )
-        elif self.action == 'update':
+        elif self.action == "update":
             _logger.info(
-                'Updating CatalogRecord {0} metadata and url to Datacite API using DOI {1}'.
-                format(self.cr.identifier, doi)
+                "Updating CatalogRecord {0} metadata and url to Datacite API using DOI {1}".format(
+                    self.cr.identifier, doi
+                )
             )
-        elif self.action == 'delete':
+        elif self.action == "delete":
             _logger.info(
-                'Deleting CatalogRecord {0} metadata from Datacite API using DOI {1}'.
-                format(self.cr.identifier, doi)
+                "Deleting CatalogRecord {0} metadata from Datacite API using DOI {1}".format(
+                    self.cr.identifier, doi
+                )
             )

         from metax_api.services.datacite_service import DataciteException
+
         try:
-            if self.action == 'create':
+            if self.action == "create":
                 try:
                     self._publish_to_datacite(doi)
                 except Exception as e:
                     # Try to delete DOI in case the DOI got created but stayed in "draft" state
                     self.dcs.delete_draft_doi(doi)
-                    raise(Exception(e))
+                    raise (Exception(e))
-            elif self.action == 'update':
+            elif self.action == "update":
                 self._publish_to_datacite(doi)
-            elif self.action == 'delete':
+            elif self.action == "delete":
                 # If metadata is in "findable" state, the operation below should transition the DOI to "registered"
                 # state
                 self.dcs.delete_doi_metadata(doi)
@@ -2818,13 +3228,14 @@ def __call__(self):
             raise Http400(str(e))
         except Exception as e:
             _logger.error(e)
-            _logger.exception('Datacite API interaction failed')
-            raise Http503({'detail': [
-                'failed to publish updates to Datacite API. request is aborted.'
-            ]})
+            _logger.exception("Datacite API interaction failed")
+            raise Http503(
+                {"detail": ["failed to publish updates to Datacite API. request is aborted."]}
+            )

     def _publish_to_datacite(self, doi):
         from metax_api.services.datacite_service import convert_cr_to_datacite_cr_json
+
         cr_json = convert_cr_to_datacite_cr_json(self.cr)
         datacite_xml = self.dcs.convert_catalog_record_to_datacite_xml(cr_json, True, True)
         _logger.debug("Datacite XML to be sent to Datacite API: {0}".format(datacite_xml))
@@ -2832,4 +3243,4 @@ def _publish_to_datacite(self, doi):

         # When the two operations below are successful, it should result in the DOI transitioning to
         # "findable" state
         self.dcs.create_doi_metadata(datacite_xml)
-        self.dcs.register_doi_url(doi, settings.DATACITE['ETSIN_URL_TEMPLATE'] % self.cr.identifier)
+        self.dcs.register_doi_url(doi, settings.DATACITE["ETSIN_URL_TEMPLATE"] % self.cr.identifier)
diff --git a/src/metax_api/models/catalog_record_v2.py b/src/metax_api/models/catalog_record_v2.py
index f35ad5b7..3ecec3f7 100755
--- a/src/metax_api/models/catalog_record_v2.py
+++ b/src/metax_api/models/catalog_record_v2.py
@@ -24,7 +24,12 @@
     get_tz_aware_now_without_micros,
 )

-from .catalog_record import CatalogRecord, DataciteDOIUpdate, DatasetVersionSet, RabbitMQPublishRecord
+from .catalog_record import (
+    CatalogRecord,
+    DataciteDOIUpdate,
+    DatasetVersionSet,
+    RabbitMQPublishRecord,
+)
 from .common import Common
 from .directory import Directory
 from .file import File
@@ -49,7 +54,6 @@


 class CatalogRecordV2(CatalogRecord):
-
     class Meta:
         # CatalogRecordV2 operates on the same database table as CatalogRecord model. Only the class
         # behaviour may differ from base class.
@@ -58,6 +62,7 @@ class Meta:
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         from metax_api.api.rest.v2.serializers import CatalogRecordSerializerV2
+
         self.serializer_class = CatalogRecordSerializerV2
         self.api_version = 2

@@ -66,14 +71,14 @@ def save(self, *args, **kwargs):
         Note: keys are popped from kwargs, because super().save() will complain if it receives
         unknown keyword arguments.
         """
-        pid_type = kwargs.pop('pid_type', None)
+        pid_type = kwargs.pop("pid_type", None)

         if self._operation_is_create():
             self._pre_create_operations()
             super(CatalogRecord, self).save(*args, **kwargs)
             self._post_create_operations(pid_type=pid_type)
         else:
-            self._pre_update_operations(draft_publish=kwargs.pop('draft_publish', False))
+            self._pre_update_operations(draft_publish=kwargs.pop("draft_publish", False))
             super(CatalogRecord, self).save(*args, **kwargs)
             self._post_update_operations()

@@ -84,16 +89,20 @@ def delete(self, *args, **kwargs):
         elif self.is_draft_for_another_dataset():
             draft_of = self.draft_of
             draft_of.next_draft = None
-            super(CatalogRecord, draft_of).save(update_fields=['next_draft'])
+            super(CatalogRecord, draft_of).save(update_fields=["next_draft"])

         super().delete(*args, **kwargs)

     def _pre_create_operations(self):
-        if not self._check_catalog_permissions(self.data_catalog.catalog_record_group_create,
-                self.data_catalog.catalog_record_services_create):
-            raise Http403({ 'detail': [ 'You are not permitted to create datasets in this data catalog.' ]})
+        if not self._check_catalog_permissions(
+            self.data_catalog.catalog_record_group_create,
+            self.data_catalog.catalog_record_services_create,
+        ):
+            raise Http403(
+                {"detail": ["You are not permitted to create datasets in this data catalog."]}
+            )

-        self.research_dataset['metadata_version_identifier'] = generate_uuid_identifier()
+        self.research_dataset["metadata_version_identifier"] = generate_uuid_identifier()
         self.identifier = generate_uuid_identifier()

         if not self._save_as_draft():
@@ -102,33 +111,33 @@ def _pre_create_operations(self):
         self._set_api_version()

     def _post_create_operations(self, pid_type=None):
-        if 'files' in self.research_dataset or 'directories' in self.research_dataset:
+        if "files" in self.research_dataset or "directories" in self.research_dataset:
             # files must be added after the record itself has been created, to be able
             # to insert into a many2many relation.
             file_changes = {
-                'files': self.research_dataset.get('files', []),
-                'directories': self.research_dataset.get('directories', []),
+                "files": self.research_dataset.get("files", []),
+                "directories": self.research_dataset.get("directories", []),
             }

             self.change_files(file_changes, operation_is_create=True)

         if self._save_as_draft():
-            _logger.debug('Saving new dataset as draft')
+            _logger.debug("Saving new dataset as draft")

-            self.research_dataset['preferred_identifier'] = 'draft:%s' % self.identifier
+            self.research_dataset["preferred_identifier"] = "draft:%s" % self.identifier

             if self._get_preferred_identifier_type_from_request() == IdentifierType.DOI:
                 self.use_doi_for_published = True
             else:
                 self.use_doi_for_published = False

-            super(Common, self).save(update_fields=['research_dataset', 'use_doi_for_published'])
+            super(Common, self).save(update_fields=["research_dataset", "use_doi_for_published"])

             _logger.info(
-                'Created a new '
+                "Created a new "
                 % (self.id, self.identifier)
             )

@@ -138,24 +147,24 @@ def _post_create_operations(self, pid_type=None):

         # logs correctly whether dataset is created into draft state, or also published
         log_args = {
-            'catalogrecord': {
-                'identifier': self.identifier,
-                'preferred_identifier': self.preferred_identifier,
-                'data_catalog': self.data_catalog.catalog_json['identifier'],
-                'date_created': datetime_to_str(self.date_created),
-                'metadata_owner_org': self.metadata_owner_org,
-                'state': self.state,
+            "catalogrecord": {
+                "identifier": self.identifier,
+                "preferred_identifier": self.preferred_identifier,
+                "data_catalog": self.data_catalog.catalog_json["identifier"],
+                "date_created": datetime_to_str(self.date_created),
+                "metadata_owner_org": self.metadata_owner_org,
+                "state": self.state,
             },
-            'user_id': self.user_created or self.service_created,
+            "user_id": self.user_created or self.service_created,
         }

         self.add_post_request_callable(DelayedLog(**log_args))

     def is_draft_for_another_dataset(self):
-        return hasattr(self, 'draft_of') and self.draft_of is not None
+        return hasattr(self, "draft_of") and self.draft_of is not None

     def has_next_draft(self):
-        return hasattr(self, 'next_draft') and self.next_draft is not None
+        return hasattr(self, "next_draft") and self.next_draft is not None

     def _save_as_draft(self):
         """
@@ -163,7 +172,8 @@ def _save_as_draft(self):
         and to be able to differentiate between v1 and v2 drafts.
""" from metax_api.services import CommonService - return CommonService.get_boolean_query_param(self.request, 'draft') + + return CommonService.get_boolean_query_param(self.request, "draft") def publish_dataset(self, pid_type=None): """ @@ -178,26 +188,28 @@ def publish_dataset(self, pid_type=None): Note: The last three steps are executed at the very end of the HTTP request, but they are queued in this method. """ - _logger.info('Publishing dataset...') + _logger.info("Publishing dataset...") if self.state == self.STATE_PUBLISHED: - raise Http400('Dataset is already published.') + raise Http400("Dataset is already published.") elif self.is_draft_for_another_dataset(): raise Http400( - 'This dataset is a draft for another published dataset. To publish the draft changes, ' - 'use API /rpc/v2/datasets/merge_draft' + "This dataset is a draft for another published dataset. To publish the draft changes, " + "use API /rpc/v2/datasets/merge_draft" ) elif self.catalog_is_dft(): - raise Http400('Cannot publish dataset in draft catalog. ' - 'Please use parameter ?draft for adding a draft dataset') + raise Http400( + "Cannot publish dataset in draft catalog. " + "Please use parameter ?draft for adding a draft dataset" + ) self.state = self.STATE_PUBLISHED self._generate_issued_date() if self.catalog_is_pas(): - _logger.debug('Catalog is PAS - Using DOI as pref_id_type') + _logger.debug("Catalog is PAS - Using DOI as pref_id_type") # todo: default identifier type could probably be a parameter of the data catalog pref_id_type = IdentifierType.DOI elif self.use_doi_for_published is True: @@ -208,53 +220,59 @@ def publish_dataset(self, pid_type=None): self.use_doi_for_published = None if self.catalog_is_harvested(): - _logger.debug('Note: Catalog is harvested') + _logger.debug("Note: Catalog is harvested") # in harvested catalogs, the harvester is allowed to set the preferred_identifier. # do not overwrite. pass elif self.catalog_is_legacy(): - if 'preferred_identifier' not in self.research_dataset: + if "preferred_identifier" not in self.research_dataset: raise Http400( - 'Selected catalog %s is a legacy catalog. Preferred identifiers are not ' - 'automatically generated for datasets stored in legacy catalogs, nor is ' - 'their uniqueness enforced. Please provide a value for dataset field ' - 'preferred_identifier.' % self.data_catalog.catalog_json['identifier'] + "Selected catalog %s is a legacy catalog. Preferred identifiers are not " + "automatically generated for datasets stored in legacy catalogs, nor is " + "their uniqueness enforced. Please provide a value for dataset field " + "preferred_identifier." 
% self.data_catalog.catalog_json["identifier"] ) _logger.info( - 'Catalog %s is a legacy catalog - not generating pid' - % self.data_catalog.catalog_json['identifier'] + "Catalog %s is a legacy catalog - not generating pid" + % self.data_catalog.catalog_json["identifier"] ) else: if pref_id_type == IdentifierType.URN: - self.research_dataset['preferred_identifier'] = generate_uuid_identifier(urn_prefix=True) + self.research_dataset["preferred_identifier"] = generate_uuid_identifier( + urn_prefix=True + ) elif pref_id_type == IdentifierType.DOI: if not (self.catalog_is_ida() or self.catalog_is_pas()): raise Http400("Cannot create DOI for other than datasets in IDA or PAS catalog") - _logger.debug('pref_id_type == %s, generating doi' % pref_id_type) + _logger.debug("pref_id_type == %s, generating doi" % pref_id_type) doi_id = generate_doi_identifier() - self.research_dataset['preferred_identifier'] = doi_id + self.research_dataset["preferred_identifier"] = doi_id self.preservation_identifier = doi_id else: - _logger.info("Identifier type not specified in the request. Using URN identifier for pref id") + _logger.info( + "Identifier type not specified in the request. Using URN identifier for pref id" + ) # todo better to raise validation error instead - self.research_dataset['preferred_identifier'] = generate_uuid_identifier(urn_prefix=True) + self.research_dataset["preferred_identifier"] = generate_uuid_identifier( + urn_prefix=True + ) if not self.metadata_owner_org: # field metadata_owner_org is optional, but must be set. in case it is omitted, # derive from metadata_provider_org. self.metadata_owner_org = self.metadata_provider_org - if 'remote_resources' in self.research_dataset: + if "remote_resources" in self.research_dataset: self._calculate_total_remote_resources_byte_size() if self.cumulative_state == self.CUMULATIVE_STATE_CLOSED: - raise Http400('Cannot publish cumulative dataset with state closed') + raise Http400("Cannot publish cumulative dataset with state closed") elif self.cumulative_state == self.CUMULATIVE_STATE_YES: if self.preservation_state > self.PRESERVATION_STATE_INITIALIZED: - raise Http400('Dataset cannot be cumulative if it is in PAS process') + raise Http400("Dataset cannot be cumulative if it is in PAS process") self.date_cumulation_started = self.date_created @@ -273,19 +291,21 @@ def publish_dataset(self, pid_type=None): self._validate_cr_against_datacite_schema() self.add_post_request_callable( - DataciteDOIUpdate(self, self.research_dataset['preferred_identifier'], 'create') + DataciteDOIUpdate(self, self.research_dataset["preferred_identifier"], "create") ) - if self._dataset_has_rems_managed_access() and settings.REMS['ENABLED']: + if self._dataset_has_rems_managed_access() and settings.REMS["ENABLED"]: self._pre_rems_creation() - if self.api_version != self.api_meta['version']: + if self.api_version != self.api_meta["version"]: self._set_api_version() try: # drafts are validated with different schema until the publication so check that final result is valid # according to the actual data catalog. 
-            serializer = self.serializer_class(self, context={'request': self.request}, data=self._initial_data)
+            serializer = self.serializer_class(
+                self, context={"request": self.request}, data=self._initial_data
+            )
             serializer.validate_json_schema(self.research_dataset)
         except ValidationError as e:
             # apierrors couldn't handle the validation error thrown by the serializer
@@ -294,11 +314,11 @@ def publish_dataset(self, pid_type=None):
         super(Common, self).save()

         _logger.info(
-            'Published '
+            "Published "
             % (self.id, self.identifier, self.preferred_identifier)
         )

-        self.add_post_request_callable(RabbitMQPublishRecord(self, 'create'))
+        self.add_post_request_callable(RabbitMQPublishRecord(self, "create"))

     def merge_draft(self):
         """
@@ -306,10 +326,10 @@ def merge_draft(self):
         published dataset. The draft record is destroyed once changes have been successfully
         merged.
         """
-        _logger.info('Publishing changes from draft to a published record...')
+        _logger.info("Publishing changes from draft to a published record...")

         if not self.is_draft_for_another_dataset():
-            raise Http400('Dataset is not a draft for another published dataset.')
+            raise Http400("Dataset is not a draft for another published dataset.")

         # by default retrieves the CatalogRecord object (not CatalogRecordV2!!) which behaves differently!!
         origin_cr = CatalogRecordV2.objects.get(next_draft_id=self.id)
@@ -319,7 +339,10 @@ def merge_draft(self):
         origin_cr.date_modified = get_tz_aware_now_without_micros()
         origin_cr.user_modified = draft_cr.user_modified

-        if origin_cr.cumulative_state == self.CUMULATIVE_STATE_YES or origin_cr._files_added_for_first_time():
+        if (
+            origin_cr.cumulative_state == self.CUMULATIVE_STATE_YES
+            or origin_cr._files_added_for_first_time()
+        ):
             # ^ these checks should already be in place when files are added using change_files() method,
             # but checking here again.

@@ -328,20 +351,20 @@ def merge_draft(self):

             if self.draft_of.removed:
                 _logger.info(
-                    'Origin dataset is marked as removed - merging other changes to the published dataset, '
-                    'but not adding files'
+                    "Origin dataset is marked as removed - merging other changes to the published dataset, "
+                    "but not adding files"
                 )

             elif self.draft_of.deprecated:
                 _logger.info(
-                    'Origin dataset is deprecated - merging other changes to the published dataset, '
-                    'but not adding files'
+                    "Origin dataset is deprecated - merging other changes to the published dataset, "
+                    "but not adding files"
                 )

            elif draft_files_count > origin_files_count:
                 # ^ note: it should not be possible that the draft has lesser files, since
                 # removing files is not permitted in any case.
                 _logger.info(
-                    'Draft record has %d new files. Adding files to published record'
+                    "Draft record has %d new files. Adding files to published record"
                     % (draft_files_count - origin_files_count)
                 )

@@ -351,16 +374,18 @@ def merge_draft(self):
                 # files should now match, so it should be ok to just copy the directory data for file browsing
                 origin_cr._directory_data = draft_cr._directory_data

-            elif draft_files_count < origin_files_count: # pragma: no cover
+            elif draft_files_count < origin_files_count:  # pragma: no cover
                 # should never happen
-                raise Exception('Files have been removed from the draft? This should not have been permitted')
+                raise Exception(
+                    "Files have been removed from the draft? This should not have been permitted"
+                )
             else:
-                _logger.info('No new files to add in draft')
+                _logger.info("No new files to add in draft")

         if self.draft_of.deprecated:
             raise Http400(
-                'The origin dataset of this draft is deprecated. Changing files of a deprecated '
-                'dataset is not permitted. Please create a new dataset version first.'
+                "The origin dataset of this draft is deprecated. Changing files of a deprecated "
+                "dataset is not permitted. Please create a new dataset version first."
            )

         # cumulative period can be closed. opening it is prevented through
@@ -369,9 +394,9 @@ def merge_draft(self):

         # replace the "draft:" of the draft with the original pid so that
         # the save will not raise errors about it
-        draft_cr.research_dataset['preferred_identifier'] = origin_cr.preferred_identifier
+        draft_cr.research_dataset["preferred_identifier"] = origin_cr.preferred_identifier

-        if 'issued' not in draft_cr.research_dataset:
+        if "issued" not in draft_cr.research_dataset:
             draft_cr._generate_issued_date()

         # other than that, all values from research_dataset should be copied over
@@ -392,80 +417,91 @@ def merge_draft(self):

     def _pre_update_operations(self, draft_publish=False):

-        if not self._check_catalog_permissions(self.data_catalog.catalog_record_group_edit,
-                self.data_catalog.catalog_record_services_edit):
-            raise Http403({ 'detail': [ 'You are not permitted to edit datasets in this data catalog.' ]})
+        if not self._check_catalog_permissions(
+            self.data_catalog.catalog_record_group_edit,
+            self.data_catalog.catalog_record_services_edit,
+        ):
+            raise Http403(
+                {"detail": ["You are not permitted to edit datasets in this data catalog."]}
+            )

-        if self.field_changed('api_meta'):
-            self.api_meta = self._initial_data['api_meta']
+        if self.field_changed("api_meta"):
+            self.api_meta = self._initial_data["api_meta"]
             self._set_api_version()

-        if self.field_changed('identifier'):
+        if self.field_changed("identifier"):
             # read-only
-            self.identifier = self._initial_data['identifier']
+            self.identifier = self._initial_data["identifier"]

-        if self.field_changed('research_dataset.metadata_version_identifier'):
+        if self.field_changed("research_dataset.metadata_version_identifier"):
             # read-only
-            self.research_dataset['metadata_version_identifier'] = \
-                self._initial_data['research_dataset']['metadata_version_identifier']
+            self.research_dataset["metadata_version_identifier"] = self._initial_data[
+                "research_dataset"
+            ]["metadata_version_identifier"]

-        if self.field_changed('research_dataset.preferred_identifier'):
+        if self.field_changed("research_dataset.preferred_identifier"):
             if not (self.catalog_is_harvested() or self.catalog_is_legacy()):
-                raise Http400("Cannot change preferred_identifier in datasets in non-harvested catalogs")
+                raise Http400(
+                    "Cannot change preferred_identifier in datasets in non-harvested catalogs"
+                )

-        if self.field_changed('research_dataset.total_files_byte_size'):
+        if self.field_changed("research_dataset.total_files_byte_size"):
             if draft_publish:
                 # allow update when merging changes from draft to published record
                 pass
-            elif 'total_files_byte_size' in self._initial_data['research_dataset']:
+            elif "total_files_byte_size" in self._initial_data["research_dataset"]:
                 # read-only
-                self.research_dataset['total_files_byte_size'] = \
-                    self._initial_data['research_dataset']['total_files_byte_size']
+                self.research_dataset["total_files_byte_size"] = self._initial_data[
+                    "research_dataset"
+                ]["total_files_byte_size"]
             else:
-                self.research_dataset.pop('total_files_byte_size')
+ self.research_dataset.pop("total_files_byte_size") - if self.field_changed('research_dataset.total_remote_resources_byte_size'): + if self.field_changed("research_dataset.total_remote_resources_byte_size"): if draft_publish: # allow update when merging changes from draft to published record pass - elif 'total_remote_resources_byte_size' in self._initial_data['research_dataset']: + elif "total_remote_resources_byte_size" in self._initial_data["research_dataset"]: # read-only - self.research_dataset['total_remote_resources_byte_size'] = \ - self._initial_data['research_dataset']['total_remote_resources_byte_size'] + self.research_dataset["total_remote_resources_byte_size"] = self._initial_data[ + "research_dataset" + ]["total_remote_resources_byte_size"] else: - self.research_dataset.pop('total_remote_resources_byte_size') + self.research_dataset.pop("total_remote_resources_byte_size") - if self.field_changed('preservation_state'): + if self.field_changed("preservation_state"): if self.cumulative_state == self.CUMULATIVE_STATE_YES: - raise Http400('Changing preservation state is not allowed while dataset cumulation is active') + raise Http400( + "Changing preservation state is not allowed while dataset cumulation is active" + ) self._handle_preservation_state_changed() - if self.field_changed('deprecated') and self._initial_data['deprecated'] is True: + if self.field_changed("deprecated") and self._initial_data["deprecated"] is True: raise Http400("Cannot change dataset deprecation state from true to false") - if self.field_changed('date_deprecated') and self._initial_data['date_deprecated']: + if self.field_changed("date_deprecated") and self._initial_data["date_deprecated"]: raise Http400("Cannot change dataset deprecation date when it has been once set") - if self.field_changed('preservation_identifier'): - self.preservation_identifier = self._initial_data['preservation_identifier'] + if self.field_changed("preservation_identifier"): + self.preservation_identifier = self._initial_data["preservation_identifier"] if not self.metadata_owner_org: # can not be updated to null - self.metadata_owner_org = self._initial_data['metadata_owner_org'] + self.metadata_owner_org = self._initial_data["metadata_owner_org"] - if self.field_changed('metadata_provider_org'): + if self.field_changed("metadata_provider_org"): # read-only after creating - self.metadata_provider_org = self._initial_data['metadata_provider_org'] + self.metadata_provider_org = self._initial_data["metadata_provider_org"] - if self.field_changed('metadata_provider_user'): + if self.field_changed("metadata_provider_user"): # read-only after creating - self.metadata_provider_user = self._initial_data['metadata_provider_user'] + self.metadata_provider_user = self._initial_data["metadata_provider_user"] - if settings.REMS['ENABLED']: + if settings.REMS["ENABLED"]: self._pre_update_handle_rems() - if self.field_changed('cumulative_state'): + if self.field_changed("cumulative_state"): if draft_publish: # let cumulative state be updated if it is being changed when # draft record is being merged into a published record. @@ -477,10 +513,10 @@ def _pre_update_operations(self, draft_publish=False): "Use API /rpc/v2/datasets/change_cumulative_state to change cumulative state." 
) - if self.field_changed('data_catalog_id'): + if self.field_changed("data_catalog_id"): if self.catalog_is_att(self._initial_data) and self.catalog_is_ida(): - self.research_dataset.pop('remote_resources') - self.research_dataset.pop('total_remote_resources_byte_size') + self.research_dataset.pop("remote_resources") + self.research_dataset.pop("total_remote_resources_byte_size") self._handle_metadata_versioning() if draft_publish: @@ -492,29 +528,36 @@ def _pre_update_operations(self, draft_publish=False): # be updated by using the api /rest/v2/datasets/pid/files/user_metadata for existing # files, or by using /rest/v2/datasets/pid/files for adding new files with user metadata. - if self.research_dataset.get('files') != self._initial_data['research_dataset'].get('files'): - if 'files' in self._initial_data['research_dataset']: - self.research_dataset['files'] = self._initial_data['research_dataset']['files'] + if self.research_dataset.get("files") != self._initial_data["research_dataset"].get( + "files" + ): + if "files" in self._initial_data["research_dataset"]: + self.research_dataset["files"] = self._initial_data["research_dataset"]["files"] else: - del self.research_dataset['files'] - - if self.research_dataset.get('directories') != self._initial_data['research_dataset'].get('directories'): - if 'directories' in self._initial_data['research_dataset']: - self.research_dataset['directories'] = self._initial_data['research_dataset']['directories'] + del self.research_dataset["files"] + + if self.research_dataset.get("directories") != self._initial_data[ + "research_dataset" + ].get("directories"): + if "directories" in self._initial_data["research_dataset"]: + self.research_dataset["directories"] = self._initial_data["research_dataset"][ + "directories" + ] else: - del self.research_dataset['directories'] + del self.research_dataset["directories"] - if self.field_changed('research_dataset') and self.state == self.STATE_PUBLISHED: + if self.field_changed("research_dataset") and self.state == self.STATE_PUBLISHED: self.update_datacite = True if self.preservation_state in ( - self.PRESERVATION_STATE_INVALID_METADATA, # 40 - self.PRESERVATION_STATE_METADATA_VALIDATION_FAILED, # 50 - self.PRESERVATION_STATE_VALID_METADATA): # 70 + self.PRESERVATION_STATE_INVALID_METADATA, # 40 + self.PRESERVATION_STATE_METADATA_VALIDATION_FAILED, # 50 + self.PRESERVATION_STATE_VALID_METADATA, + ): # 70 # notifies the user in Hallintaliittyma that the metadata needs to be re-validated - self.preservation_state = self.PRESERVATION_STATE_VALIDATED_METADATA_UPDATED # 60 + self.preservation_state = self.PRESERVATION_STATE_VALIDATED_METADATA_UPDATED # 60 self.preservation_state_modified = self.date_modified if self.catalog_versions_datasets(): @@ -525,7 +568,7 @@ def _pre_update_operations(self, draft_publish=False): self._handle_metadata_versioning() elif self.catalog_is_harvested(): - if self.field_changed('research_dataset.preferred_identifier'): + if self.field_changed("research_dataset.preferred_identifier"): self._handle_preferred_identifier_changed() else: @@ -540,34 +583,37 @@ def _update_dataset_specific_metadata(self, file_changes, operation_is_create=Fa schema. For existing entries, the existing entry is updated either by replacing it, or field by field, if using PATCH. 
""" - _logger.debug('Updating files dataset-specific metadata...') + _logger.debug("Updating files dataset-specific metadata...") - for object_type in ('files', 'directories'): + for object_type in ("files", "directories"): if not file_changes.get(object_type): - _logger.debug('No objects of type %s - continuing' % object_type) + _logger.debug("No objects of type %s - continuing" % object_type) continue # filter in only entries that ADD files. makes no sense to keep entries that exclude stuff add_entries = [ - obj for obj in file_changes[object_type] - if obj.get('exclude', False) is False - and obj.get('delete', False) is False + obj + for obj in file_changes[object_type] + if obj.get("exclude", False) is False and obj.get("delete", False) is False ] # remove the exclude and delete -keys, if they happen to exist. after that, if the obj contains # more than 1 key (the obj identifier), we know that it must contain dataset-specific metadata too. for obj in add_entries: - obj.pop('exclude', None) - obj.pop('delete', None) + obj.pop("exclude", None) + obj.pop("delete", None) - add_entries = [ obj for obj in add_entries if len(obj) > 1 ] + add_entries = [obj for obj in add_entries if len(obj) > 1] - _logger.debug('Received %d add entries' % len(add_entries)) + _logger.debug("Received %d add entries" % len(add_entries)) if operation_is_create: - _logger.debug('Note: operation_is_create=True - replacing all %s with received entries' % object_type) + _logger.debug( + "Note: operation_is_create=True - replacing all %s with received entries" + % object_type + ) if add_entries: # if there are no entries to add, do NOT set an empty array! @@ -577,14 +623,16 @@ def _update_dataset_specific_metadata(self, file_changes, operation_is_create=Fa # else -> update type operation - _logger.debug('Update operation type = %s' % self.request.META['REQUEST_METHOD']) + _logger.debug("Update operation type = %s" % self.request.META["REQUEST_METHOD"]) # take entries that delete metadata, and process them at the end delete_entries = set( - obj['identifier'] for obj in file_changes[object_type] if obj.get('delete', False) is True + obj["identifier"] + for obj in file_changes[object_type] + if obj.get("delete", False) is True ) - _logger.debug('Received %d delete entries' % len(delete_entries)) + _logger.debug("Received %d delete entries" % len(delete_entries)) new_entries = [] @@ -597,9 +645,9 @@ def _update_dataset_specific_metadata(self, file_changes, operation_is_create=Fa for current_obj in self.research_dataset.get(object_type, []): - if received_obj['identifier'] == current_obj['identifier']: + if received_obj["identifier"] == current_obj["identifier"]: - if self.request.META['REQUEST_METHOD'] in ('POST', 'PUT'): + if self.request.META["REQUEST_METHOD"] in ("POST", "PUT"): # replace object: drop all keys, and add only received keys below since re-assigning the # current_obj a new reference does not change the actual object in the array. 
current_obj.clear() @@ -624,19 +672,24 @@ def _update_dataset_specific_metadata(self, file_changes, operation_is_create=Fa # finally, delete all dataset-specific metadata entries as requested self.research_dataset[object_type] = [ - obj for obj in self.research_dataset[object_type] - if obj['identifier'] not in delete_entries + obj + for obj in self.research_dataset[object_type] + if obj["identifier"] not in delete_entries ] if not self.research_dataset[object_type]: # do not leave empty arrays in the dict del self.research_dataset[object_type] - _logger.debug('Number of %s metadata entries added: %d' % (object_type, len(new_entries))) - _logger.debug('Number of %s metadata entries updated: %d' % (object_type, num_updated)) - _logger.debug('Number of %s metadata entries deleted: %d' % (object_type, len(delete_entries))) + _logger.debug( + "Number of %s metadata entries added: %d" % (object_type, len(new_entries)) + ) + _logger.debug("Number of %s metadata entries updated: %d" % (object_type, num_updated)) + _logger.debug( + "Number of %s metadata entries deleted: %d" % (object_type, len(delete_entries)) + ) - super(Common, self).save(update_fields=['research_dataset']) + super(Common, self).save(update_fields=["research_dataset"]) def _files_added_for_first_time(self): """ @@ -651,12 +704,12 @@ def _cumulative_add_check_excludes(self, file_changes): raising an error and stopping the operation altogether should be the path of least astonishment. """ - for object_type in ('files', 'directories'): + for object_type in ("files", "directories"): for obj in file_changes.get(object_type, []): - if obj.get('exclude', False) is True: + if obj.get("exclude", False) is True: raise Http400( - 'Excluding files from a cumulative dataset is not permitted. ' - 'Please create a new dataset version first.' + "Excluding files from a cumulative dataset is not permitted. " + "Please create a new dataset version first." ) def change_files(self, file_changes, operation_is_create=False): @@ -706,23 +759,25 @@ def change_files(self, file_changes, operation_is_create=False): Here we can't trust in method _operation_is_create(), since it tries to detect creating phase from presence of self.id, which in this case is already there since we are so late in the create process. """ - _logger.debug('Changing dataset included files...') - _logger.debug('Note: operation_is_create=%r' % operation_is_create) + _logger.debug("Changing dataset included files...") + _logger.debug("Note: operation_is_create=%r" % operation_is_create) if self.deprecated: raise Http400( - 'Changing files of a deprecated dataset is not permitted. Please create a new dataset version first.' + "Changing files of a deprecated dataset is not permitted. Please create a new dataset version first." 
) assert type(file_changes) is dict - assert hasattr(self, 'request') and self.request is not None + assert hasattr(self, "request") and self.request is not None - if not (file_changes.get('files') or file_changes.get('directories')): - _logger.debug('Received data does not include files or directories - returning') + if not (file_changes.get("files") or file_changes.get("directories")): + _logger.debug("Received data does not include files or directories - returning") return # create an instance of the serializer for later validations - serializer = self.serializer_class(self, context={'request': self.request}, data=self._initial_data) + serializer = self.serializer_class( + self, context={"request": self.request}, data=self._initial_data + ) if operation_is_create: # todo: probably it would be best to leave this timestamp field empty on initial create @@ -734,7 +789,7 @@ def change_files(self, file_changes, operation_is_create=False): # a published dataset. draft datasets in general allow for much greater freedom # in making changes to the files of a dataset. - _logger.debug('self.state == %s' % self.state) + _logger.debug("self.state == %s" % self.state) if self.state == self.STATE_DRAFT: @@ -749,8 +804,8 @@ def change_files(self, file_changes, operation_is_create=False): if self.draft_of.deprecated: raise Http400( - 'The origin dataset of this draft is deprecated. Changing files of a deprecated ' - 'dataset is not permitted. Please create a new dataset version first.' + "The origin dataset of this draft is deprecated. Changing files of a deprecated " + "dataset is not permitted. Please create a new dataset version first." ) elif self.draft_of._files_added_for_first_time(): @@ -764,10 +819,10 @@ def change_files(self, file_changes, operation_is_create=False): self._cumulative_add_check_excludes(file_changes) else: raise Http400( - 'Changing files of a published dataset is not permitted. ' - 'Please create a new dataset version first. ' - 'If you need to continuously add new files to a published dataset, ' - 'consider creating a new cumulative dataset.' + "Changing files of a published dataset is not permitted. " + "Please create a new dataset version first. " + "If you need to continuously add new files to a published dataset, " + "consider creating a new cumulative dataset." ) else: # "normal case": a new dataset in draft state. file changes are generally @@ -781,8 +836,8 @@ def change_files(self, file_changes, operation_is_create=False): if self.next_draft: raise Http400( - 'The dataset has an existing unmerged draft. While the draft exists, new files can only be ' - 'added to the draft record.' + "The dataset has an existing unmerged draft. While the draft exists, new files can only be " + "added to the draft record." ) elif self._files_added_for_first_time(): # first update from 0 to n files should be allowed even for a published dataset, and @@ -796,10 +851,10 @@ def change_files(self, file_changes, operation_is_create=False): else: # published dataset with no special status. changing files is not permitted. raise Http400( - 'Changing files of a published dataset is not permitted. ' - 'Please create a new dataset version first. ' - 'If you need to continuously add new files to a published dataset, ' - 'consider creating a new cumulative dataset.' + "Changing files of a published dataset is not permitted. " + "Please create a new dataset version first. " + "If you need to continuously add new files to a published dataset, " + "consider creating a new cumulative dataset." 
             )

         # validating received data in this method is only necessary for update operations. for
@@ -815,50 +870,63 @@ def change_files(self, file_changes, operation_is_create=False):
             self.date_last_cumulative_addition = self.date_modified

         # for counting changes at the end
-        files_before_set = set(id for id in self.files.all().values_list('id', flat=True))
+        files_before_set = set(id for id in self.files.all().values_list("id", flat=True))

         files_excluded = False

-        for dr in file_changes.get('directories', []):
+        for dr in file_changes.get("directories", []):
             # process directory add end exclude entries in the order they are provided
             files = self._get_dataset_selected_file_ids(
-                dr['identifier'], 'directory', exclude=dr.get('exclude', False))
+                dr["identifier"], "directory", exclude=dr.get("exclude", False)
+            )

-            if dr.get('exclude', False) is False:
-                _logger.debug('Found %d files to add based on received directory objects' % len(files))
+            if dr.get("exclude", False) is False:
+                _logger.debug(
+                    "Found %d files to add based on received directory objects" % len(files)
+                )
                 self.files.add(*files)
             else:
-                _logger.debug('Found %d files to exclude based on received directory objects' % len(files))
+                _logger.debug(
+                    "Found %d files to exclude based on received directory objects" % len(files)
+                )
                 files_excluded = True
                 self.files.remove(*files)

         # process individual file add and exclude entries. order does not matter.
         files_add = [
-            f['identifier'] for f in file_changes.get('files', [])
-            if f.get('exclude', False) is False
+            f["identifier"]
+            for f in file_changes.get("files", [])
+            if f.get("exclude", False) is False
         ]
         files_exclude = [
-            f['identifier'] for f in file_changes.get('files', [])
-            if f.get('exclude', False) is True
+            f["identifier"]
+            for f in file_changes.get("files", [])
+            if f.get("exclude", False) is True
         ]

-        file_ids_add = self._get_dataset_selected_file_ids(files_add, 'file', exclude=False)
-        file_ids_exclude = self._get_dataset_selected_file_ids(files_exclude, 'file', exclude=True)
+        file_ids_add = self._get_dataset_selected_file_ids(files_add, "file", exclude=False)
+        file_ids_exclude = self._get_dataset_selected_file_ids(files_exclude, "file", exclude=True)

-        _logger.debug('Found %d files to add based on received file objects' % len(file_ids_add))
-        _logger.debug('Found %d files to exclude based on received file objects' % len(file_ids_exclude))
+        _logger.debug("Found %d files to add based on received file objects" % len(file_ids_add))
+        _logger.debug(
+            "Found %d files to exclude based on received file objects" % len(file_ids_exclude)
+        )

         self.files.add(*file_ids_add)
         self.files.remove(*file_ids_exclude)

         # do final checking that resulting dataset contains files only from a single project
-        projects = self.files.all().values_list('project_identifier', flat=True).distinct('project_identifier')
+        projects = (
+            self.files.all()
+            .values_list("project_identifier", flat=True)
+            .distinct("project_identifier")
+        )

         if len(projects) > 1:
-            raise Http400('All added files must be from the same project')
+            raise Http400("All added files must be from the same project")

         if file_ids_exclude:
             files_excluded = True
@@ -870,7 +938,9 @@ def change_files(self, file_changes, operation_is_create=False):
             self._clear_non_included_file_metadata_entries()

         # when adding new files, the entries can contain dataset-specific metadata
-        self._update_dataset_specific_metadata(file_changes, operation_is_create=operation_is_create)
+        self._update_dataset_specific_metadata(
+            file_changes, operation_is_create=operation_is_create
+        )

         # update total file size, and numbers for directories that are shown when browsing files
         self._calculate_total_files_byte_size()
@@ -886,7 +956,7 @@ def change_files(self, file_changes, operation_is_create=False):
         super(Common, self).save()

         # count effect of performed actions: number of files added and excluded
-        file_ids_after = self.files.all().values_list('id', flat=True)
+        file_ids_after = self.files.all().values_list("id", flat=True)

         if operation_is_create:
             files_added_count = len(file_ids_after)
@@ -896,12 +966,12 @@ def change_files(self, file_changes, operation_is_create=False):
             files_excluded_count = len(files_before_set.difference(files_after_set))

         ret = {
-            'files_added': files_added_count,
+            "files_added": files_added_count,
         }

         if not operation_is_create:
             # operation is update. for a create operation, there would never be removed files
-            ret['files_removed'] = files_excluded_count
+            ret["files_removed"] = files_excluded_count

         return ret

@@ -909,19 +979,19 @@ def _get_dataset_selected_file_ids(self, identifier_list, identifier_type, exclu
         """
         Return a list of ids of all unique individual files currently in the db.
         """
-        assert identifier_type in ('file', 'directory')
+        assert identifier_type in ("file", "directory")

         if type(identifier_list) is not list:
             # this method can accept a single identifier too
            identifier_list = [identifier_list]

         file_ids = []
-        file_changes = { 'changed_projects': defaultdict(set) }
+        file_changes = {"changed_projects": defaultdict(set)}

-        if identifier_type == 'file':
+        if identifier_type == "file":
             file_ids = self._get_file_ids_from_file_list(identifier_list, file_changes, exclude)

-        elif identifier_type == 'directory':
+        elif identifier_type == "directory":
             file_ids = self._get_file_ids_from_dir_list(identifier_list, file_changes, exclude)

         self._check_changed_files_permissions(file_changes)
@@ -938,25 +1008,28 @@ def _get_file_ids_from_file_list(self, file_identifiers, file_changes, exclude):

         if exclude:
             # retrieve files for the purpose excluding files from the dataset. search only from current dataset files
-            files = self.files \
-                .filter(identifier__in=file_identifiers) \
-                .values('id', 'identifier', 'project_identifier')
+            files = self.files.filter(identifier__in=file_identifiers).values(
+                "id", "identifier", "project_identifier"
+            )
         else:
             # retrieve files for the purpose of adding new files. search only from files not already part of the dataset
-            files = File.objects \
-                .filter(identifier__in=file_identifiers) \
-                .exclude(id__in=self.files.all()) \
-                .values('id', 'identifier', 'project_identifier')
+            files = (
+                File.objects.filter(identifier__in=file_identifiers)
+                .exclude(id__in=self.files.all())
+                .values("id", "identifier", "project_identifier")
+            )

         if len(files) != len(file_identifiers):

             missing_identifiers = [
-                pid for pid in file_identifiers if pid not in set([f['identifier'] for f in files])
+                pid for pid in file_identifiers if pid not in set([f["identifier"] for f in files])
             ]

             # ensure these were not already included in the dataset
-            from_record = self.files.filter(identifier__in=missing_identifiers).values_list('identifier', flat=True)
+            from_record = self.files.filter(identifier__in=missing_identifiers).values_list(
+                "identifier", flat=True
+            )

             if len(missing_identifiers) == len(from_record):
                 # all files were already part of the dataset. no files to add
                 return []

             # otherwise, some were actually not found

-            missing_identifiers = [ f for f in missing_identifiers if f not in from_record ]
+            missing_identifiers = [f for f in missing_identifiers if f not in from_record]

-            raise Http400({
-                'detail': ['Some requested files were not found. File identifiers not found:'],
-                'data': missing_identifiers
-            })
+            raise Http400(
+                {
+                    "detail": ["Some requested files were not found. File identifiers not found:"],
+                    "data": missing_identifiers,
+                }
+            )

-        file_changes['changed_projects']['files_added'].add(files[0]['project_identifier'])
+        file_changes["changed_projects"]["files_added"].add(files[0]["project_identifier"])

-        return [ f['id'] for f in files ]
+        return [f["id"] for f in files]

     def _get_file_ids_from_dir_list(self, dir_identifiers, file_changes, exclude):
         """
@@ -984,54 +1059,59 @@ def _get_file_ids_from_dir_list(self, dir_identifiers, file_changes, exclude):
         if not dir_identifiers:
             return []

-        dirs = Directory.objects.filter(identifier__in=dir_identifiers).values('project_identifier', 'directory_path')
+        dirs = Directory.objects.filter(identifier__in=dir_identifiers).values(
+            "project_identifier", "directory_path"
+        )

         if len(dirs) == 0:
-            raise Http400('no directories matched given identifiers')
+            raise Http400("no directories matched given identifiers")
         elif len(dirs) != len(dir_identifiers):
             missing_identifiers = [
-                pid for pid in dir_identifiers if pid not in set([dr['identifier'] for dr in dirs])
+                pid for pid in dir_identifiers if pid not in set([dr["identifier"] for dr in dirs])
            ]
-            raise Http400({
-                'detail': ['Some requested directories were not found. Directory identifiers not found:'],
-                'data': missing_identifiers
-            })
+            raise Http400(
+                {
+                    "detail": [
+                        "Some requested directories were not found. Directory identifiers not found:"
+                    ],
+                    "data": missing_identifiers,
+                }
+            )

-        project_identifier = dirs[0]['project_identifier']
+        project_identifier = dirs[0]["project_identifier"]

         file_filter = Q()

         for dr in dirs:
-            if dr['project_identifier'] != project_identifier:
-                raise Http400('All added files must be from the same project')
+            if dr["project_identifier"] != project_identifier:
+                raise Http400("All added files must be from the same project")

-            if dr['directory_path'] == '/':
+            if dr["directory_path"] == "/":
                file_filter = Q()
                 break
             else:
-                file_filter |= Q(file_path__startswith='%s/' % dr['directory_path'])
+                file_filter |= Q(file_path__startswith="%s/" % dr["directory_path"])

         # results in ((path like x or path like y...) and project = z)
         file_filter &= Q(project_identifier=project_identifier)

-        file_changes['changed_projects']['files_added'].add(project_identifier)
+        file_changes["changed_projects"]["files_added"].add(project_identifier)

         if exclude:
             # retrieve files for the purpose excluding files from the dataset. search only from current dataset files
-            file_ids = self.files \
-                .filter(file_filter) \
-                .values_list('id', flat=True)
+            file_ids = self.files.filter(file_filter).values_list("id", flat=True)
         else:
             # retrieve files for the purpose of adding new files. search only from files not already part of the dataset
-            file_ids = File.objects \
-                .filter(file_filter) \
-                .exclude(id__in=self.files.all()) \
-                .values_list('id', flat=True)
+            file_ids = (
+                File.objects.filter(file_filter)
+                .exclude(id__in=self.files.all())
+                .values_list("id", flat=True)
+            )

-        return [ id for id in file_ids ]
+        return [id for id in file_ids]

     def _clear_non_included_file_metadata_entries(self, raise_on_not_found=False):
         """
@@ -1041,73 +1121,86 @@ def _clear_non_included_file_metadata_entries(self, raise_on_not_found=False):

         Parameter raise_on_not_found on can be used to raise an error if any metadata entry
         is not actually an included file in the dataset.
         """
-        _logger.debug('Clearing non-included file metadata entries...')
-        _logger.debug('Note: raise_on_not_found=%r' % raise_on_not_found)
+        _logger.debug("Clearing non-included file metadata entries...")
+        _logger.debug("Note: raise_on_not_found=%r" % raise_on_not_found)

         # files

-        file_identifiers = [ f['identifier'] for f in self.research_dataset.get('files', []) ]
+        file_identifiers = [f["identifier"] for f in self.research_dataset.get("files", [])]

         if file_identifiers:

             included_files = set(
-                idf for idf in self.files.filter(identifier__in=file_identifiers).values_list('identifier', flat=True)
+                idf
+                for idf in self.files.filter(identifier__in=file_identifiers).values_list(
+                    "identifier", flat=True
+                )
             )

             if raise_on_not_found:
                 # note: use of this parameter should only be relevant when updating dataset-specific metadata.
- not_included_dirs = [ idf for idf in dir_identifiers if idf not in included_dirs ] + not_included_dirs = [idf for idf in dir_identifiers if idf not in included_dirs] if not_included_dirs: - raise Http400({ - 'detail': 'The following directories do not contain any files in the dataset. ' - 'Please add files to the dataset first.', - 'data': not_included_dirs - }) - - self.research_dataset['directories'] = [ - dr for dr in self.research_dataset['directories'] if dr['identifier'] in included_dirs + raise Http400( + { + "detail": "The following directories do not contain any files in the dataset. " + "Please add files to the dataset first.", + "data": not_included_dirs, + } + ) + + self.research_dataset["directories"] = [ + dr + for dr in self.research_dataset["directories"] + if dr["identifier"] in included_dirs ] def update_files_dataset_specific_metadata(self, md_changes): @@ -1115,21 +1208,21 @@ def update_files_dataset_specific_metadata(self, md_changes): Update contents of fields research_dataset.files and research_dataset.directories, i.e. "user metadata" or "dataset-specific metadata". """ - _logger.info('Updating dataset file metadata...') + _logger.info("Updating dataset file metadata...") serializer = self.serializer_class(self) # note: this does json schema validation, and its output from the api is not user friendly # at all, but its better than nothing... - if self.request.META['REQUEST_METHOD'] == 'PUT': + if self.request.META["REQUEST_METHOD"] == "PUT": # do not validate for patch, since it can contain only partial fields serializer.validate_research_dataset_files(md_changes) - for object_type in ('files', 'directories'): + for object_type in ("files", "directories"): for obj in md_changes.get(object_type, []): - if 'identifier' not in obj: - raise Http400('\'identifier\' is a required field in all metadata entries.') + if "identifier" not in obj: + raise Http400("'identifier' is a required field in all metadata entries.") self._update_dataset_specific_metadata(md_changes, operation_is_create=False) self._clear_non_included_file_metadata_entries(raise_on_not_found=True) @@ -1138,7 +1231,7 @@ def update_files_dataset_specific_metadata(self, md_changes): files_and_dirs = {} - for object_type in ('files', 'directories'): + for object_type in ("files", "directories"): if object_type in self.research_dataset: files_and_dirs[object_type] = self.research_dataset[object_type] @@ -1156,18 +1249,20 @@ def create_draft(self): """ Create a new draft of a published dataset, that can later be merged back to the original published dataset. 
""" - _logger.info('Creating a draft of a published dataset...') + _logger.info("Creating a draft of a published dataset...") if self.is_draft_for_another_dataset(): - raise Http400('Dataset is already a draft for another published dataset.') + raise Http400("Dataset is already a draft for another published dataset.") elif self.state == self.STATE_DRAFT: # a new dataset in draft state - raise Http400('Dataset is already draft.') + raise Http400("Dataset is already draft.") if self.next_draft: raise Http400( - 'The dataset already has an existing unmerged draft: {}'.format(self.next_draft.preferred_identifier) + "The dataset already has an existing unmerged draft: {}".format( + self.next_draft.preferred_identifier + ) ) origin_cr = self @@ -1176,7 +1271,7 @@ def create_draft(self): draft_cr.date_created = get_tz_aware_now_without_micros() draft_cr.state = self.STATE_DRAFT draft_cr.cumulative_state = origin_cr.cumulative_state - draft_cr.research_dataset['preferred_identifier'] = 'draft:%s' % draft_cr.identifier + draft_cr.research_dataset["preferred_identifier"] = "draft:%s" % draft_cr.identifier super(CatalogRecord, draft_cr).save() @@ -1192,16 +1287,16 @@ def create_draft(self): super(CatalogRecord, origin_cr).save() log_args = { - 'event': 'dataset_draft_created', - 'user_id': draft_cr.user_created or draft_cr.service_created, - 'catalogrecord': { - 'identifier': draft_cr.identifier, - 'preferred_identifier': draft_cr.preferred_identifier, - 'data_catalog': draft_cr.data_catalog.catalog_json['identifier'], - 'date_created': datetime_to_str(draft_cr.date_created), - 'metadata_owner_org': draft_cr.metadata_owner_org, - 'state': draft_cr.state, - } + "event": "dataset_draft_created", + "user_id": draft_cr.user_created or draft_cr.service_created, + "catalogrecord": { + "identifier": draft_cr.identifier, + "preferred_identifier": draft_cr.preferred_identifier, + "data_catalog": draft_cr.data_catalog.catalog_json["identifier"], + "date_created": datetime_to_str(draft_cr.date_created), + "metadata_owner_org": draft_cr.metadata_owner_org, + "state": draft_cr.state, + }, } self.add_post_request_callable(DelayedLog(**log_args)) @@ -1211,22 +1306,22 @@ def create_new_version(self): A method to "explicitly" create a new version of a dataset, which is called from a particular RPC API endpoint. """ - _logger.info('Creating a new dataset version...') + _logger.info("Creating a new dataset version...") if self.is_draft_for_another_dataset(): raise Http400( - 'Can\'t create new version. Dataset is a draft for another published dataset: %s' + "Can't create new version. Dataset is a draft for another published dataset: %s" % self.draft_of.identifier ) elif self.has_next_draft(): raise Http400( - 'Can\'t create new version. Dataset has an unmerged draft: %s' + "Can't create new version. Dataset has an unmerged draft: %s" % self.next_draft.identifier ) elif not self.catalog_versions_datasets(): - raise Http400('Data catalog does not allow dataset versioning') + raise Http400("Data catalog does not allow dataset versioning") elif self.state == self.STATE_DRAFT: - raise Http400('Cannot create new version from draft dataset') + raise Http400("Cannot create new version from draft dataset") self._new_version = self._create_new_dataset_version_template() self._create_new_dataset_version() @@ -1235,16 +1330,21 @@ def _create_new_dataset_version(self): """ Create a new dataset version of the record who calls this method. 
""" - assert hasattr(self, '_new_version'), 'self._new_version should have been set in a previous step' + assert hasattr( + self, "_new_version" + ), "self._new_version should have been set in a previous step" old_version = self if old_version.next_dataset_version_id: raise Http400( - 'Dataset already has a next version: %s' % old_version.next_dataset_version.identifier + "Dataset already has a next version: %s" + % old_version.next_dataset_version.identifier ) - _logger.info('Creating new dataset version from old CatalogRecord: %s' % old_version.identifier) + _logger.info( + "Creating new dataset version from old CatalogRecord: %s" % old_version.identifier + ) new_version = self._new_version new_version.state = self.STATE_DRAFT @@ -1275,10 +1375,10 @@ def _create_new_dataset_version(self): new_version.user_created = self.request.user.username new_version.research_dataset = deepcopy(old_version.research_dataset) - new_version.research_dataset['metadata_version_identifier'] = generate_uuid_identifier() + new_version.research_dataset["metadata_version_identifier"] = generate_uuid_identifier() # temporary "pid" until draft is published - new_version.research_dataset['preferred_identifier'] = 'draft:%s' % self.identifier + new_version.research_dataset["preferred_identifier"] = "draft:%s" % self.identifier if old_version.files.exists(): # copy all files from previous version to new version. @@ -1302,13 +1402,13 @@ def _create_new_dataset_version(self): # they see as relevant. we also dont want null values in there old_editor = deepcopy(new_version.editor) new_version.editor = {} - if 'owner_id' in old_editor: - new_version.editor['owner_id'] = old_editor['owner_id'] - if 'creator_id' in old_editor: - new_version.editor['creator_id'] = old_editor['creator_id'] - if 'identifier' in old_editor: + if "owner_id" in old_editor: + new_version.editor["owner_id"] = old_editor["owner_id"] + if "creator_id" in old_editor: + new_version.editor["creator_id"] = old_editor["creator_id"] + if "identifier" in old_editor: # todo this probably does not make sense... ? 
- new_version.editor['identifier'] = old_editor['identifier'] + new_version.editor["identifier"] = old_editor["identifier"] # v2 api successfully invoked, change the api version to prevent further updates on v1 api self._set_api_version() @@ -1316,23 +1416,24 @@ def _create_new_dataset_version(self): super(Common, new_version).save() super(Common, old_version).save() - _logger.info('New dataset version draft created, identifier %s' % new_version.identifier) + _logger.info("New dataset version draft created, identifier %s" % new_version.identifier) log_args = { - 'catalogrecord': { - 'identifier': new_version.identifier, - 'preferred_identifier': new_version.preferred_identifier, - 'data_catalog': new_version.data_catalog.catalog_json['identifier'], - 'date_created': datetime_to_str(new_version.date_created), - 'metadata_owner_org': new_version.metadata_owner_org, - 'state': new_version.state, + "catalogrecord": { + "identifier": new_version.identifier, + "preferred_identifier": new_version.preferred_identifier, + "data_catalog": new_version.data_catalog.catalog_json["identifier"], + "date_created": datetime_to_str(new_version.date_created), + "metadata_owner_org": new_version.metadata_owner_org, + "state": new_version.state, }, - 'user_id': new_version.user_created or new_version.service_created, + "user_id": new_version.user_created or new_version.service_created, } - log_args['event'] = 'dataset_version_created' - log_args['catalogrecord']['previous_version_preferred_identifier'] \ - = new_version.previous_dataset_version.preferred_identifier + log_args["event"] = "dataset_version_created" + log_args["catalogrecord"][ + "previous_version_preferred_identifier" + ] = new_version.previous_dataset_version.preferred_identifier self.add_post_request_callable(DelayedLog(**log_args)) @@ -1344,23 +1445,24 @@ def change_cumulative_state(self, new_state): creating a new version into draft state first. 
""" if self.next_dataset_version: - raise Http400('Cannot change cumulative_state on old dataset version') + raise Http400("Cannot change cumulative_state on old dataset version") - cumulative_state_valid_values = [ choice[0] for choice in self.CUMULATIVE_STATE_CHOICES ] + cumulative_state_valid_values = [choice[0] for choice in self.CUMULATIVE_STATE_CHOICES] try: new_state = int(new_state) assert new_state in cumulative_state_valid_values except: raise Http400( - 'cumulative_state must be one of: %s' % ', '.join(str(x) for x in cumulative_state_valid_values) + "cumulative_state must be one of: %s" + % ", ".join(str(x) for x in cumulative_state_valid_values) ) if self.cumulative_state == new_state: - _logger.info('No change in cumulative_state') + _logger.info("No change in cumulative_state") return - _logger.info('Changing cumulative_state from %d to %d' % (self.cumulative_state, new_state)) + _logger.info("Changing cumulative_state from %d to %d" % (self.cumulative_state, new_state)) self.date_modified = get_tz_aware_now_without_micros() self.service_modified = self.request.user.username if self.request.user.is_service else None @@ -1385,7 +1487,9 @@ def change_cumulative_state(self, new_state): self.date_last_cumulative_addition = None elif new_state == self.CUMULATIVE_STATE_CLOSED: - raise Http400('For a new dataset, cumulative_state must be \'not cumulative\' or \'open\'') + raise Http400( + "For a new dataset, cumulative_state must be 'not cumulative' or 'open'" + ) elif new_state == self.CUMULATIVE_STATE_YES: # start date is set during publishing @@ -1403,16 +1507,16 @@ def change_cumulative_state(self, new_state): if new_state == self.CUMULATIVE_STATE_NO: raise Http400( - 'Cumulative dataset cannot be set to non-cumulative dataset. ' - 'If you want to stop active cumulation, set cumulative status to closed.' + "Cumulative dataset cannot be set to non-cumulative dataset. " + "If you want to stop active cumulation, set cumulative status to closed." ) elif new_state == self.CUMULATIVE_STATE_CLOSED: if comparison_cr.cumulative_state == self.CUMULATIVE_STATE_NO: - raise Http400('Cumulation cannot be closed for non-cumulative dataset') + raise Http400("Cumulation cannot be closed for non-cumulative dataset") elif self.cumulative_state == self.CUMULATIVE_STATE_CLOSED: - _logger.info('Note: cumulative_state is already CLOSED. Doing nothing') + _logger.info("Note: cumulative_state is already CLOSED. Doing nothing") return self.date_cumulation_ended = self.date_modified @@ -1423,18 +1527,18 @@ def change_cumulative_state(self, new_state): if comparison_cr.preservation_state > self.PRESERVATION_STATE_INITIALIZED: raise Http400( - 'Cumulative datasets are not allowed in PAS process. Change preservation_state ' - 'to 0 in order to change the dataset to cumulative.' + "Cumulative datasets are not allowed in PAS process. Change preservation_state " + "to 0 in order to change the dataset to cumulative." ) elif comparison_cr.files.count() > 0: # permits opening cumulativity for a published dataset that does not yet # have any files. raise Http400( - 'Can\'t set dataset cumulative: Dataset already has files. Please create ' - 'a new dataset version first.' + "Can't set dataset cumulative: Dataset already has files. Please create " + "a new dataset version first." ) elif self.cumulative_state == self.CUMULATIVE_STATE_YES: - _logger.info('Note: cumulative_state is already YES. Doing nothing') + _logger.info("Note: cumulative_state is already YES. 
Doing nothing") return self.date_cumulation_started = self.date_modified @@ -1447,7 +1551,7 @@ def change_cumulative_state(self, new_state): super(CatalogRecord, self).save() # Handles with drafts - self.add_post_request_callable(RabbitMQPublishRecord(self, 'update')) + self.add_post_request_callable(RabbitMQPublishRecord(self, "update")) def calculate_directory_byte_sizes_and_file_counts(self): """ @@ -1456,7 +1560,7 @@ def calculate_directory_byte_sizes_and_file_counts(self): if not self.files.exists(): return - _logger.info('Calculating directory byte_sizes and file_counts...') + _logger.info("Calculating directory byte_sizes and file_counts...") parent_dir = self.files.first().parent_directory @@ -1470,4 +1574,4 @@ def calculate_directory_byte_sizes_and_file_counts(self): root_dir.calculate_byte_size_and_file_count_for_cr(self.id, directory_data) self._directory_data = directory_data - super(Common, self).save(update_fields=['_directory_data']) + super(Common, self).save(update_fields=["_directory_data"]) diff --git a/src/metax_api/models/common.py b/src/metax_api/models/common.py index b448a1b1..62b5f0b6 100755 --- a/src/metax_api/models/common.py +++ b/src/metax_api/models/common.py @@ -15,7 +15,6 @@ class CommonManager(models.Manager): - def get_queryset(self): return super(CommonManager, self).get_queryset().filter(active=True, removed=False) @@ -31,10 +30,16 @@ class Common(models.Model): user_modified = models.CharField(max_length=200, null=True) date_created = models.DateTimeField() user_created = models.CharField(max_length=200, null=True) - service_modified = models.CharField(max_length=200, null=True, - help_text='Name of the service who last modified the record') - service_created = models.CharField(max_length=200, null=True, - help_text='Name of the service who created the record') + service_modified = models.CharField( + max_length=200, + null=True, + help_text="Name of the service who last modified the record", + ) + service_created = models.CharField( + max_length=200, + null=True, + help_text="Name of the service who created the record", + ) date_removed = models.DateTimeField(null=True) # END OF MODEL FIELD DEFINITIONS # @@ -50,14 +55,14 @@ class Common(models.Model): class Meta: indexes = [ - models.Index(fields=['active']), - models.Index(fields=['removed']), + models.Index(fields=["active"]), + models.Index(fields=["removed"]), ] abstract = True def __init__(self, *args, **kwargs): - if '__request' in kwargs: - self.request = kwargs.pop('__request') + if "__request" in kwargs: + self.request = kwargs.pop("__request") super(Common, self).__init__(*args, **kwargs) @@ -65,9 +70,9 @@ def __init__(self, *args, **kwargs): self._tracked_fields = [] self.track_fields( - 'date_created', - 'user_created', - 'service_created', + "date_created", + "user_created", + "service_created", ) def save(self, *args, **kwargs): @@ -83,7 +88,7 @@ def force_save(self, *args, **kwargs): checks. Should be used only in testing to set up data for a test case. """ if not executing_test_case(): - raise Exception('this method should only be used inside a test case') + raise Exception("this method should only be used inside a test case") super(Common, self).save(*args, **kwargs) self._update_tracked_field_values() @@ -92,7 +97,7 @@ def remove(self): Mark record as removed, never delete from db. 
""" self._set_removed() - super().save(update_fields=['removed', 'date_removed', 'date_modified']) + super().save(update_fields=["removed", "date_removed", "date_modified"]) self._update_tracked_field_values() def user_has_access(self, request): @@ -140,7 +145,7 @@ def track_fields(self, *fields): self._tracked_fields.append(field_name) - if '.' in field_name: + if "." in field_name: self._track_json_field(field_name) else: if self._field_is_loaded(field_name): @@ -160,7 +165,7 @@ def _unset_removed(self): self.date_modified = get_tz_aware_now_without_micros() def _track_json_field(self, field_name): - field_name, json_field_name = field_name.split('.') + field_name, json_field_name = field_name.split(".") if self._field_is_loaded(field_name) and json_field_name in getattr(self, field_name): json_field_value = getattr(self, field_name)[json_field_name] @@ -194,7 +199,7 @@ def field_changed(self, field_name): """ Check if a tracked field has changed since last saved to db. """ - if '.' in field_name: + if "." in field_name: return self._json_field_changed(field_name) if not self._field_is_loaded(field_name): @@ -203,13 +208,13 @@ def field_changed(self, field_name): if self._field_is_tracked(field_name): # pragma: no cover if not self._field_initial_value_loaded(field_name): self._raise_field_not_tracked_error(field_name) - else: # pragma: no cover - raise FieldError('Field %s is not being tracked for changes' % (field_name)) + else: # pragma: no cover + raise FieldError("Field %s is not being tracked for changes" % (field_name)) return getattr(self, field_name) != self._initial_data[field_name] def _json_field_changed(self, field_name_full): - field_name, json_field_name = field_name_full.split('.') + field_name, json_field_name = field_name_full.split(".") if not self._field_is_loaded(field_name): return False @@ -217,8 +222,8 @@ def _json_field_changed(self, field_name_full): if self._field_is_tracked(field_name_full): # pragma: no cover if not self._field_initial_value_loaded(field_name): self._raise_field_not_tracked_error(field_name_full) - else: # pragma: no cover - raise FieldError('Field %s is not being tracked for changes' % (field_name_full)) + else: # pragma: no cover + raise FieldError("Field %s is not being tracked for changes" % (field_name_full)) json_field_value = self._initial_data[field_name].get(json_field_name, None) return getattr(self, field_name).get(json_field_name, None) != json_field_value @@ -238,18 +243,18 @@ def _raise_field_not_tracked_error(self, field_name): their initial query to include all the data they were going to need anyway. """ raise FieldError( - 'Tried to check changes in field %(field_name)s, but the field was not loaded for ' - 'tracking changes during __init__. Call .only(%(field_name)s) in you ORM query to ' - 'load the field during __init__, so that it will be tracked.' % locals() + "Tried to check changes in field %(field_name)s, but the field was not loaded for " + "tracking changes during __init__. Call .only(%(field_name)s) in you ORM query to " + "load the field during __init__, so that it will be tracked." 
% locals() ) def _check_read_only_after_create_fields(self): - if self.field_changed('date_created'): - self.date_created = self._initial_data['date_created'] - if self.field_changed('user_created'): - self.user_created = self._initial_data['user_created'] - if self.field_changed('service_created'): - self.service_created = self._initial_data['service_created'] + if self.field_changed("date_created"): + self.date_created = self._initial_data["date_created"] + if self.field_changed("user_created"): + self.user_created = self._initial_data["user_created"] + if self.field_changed("service_created"): + self.service_created = self._initial_data["service_created"] def _operation_is_create(self): return self.id is None @@ -263,10 +268,12 @@ def _update_tracked_field_values(self): field_changed() keeps working as expected """ for field_name in self._initial_data.keys(): - if '.' in field_name: - field_name, json_field_name = field_name.split('.') + if "." in field_name: + field_name, json_field_name = field_name.split(".") # by now should have crashed to checks in previous steps, so no need to check here - self._initial_data[field_name][json_field_name] = getattr(self, field_name).get(json_field_name, None) + self._initial_data[field_name][json_field_name] = getattr(self, field_name).get( + json_field_name, None + ) else: self._initial_data[field_name] = getattr(self, field_name) diff --git a/src/metax_api/models/contract.py b/src/metax_api/models/contract.py index c5940eb3..e7b75030 100755 --- a/src/metax_api/models/contract.py +++ b/src/metax_api/models/contract.py @@ -26,18 +26,19 @@ def delete(self): https://docs.djangoproject.com/en/1.11/topics/db/models/#overriding-model-methods """ super(Contract, self).remove() - sql = 'update metax_api_catalogrecord set removed = true ' \ - 'where active = true and removed = false ' \ - 'and contract_id = %s' + sql = ( + "update metax_api_catalogrecord set removed = true " + "where active = true and removed = false " + "and contract_id = %s" + ) with connection.cursor() as cr: cr.execute(sql, [self.id]) def __repr__(self): - return '<%s: %d, removed: %s, identifier: %d, record_count: %d >' \ - % ( - 'Contract', - self.id, - str(self.removed), - self.contract_json['identifier'], - self.records.count(), - ) + return "<%s: %d, removed: %s, identifier: %d, record_count: %d >" % ( + "Contract", + self.id, + str(self.removed), + self.contract_json["identifier"], + self.records.count(), + ) diff --git a/src/metax_api/models/data_catalog.py b/src/metax_api/models/data_catalog.py index f2660a04..37e2c100 100755 --- a/src/metax_api/models/data_catalog.py +++ b/src/metax_api/models/data_catalog.py @@ -18,57 +18,75 @@ class DataCatalog(Common): catalog_json = JSONField() catalog_record_group_edit = models.CharField( - max_length=200, blank=False, null=True, - help_text='Group which is allowed to edit catalog records in the catalog.') + max_length=200, + blank=False, + null=True, + help_text="Group which is allowed to edit catalog records in the catalog.", + ) catalog_record_group_create = models.CharField( - max_length=200, blank=False, null=True, - help_text='Group which is allowed to add new catalog records to the catalog.') + max_length=200, + blank=False, + null=True, + help_text="Group which is allowed to add new catalog records to the catalog.", + ) catalog_record_services_edit = models.CharField( - max_length=200, blank=False, null=True, - help_text='Services which are allowed to edit catalog records in the catalog.') + max_length=200, + blank=False, + 
null=True, + help_text="Services which are allowed to edit catalog records in the catalog.", + ) catalog_record_services_create = models.CharField( - max_length=200, blank=False, null=True, - help_text='Services which are allowed to edit catalog records in the catalog.') + max_length=200, + blank=False, + null=True, + help_text="Services which are allowed to edit catalog records in the catalog.", + ) catalog_record_group_read = models.CharField( - max_length=200, blank=False, null=True, - help_text='Group which is allowed to read catalog records in the catalog.') + max_length=200, + blank=False, + null=True, + help_text="Group which is allowed to read catalog records in the catalog.", + ) catalog_record_services_read = models.CharField( - max_length=200, blank=False, null=True, - help_text='Services which are allowed to read catalog records in the catalog.') + max_length=200, + blank=False, + null=True, + help_text="Services which are allowed to read catalog records in the catalog.", + ) # END OF MODEL FIELD DEFINITIONS # - READ_METHODS = ('GET', 'HEAD', 'OPTIONS') + READ_METHODS = ("GET", "HEAD", "OPTIONS") def __init__(self, *args, **kwargs): super(DataCatalog, self).__init__(*args, **kwargs) - self.track_fields('catalog_json.identifier') + self.track_fields("catalog_json.identifier") def save(self, *args, **kwargs): if self._operation_is_update(): - if self.field_changed('catalog_json.identifier'): + if self.field_changed("catalog_json.identifier"): # read-only after creating - self.catalog_json['identifier'] = self._initial_data['catalog_json']['identifier'] + self.catalog_json["identifier"] = self._initial_data["catalog_json"]["identifier"] super(DataCatalog, self).save(*args, **kwargs) - def print_records(self): # pragma: no cover + def print_records(self): # pragma: no cover for r in self.records.all(): print(r) def __repr__(self): - return '<%s: %d, removed: %s, identifier: %s, research_dataset_schema=%s, dataset_versioning: %s >' % ( - 'DataCatalog', + return "<%s: %d, removed: %s, identifier: %s, research_dataset_schema=%s, dataset_versioning: %s >" % ( + "DataCatalog", self.id, str(self.removed), - self.catalog_json['identifier'], - self.catalog_json['research_dataset_schema'], - self.catalog_json['dataset_versioning'], + self.catalog_json["identifier"], + self.catalog_json["research_dataset_schema"], + self.catalog_json["dataset_versioning"], ) def user_has_access(self, request): @@ -81,4 +99,4 @@ def user_has_access(self, request): return False def delete(self): - super(DataCatalog, self).remove() \ No newline at end of file + super(DataCatalog, self).remove() diff --git a/src/metax_api/models/directory.py b/src/metax_api/models/directory.py index 20bfa264..5b8c1714 100755 --- a/src/metax_api/models/directory.py +++ b/src/metax_api/models/directory.py @@ -27,17 +27,19 @@ class Directory(Common): directory_path = models.TextField() identifier = models.CharField(max_length=200, unique=True) file_count = models.BigIntegerField(default=0) - parent_directory = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, related_name='child_directories') + parent_directory = models.ForeignKey( + "self", on_delete=models.SET_NULL, null=True, related_name="child_directories" + ) project_identifier = models.CharField(max_length=200) # END OF MODEL FIELD DEFINITIONS # class Meta: indexes = [ - models.Index(fields=['directory_path']), - models.Index(fields=['identifier']), - models.Index(fields=['parent_directory']), - models.Index(fields=['project_identifier']), + 
models.Index(fields=["directory_path"]), + models.Index(fields=["identifier"]), + models.Index(fields=["parent_directory"]), + models.Index(fields=["project_identifier"]), ] def delete(self): @@ -48,6 +50,7 @@ def user_has_access(self, request): if request.user.is_service: return True from metax_api.services import AuthService + return self.project_identifier in AuthService.get_user_projects(request) def calculate_byte_size_and_file_count(self): @@ -58,17 +61,20 @@ def calculate_byte_size_and_file_count(self): """ if self.parent_directory_id: raise Exception( - 'while this is a recursive method, it is intended to be initially called by ' - 'project root directories only.' + "while this is a recursive method, it is intended to be initially called by " + "project root directories only." ) - _logger.info('Calculating directory byte sizes and file counts for project %s...' % self.project_identifier) + _logger.info( + "Calculating directory byte sizes and file counts for project %s..." + % self.project_identifier + ) update_statements = [] self._calculate_byte_size_and_file_count(update_statements) - sql_update_all_directories = ''' + sql_update_all_directories = """ update metax_api_directory as d set byte_size = results.byte_size, file_count = results.file_count @@ -76,19 +82,21 @@ def calculate_byte_size_and_file_count(self): %s ) as results(byte_size, file_count, id) where results.id = d.id; - ''' % ','.join(update_statements) + """ % ",".join( + update_statements + ) with connection.cursor() as cursor: cursor.execute(sql_update_all_directories) _logger.info( - 'Project %s directory tree calculations complete. Total byte_size: ' - '%d bytes (%.3f GB), total file_count: %d files' + "Project %s directory tree calculations complete. Total byte_size: " + "%d bytes (%.3f GB), total file_count: %d files" % ( self.project_identifier, self.byte_size, self.byte_size / 1024 / 1024 / 1024, - self.file_count + self.file_count, ) ) @@ -101,10 +109,16 @@ def _calculate_byte_size_and_file_count(self, update_statements): self.file_count = 0 # fields id, parent_directory_id must be specified for joining for Prefetch-object to work properly - sub_dirs = self.child_directories.all() \ - .only('byte_size', 'parent_directory_id') \ + sub_dirs = ( + self.child_directories.all() + .only("byte_size", "parent_directory_id") .prefetch_related( - Prefetch('files', queryset=File.objects.only('id', 'byte_size', 'parent_directory_id'))) + Prefetch( + "files", + queryset=File.objects.only("id", "byte_size", "parent_directory_id"), + ) + ) + ) if sub_dirs: for sub_dir in sub_dirs: @@ -118,10 +132,7 @@ def _calculate_byte_size_and_file_count(self, update_statements): self.byte_size += sum(f.byte_size for f in self.files.all()) or 0 self.file_count += len(self.files.all()) or 0 - update_statements.append( - '(%d, %d, %d)' - % (self.byte_size, self.file_count, self.id) - ) + update_statements.append("(%d, %d, %d)" % (self.byte_size, self.file_count, self.id)) def calculate_byte_size_and_file_count_for_cr(self, cr_id, directory_data): """ @@ -130,13 +141,20 @@ def calculate_byte_size_and_file_count_for_cr(self, cr_id, directory_data): file count for each directory into parameter directory_data. Intended to be called for the top-level directories of a project in a dataset. """ - _logger.debug('Calculating directory byte sizes and file counts for project %s, directory %s...' % - (self.project_identifier, self.directory_path)) + _logger.debug( + "Calculating directory byte sizes and file counts for project %s, directory %s..." 
+ % (self.project_identifier, self.directory_path) + ) - stats = File.objects.filter(record__pk=cr_id).values_list('parent_directory_id').annotate( - Sum('byte_size'), Count('id')) + stats = ( + File.objects.filter(record__pk=cr_id) + .values_list("parent_directory_id") + .annotate(Sum("byte_size"), Count("id")) + ) - grouped_by_dir = {parent_id: (byte_size, file_count) for parent_id, byte_size, file_count in stats} + grouped_by_dir = { + parent_id: (byte_size, file_count) for parent_id, byte_size, file_count in stats + } self._calculate_byte_size_and_file_count_for_cr(grouped_by_dir, directory_data) @@ -155,7 +173,7 @@ def _calculate_byte_size_and_file_count_for_cr(self, grouped_by_dir, directory_d self.byte_size = 0 self.file_count = 0 - sub_dirs = self.child_directories.all().only('id') + sub_dirs = self.child_directories.all().only("id") if sub_dirs: for sub_dir in sub_dirs: @@ -172,14 +190,17 @@ def _calculate_byte_size_and_file_count_for_cr(self, grouped_by_dir, directory_d self.file_count += current_dir[1] # the accumulated numbers that exist in the directory for given cr - directory_data[self.id] = [ self.byte_size, self.file_count ] + directory_data[self.id] = [self.byte_size, self.file_count] def __repr__(self): - return '<%s: %d, removed: %s, project_identifier: %s, identifier: %s, directory_path: %s >' % ( - 'Directory', - self.id, - str(self.removed), - self.project_identifier, - self.identifier, - self.directory_path + return ( + "<%s: %d, removed: %s, project_identifier: %s, identifier: %s, directory_path: %s >" + % ( + "Directory", + self.id, + str(self.removed), + self.project_identifier, + self.identifier, + self.directory_path, + ) ) diff --git a/src/metax_api/models/file.py b/src/metax_api/models/file.py index add9fa72..2f577619 100755 --- a/src/metax_api/models/file.py +++ b/src/metax_api/models/file.py @@ -13,22 +13,24 @@ class FileManager(CommonManager): - def get(self, *args, **kwargs): - if kwargs.get('using_dict', None): + if kwargs.get("using_dict", None): # for a simple "just get me the instance that equals this dict i have" search. # this is useful if during a request the url does not contain the identifier (bulk update), # and in generic operations where the type of object being handled is not known (also bulk operations). 
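# Illustrative sketch (editor's aside, outside the diff): how the dict-based
# lookup described above is typically driven from a bulk code path, where the
# URL carries no identifier and only the payload rows are available. The
# helper name and the 'rows' argument are hypothetical; FileManager.get is
# the method defined here.
def _example_bulk_fetch(rows):
    # each row must carry 'id' or 'identifier'; get(using_dict=row) resolves it
    return [File.objects.get(using_dict=row) for row in rows]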
- row = kwargs.pop('using_dict') - if row.get('id', None): - kwargs['id'] = row['id'] - elif row.get('identifier', None): - kwargs['identifier'] = row['identifier'] + row = kwargs.pop("using_dict") + if row.get("id", None): + kwargs["id"] = row["id"] + elif row.get("identifier", None): + kwargs["identifier"] = row["identifier"] else: - raise ValidationError([ - 'this operation requires one of the following identifying keys to be present: %s' - % ', '.join([ 'id', 'identifier' ])]) + raise ValidationError( + [ + "this operation requires one of the following identifying keys to be present: %s" + % ", ".join(["id", "identifier"]) + ] + ) return super(FileManager, self).get(*args, **kwargs) @@ -48,22 +50,26 @@ class File(Common): file_modified = models.DateTimeField() file_name = models.TextField() file_path = models.TextField() - file_storage = models.ForeignKey('metax_api.FileStorage', on_delete=models.DO_NOTHING) + file_storage = models.ForeignKey("metax_api.FileStorage", on_delete=models.DO_NOTHING) file_uploaded = models.DateTimeField() identifier = models.CharField(max_length=200) open_access = models.BooleanField(default=False) - parent_directory = models.ForeignKey('metax_api.Directory', on_delete=models.SET_NULL, null=True, - related_name='files') + parent_directory = models.ForeignKey( + "metax_api.Directory", + on_delete=models.SET_NULL, + null=True, + related_name="files", + ) project_identifier = models.CharField(max_length=200) # END OF MODEL FIELD DEFINITIONS # class Meta: indexes = [ - models.Index(fields=['file_path']), - models.Index(fields=['identifier']), - models.Index(fields=['parent_directory']), - models.Index(fields=['project_identifier']), + models.Index(fields=["file_path"]), + models.Index(fields=["identifier"]), + models.Index(fields=["parent_directory"]), + models.Index(fields=["project_identifier"]), ] objects = FileManager() @@ -72,17 +78,18 @@ def user_has_access(self, request): if request.user.is_service: return True from metax_api.services import AuthService + return self.project_identifier in AuthService.get_user_projects(request) def __repr__(self): - return '<%s: %d, removed: %s, project_identifier: %s, identifier: %s, file_path: %s >' % ( - 'File', + return "<%s: %d, removed: %s, project_identifier: %s, identifier: %s, file_path: %s >" % ( + "File", self.id, str(self.removed), self.project_identifier, self.identifier, - self.file_path + self.file_path, ) def delete(self): - super(File, self).remove() \ No newline at end of file + super(File, self).remove() diff --git a/src/metax_api/models/file_storage.py b/src/metax_api/models/file_storage.py index 1a1d5287..fbe89086 100755 --- a/src/metax_api/models/file_storage.py +++ b/src/metax_api/models/file_storage.py @@ -20,23 +20,25 @@ class FileStorage(Common): def __init__(self, *args, **kwargs): super(FileStorage, self).__init__(*args, **kwargs) - self.track_fields('file_storage_json.identifier') + self.track_fields("file_storage_json.identifier") def save(self, *args, **kwargs): if self._operation_is_update(): - if self.field_changed('file_storage_json.identifier'): + if self.field_changed("file_storage_json.identifier"): # read-only after creating - self.file_storage_json['identifier'] = self._initial_data['file_storage_json']['identifier'] + self.file_storage_json["identifier"] = self._initial_data["file_storage_json"][ + "identifier" + ] super(FileStorage, self).save(*args, **kwargs) def __repr__(self): - return '<%s: %d, removed: %s, identifier: %s >' % ( - 'FileStorage', + return "<%s: %d, removed: %s, 
identifier: %s >" % ( + "FileStorage", self.id, str(self.removed), - self.file_storage_json['identifier'], + self.file_storage_json["identifier"], ) def delete(self): - super(FileStorage, self).remove() \ No newline at end of file + super(FileStorage, self).remove() diff --git a/src/metax_api/models/metax_user.py b/src/metax_api/models/metax_user.py index a83b7072..7d8bd10b 100755 --- a/src/metax_api/models/metax_user.py +++ b/src/metax_api/models/metax_user.py @@ -14,5 +14,6 @@ https://docs.djangoproject.com/en/1.11/topics/auth/customizing/#using-a-custom-user-model-when-starting-a-project """ + class MetaxUser(AbstractUser): pass diff --git a/src/metax_api/models/xml_metadata.py b/src/metax_api/models/xml_metadata.py index 24572735..5d180933 100755 --- a/src/metax_api/models/xml_metadata.py +++ b/src/metax_api/models/xml_metadata.py @@ -22,10 +22,10 @@ class XmlMetadata(Common): # END OF MODEL FIELD DEFINITIONS # class Meta: - unique_together = ('namespace', 'file') + unique_together = ("namespace", "file") indexes = [ - models.Index(fields=['namespace']), + models.Index(fields=["namespace"]), ] def delete(self): diff --git a/src/metax_api/parsers/parsers.py b/src/metax_api/parsers/parsers.py index 9ab9fa9e..9ca82c33 100755 --- a/src/metax_api/parsers/parsers.py +++ b/src/metax_api/parsers/parsers.py @@ -13,9 +13,10 @@ lets it pass through to the views in its original form. """ + class XMLParser(parsers.BaseParser): - media_type = 'application/xml' + media_type = "application/xml" def parse(self, stream, media_type=None, parser_context=None): - return stream.read().decode('utf-8') + return stream.read().decode("utf-8") diff --git a/src/metax_api/permissions/permissions.py b/src/metax_api/permissions/permissions.py index 935e2d90..93865817 100755 --- a/src/metax_api/permissions/permissions.py +++ b/src/metax_api/permissions/permissions.py @@ -16,13 +16,13 @@ _logger = logging.getLogger(__name__) METHOD_MAP = { - 'GET': 'read', - 'HEAD': 'read', - 'OPTIONS': 'read', - 'POST': 'create', - 'PUT': 'update', - 'PATCH': 'update', - 'DELETE': 'delete', + "GET": "read", + "HEAD": "read", + "OPTIONS": "read", + "POST": "create", + "PUT": "update", + "PATCH": "update", + "DELETE": "delete", } @@ -76,32 +76,31 @@ def has_permission(self, request, view): api_name = view.get_api_name() has_perm = False - _logger.debug('Checking user permission for api %s...' % request.path) + _logger.debug("Checking user permission for api %s..." % request.path) - if api_type not in self.perms: # pragma: no cover + if api_type not in self.perms: # pragma: no cover _logger.error( - 'api_type %s not specified in self.perms - forbidding. this probably should not happen' + "api_type %s not specified in self.perms - forbidding. this probably should not happen" % api_type ) has_perm = False - elif api_name not in self.perms[api_type]: # pragma: no cover + elif api_name not in self.perms[api_type]: # pragma: no cover _logger.error( - 'api_name %s not specified in self.perms[\'%s\'] - forbidding. this probably should not happen' % - (api_name, api_type) + "api_name %s not specified in self.perms['%s'] - forbidding. 
this probably should not happen" + % (api_name, api_type) ) has_perm = False - elif api_type == 'rest': + elif api_type == "rest": has_perm = self._check_rest_perms(request, api_name) - elif api_type == 'rpc': + elif api_type == "rpc": has_perm = self._check_rpc_perms(request, api_name) else: - _logger.error('Unknown api %s' % request.path) + _logger.error("Unknown api %s" % request.path) raise Exception("request path not available") - _logger.debug( - 'user %s has_perm for api %s == %r' - % (request.user.username or '(anonymous)', request.path, has_perm) + "user %s has_perm for api %s == %r" + % (request.user.username or "(anonymous)", request.path, has_perm) ) return has_perm @@ -113,12 +112,14 @@ def _check_rest_perms(self, request, api_name): """ if request.method in METHOD_MAP: operation_type = METHOD_MAP[request.method] - if 'all' in self.perms['rest'][api_name].get(operation_type, []): + if "all" in self.perms["rest"][api_name].get(operation_type, []): has_perm = True else: has_perm = self._check_user_rest_perms(request, api_name, operation_type) if not has_perm: - _logger.error(f"access denied for user {request.user.username} in {api_name} with operation {operation_type}") + _logger.error( + f"access denied for user {request.user.username} in {api_name} with operation {operation_type}" + ) else: raise MethodNotAllowed return has_perm @@ -128,16 +129,22 @@ def _check_rpc_perms(self, request, api_name): Check if user (service user) or user type (endusers) has permission to use given RPC method. """ - rpc_method_name = request.path.split('/')[-1] - - if rpc_method_name not in self.perms['rpc'][api_name]: - raise Http400({ - 'detail': [ - 'Unknown RPC method: %s. Valid %s RPC methods are: %s' - % (rpc_method_name, api_name, ', '.join(self.perms['rpc'][api_name].keys())) - ] - }) - elif 'all' in self.perms['rpc'][api_name][rpc_method_name]['use']: + rpc_method_name = request.path.split("/")[-1] + + if rpc_method_name not in self.perms["rpc"][api_name]: + raise Http400( + { + "detail": [ + "Unknown RPC method: %s. Valid %s RPC methods are: %s" + % ( + rpc_method_name, + api_name, + ", ".join(self.perms["rpc"][api_name].keys()), + ) + ] + } + ) + elif "all" in self.perms["rpc"][api_name][rpc_method_name]["use"]: has_perm = True else: has_perm = self._check_user_rpc_perms(request, api_name, rpc_method_name) @@ -147,7 +154,7 @@ def _check_rpc_perms(self, request, api_name): def has_object_permission(self, request, view, obj): has_perm = obj.user_has_access(request) if not has_perm: - self.message = 'You are not permitted to access this resource.' + self.message = "You are not permitted to access this resource." return has_perm @@ -158,13 +165,13 @@ class EndUserPermissions(MetaxAPIPermissions): """ service_permission = False - message = 'End Users are not allowed to access this api.' + message = "End Users are not allowed to access this api." def _check_user_rest_perms(self, request, api_name, operation_type): - return 'endusers' in self.perms['rest'][api_name].get(operation_type, []) + return "endusers" in self.perms["rest"][api_name].get(operation_type, []) def _check_user_rpc_perms(self, request, api_name, rpc_method_name): - return 'endusers' in self.perms['rpc'][api_name][rpc_method_name]['use'] + return "endusers" in self.perms["rpc"][api_name][rpc_method_name]["use"] class ServicePermissions(MetaxAPIPermissions): @@ -175,7 +182,7 @@ class ServicePermissions(MetaxAPIPermissions): """ service_permission = True - message = 'Service %s is not allowed to access this api.' 
+ message = "Service %s is not allowed to access this api." def has_permission(self, request, view): has_perm = super().has_permission(request, view) @@ -185,7 +192,7 @@ def has_permission(self, request, view): return has_perm def _check_user_rest_perms(self, request, api_name, operation_type): - return request.user.username in self.perms['rest'][api_name].get(operation_type, []) + return request.user.username in self.perms["rest"][api_name].get(operation_type, []) def _check_user_rpc_perms(self, request, api_name, rpc_method_name): - return request.user.username in self.perms['rpc'][api_name][rpc_method_name]['use'] + return request.user.username in self.perms["rpc"][api_name][rpc_method_name]["use"] diff --git a/src/metax_api/renderers/renderers.py b/src/metax_api/renderers/renderers.py index 440c12d7..c19a3b60 100755 --- a/src/metax_api/renderers/renderers.py +++ b/src/metax_api/renderers/renderers.py @@ -18,8 +18,8 @@ class HTMLToJSONRenderer(renderers.JSONRenderer): This renderer catches the 'text/html' Accept header, but returns JSON instead of html. """ - media_type = 'text/html' - charset = 'utf-8' + media_type = "text/html" + charset = "utf-8" class XMLRenderer(renderers.BaseRenderer): @@ -29,8 +29,8 @@ class XMLRenderer(renderers.BaseRenderer): just isnt complicated enough in django's opinion. """ - media_type = 'application/xml' - format = 'xml' + media_type = "application/xml" + format = "xml" def render(self, data, media_type=None, renderer_context=None): return data diff --git a/src/metax_api/services/api_error_service.py b/src/metax_api/services/api_error_service.py index 39952b23..a15b1a94 100755 --- a/src/metax_api/services/api_error_service.py +++ b/src/metax_api/services/api_error_service.py @@ -18,8 +18,7 @@ _logger = logging.getLogger(__name__) -class ApiErrorService(): - +class ApiErrorService: @staticmethod def flush_errors(): """ @@ -28,7 +27,7 @@ def flush_errors(): error_files = listdir(settings.ERROR_FILES_PATH) file_count = len(error_files) for ef in error_files: - remove_file('%s/%s' % (settings.ERROR_FILES_PATH, ef)) + remove_file("%s/%s" % (settings.ERROR_FILES_PATH, ef)) return file_count @staticmethod @@ -36,14 +35,14 @@ def remove_error_file(error_identifier): """ Delete a single error file. 
""" - remove_file('%s/%s.json' % (settings.ERROR_FILES_PATH, error_identifier)) + remove_file("%s/%s.json" % (settings.ERROR_FILES_PATH, error_identifier)) @staticmethod def retrieve_error_details(error_identifier): """ Retrieve complete data about a single error """ - with open('%s/%s.json' % (settings.ERROR_FILES_PATH, error_identifier), 'r') as f: + with open("%s/%s.json" % (settings.ERROR_FILES_PATH, error_identifier), "r") as f: return json_load(f) @staticmethod @@ -56,13 +55,17 @@ def retrieve_error_list(): error_list = [] for ef in error_files: - with open('%s/%s' % (settings.ERROR_FILES_PATH, ef), 'r') as f: + with open("%s/%s" % (settings.ERROR_FILES_PATH, ef), "r") as f: error_details = json_load(f) - error_details.pop('data', None) - error_details.pop('headers', None) - if len(str(error_details['response'])) > 200: - error_details['response'] = '%s ...(first 200 characters)' % str(error_details['response'])[:200] - error_details['traceback'] = '(last 200 characters) ...%s' % error_details['traceback'][-200:] + error_details.pop("data", None) + error_details.pop("headers", None) + if len(str(error_details["response"])) > 200: + error_details["response"] = ( + "%s ...(first 200 characters)" % str(error_details["response"])[:200] + ) + error_details["traceback"] = ( + "(last 200 characters) ...%s" % error_details["traceback"][-200:] + ) error_list.append(error_details) return error_list @@ -71,12 +74,12 @@ def store_error_details(request, response, exception=None, other={}): """ Store error and request details to disk to specified error file location. """ - current_time = str(get_tz_aware_now_without_micros()).replace(' ', 'T') + current_time = str(get_tz_aware_now_without_micros()).replace(" ", "T") - if request.method in ('POST', 'PUT', 'PATCH'): + if request.method in ("POST", "PUT", "PATCH"): # cast possible datetime objects to strings, because those cant be json-serialized... 
request_data = request.data - for date_field in ('date_modified', 'date_created'): + for date_field in ("date_modified", "date_created"): if isinstance(request_data, list): for item in request_data: if isinstance(item, dict) and date_field in item: @@ -89,47 +92,51 @@ def store_error_details(request, response, exception=None, other={}): request_data = None error_info = { - 'method': request.method, - 'user': request.user.username or 'guest', - 'data': request_data, - 'headers': { - k: v for k, v in request.META.items() - if k.startswith('HTTP_') and k != 'HTTP_AUTHORIZATION' + "method": request.method, + "user": request.user.username or "guest", + "data": request_data, + "headers": { + k: v + for k, v in request.META.items() + if k.startswith("HTTP_") and k != "HTTP_AUTHORIZATION" }, - 'status_code': response.status_code, - 'response': response.data, - 'traceback': traceback.format_exc(), + "status_code": response.status_code, + "response": response.data, + "traceback": traceback.format_exc(), # during test case execution, RAW_URI is not set - 'url': request.META.get('RAW_URI', request.META.get('PATH_INFO', '???')), - 'identifier': '%s-%s' % (current_time[:19], str(uuid4())[:8]), - 'exception_time': current_time, + "url": request.META.get("RAW_URI", request.META.get("PATH_INFO", "???")), + "identifier": "%s-%s" % (current_time[:19], str(uuid4())[:8]), + "exception_time": current_time, } if other: # may contain info that the request was a bulk operation - error_info['other'] = { k: v for k, v in other.items() } - if 'bulk_request' in other: - error_info['other']['data_row_count'] = len(request_data) + error_info["other"] = {k: v for k, v in other.items()} + if "bulk_request" in other: + error_info["other"]["data_row_count"] = len(request_data) try: - with open('%s/%s.json' % (settings.ERROR_FILES_PATH, error_info['identifier']), 'w') as f: + with open( + "%s/%s.json" % (settings.ERROR_FILES_PATH, error_info["identifier"]), + "w", + ) as f: json_dump(error_info, f) except: - _logger.exception('Failed to save error info...') + _logger.exception("Failed to save error info...") else: - response.data['error_identifier'] = error_info['identifier'] + response.data["error_identifier"] = error_info["identifier"] if response.status_code == 500: json_logger.error( - event='api_exception', + event="api_exception", error={ - 'error_identifier': error_info['identifier'], - 'status_code': response.status_code, - 'traceback': error_info['traceback'], - } + "error_identifier": error_info["identifier"], + "status_code": response.status_code, + "traceback": error_info["traceback"], + }, ) if executing_test_case(): - response.data['traceback'] = traceback.format_exc() + response.data["traceback"] = traceback.format_exc() diff --git a/src/metax_api/services/auth_service.py b/src/metax_api/services/auth_service.py index 795f1c48..bb2fee69 100755 --- a/src/metax_api/services/auth_service.py +++ b/src/metax_api/services/auth_service.py @@ -13,8 +13,8 @@ _logger = logging.getLogger(__name__) -class AuthService(): +class AuthService: @classmethod def get_user_projects(cls, request): """ @@ -26,12 +26,12 @@ def get_user_projects(cls, request): # in order to not leak the user any information. 
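# Illustrative sketch (editor's aside, outside the diff): the 404 below,
# rather than a 403, is deliberate, so an anonymous caller cannot distinguish
# "resource exists but is forbidden" from "no such resource". The same
# pattern as a hedged standalone guard (the helper is hypothetical; Http404
# is the exception already used in this module):
def _example_guard(user):
    if not getattr(user, "is_authenticated", False):
        raise Http404  # deliberately not 403, to avoid leaking existence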
raise Http404 - if hasattr(request.user, 'user_projects'): + if hasattr(request.user, "user_projects"): return request.user.user_projects user_projects = cls.extract_file_projects_from_token(request.user.token) - username = request.user.token.get('CSCUserName', '') + username = request.user.token.get("CSCUserName", "") additional_projects = cls.get_additional_user_projects_from_file(username) user_projects.update(additional_projects) @@ -40,7 +40,7 @@ def get_user_projects(cls, request): @staticmethod def extract_file_projects_from_token(token): - ''' + """ Extract user's project identifiers from token claims. The user's token (request.user.token), when the user has access to some @@ -55,17 +55,17 @@ def extract_file_projects_from_token(token): ] Valid group names for IDA projects look like: IDA01:2001036. - ''' + """ if not token: return set() user_projects = set() - project_prefix = 'IDA01:' + project_prefix = "IDA01:" user_projects = set( - group.split(':')[-1] - for group in token.get('group_names', []) + group.split(":")[-1] + for group in token.get("group_names", []) if group.startswith(project_prefix) ) @@ -81,9 +81,9 @@ def get_additional_user_projects_from_file(username): additional_projects = None try: - with open(settings.ADDITIONAL_USER_PROJECTS_PATH, 'r') as file: + with open(settings.ADDITIONAL_USER_PROJECTS_PATH, "r") as file: additional_projects = json.load(file) - except FileNotFoundError: # noqa + except FileNotFoundError: # noqa _logger.info("No local file for user projects") except Exception as e: _logger.error(e) @@ -91,8 +91,9 @@ def get_additional_user_projects_from_file(username): if additional_projects: if not additional_projects.get(username, False): _logger.info("No projects for user '%s' on local file" % username) - elif not isinstance(additional_projects[username], list) \ - or not isinstance(additional_projects[username][0], str): + elif not isinstance(additional_projects[username], list) or not isinstance( + additional_projects[username][0], str + ): _logger.error("Projects on file are not list of strings") else: user_projects.update(p for p in additional_projects[username]) @@ -107,12 +108,11 @@ def check_user_groups_against_groups(cls, request, group_list): Return True if there is a match, otherwise False. """ - assert request.user is not None, 'request.user is None' - assert request.user.token is not None, 'request.user.token is None' + assert request.user is not None, "request.user is None" + assert request.user.token is not None, "request.user.token is None" user_projects = set( - group.split(':')[-1] - for group in request.user.token.get('group_names', []) + group.split(":")[-1] for group in request.user.token.get("group_names", []) ) user_projects.update(cls.get_additional_user_projects_from_file(request.user.username)) @@ -131,7 +131,7 @@ def check_services_against_allowed_services(cls, request, service_list): Return True if there is a match, otherwise False. 
""" - assert request.user.username is not None, 'request.user.username is None' + assert request.user.username is not None, "request.user.username is None" authenticated_service = request.user.username diff --git a/src/metax_api/services/callable_service.py b/src/metax_api/services/callable_service.py index ca204a58..eecf5569 100755 --- a/src/metax_api/services/callable_service.py +++ b/src/metax_api/services/callable_service.py @@ -10,7 +10,7 @@ _logger = logging.getLogger(__name__) -class _CallableService(): +class _CallableService: """ Methods to handle adding and executing callable objects, which will be executed @@ -40,13 +40,13 @@ def run_post_request_callables(self): if not self.post_request_callables: return - _logger.debug('Executing %d post_request_callables...' % len(self.post_request_callables)) + _logger.debug("Executing %d post_request_callables..." % len(self.post_request_callables)) for callable_obj in self.post_request_callables: try: callable_obj() except: - _logger.exception('Failed to execute post_request_callables') + _logger.exception("Failed to execute post_request_callables") # failure to execute a callable should fail the entire request self.clear_callables() raise diff --git a/src/metax_api/services/catalog_record_service.py b/src/metax_api/services/catalog_record_service.py index a40aaf33..98638fcf 100755 --- a/src/metax_api/services/catalog_record_service.py +++ b/src/metax_api/services/catalog_record_service.py @@ -39,8 +39,8 @@ except ImportError as e: _logger.error(e) -class CatalogRecordService(CommonService, ReferenceDataMixin): +class CatalogRecordService(CommonService, ReferenceDataMixin): @classmethod def get_queryset_search_params(cls, request): """ @@ -54,97 +54,112 @@ def get_queryset_search_params(cls, request): queryset_search_params = cls.filter_by_state(request, queryset_search_params) - if request.query_params.get('state', False): - state_vals = request.query_params['state'].split(',') + if request.query_params.get("state", False): + state_vals = request.query_params["state"].split(",") for val in state_vals: try: int(val) except ValueError: - raise Http400({ 'state': ['Value \'%s\' is not an integer' % val] }) - queryset_search_params['preservation_state__in'] = state_vals + raise Http400({"state": ["Value '%s' is not an integer" % val]}) + queryset_search_params["preservation_state__in"] = state_vals - if request.query_params.get('preservation_state', False): - state_vals = request.query_params['preservation_state'].split(',') + if request.query_params.get("preservation_state", False): + state_vals = request.query_params["preservation_state"].split(",") for val in state_vals: try: int(val) except ValueError: - raise Http400({ 'preservation_state': ['Value \'%s\' is not an integer' % val] }) - queryset_search_params['preservation_state__in'] = state_vals - - if CommonService.get_boolean_query_param(request, 'latest'): - queryset_search_params['next_dataset_version_id'] = None - - if request.query_params.get('deprecated', None) is not None: - queryset_search_params['deprecated'] = CommonService.get_boolean_query_param(request, 'deprecated') - - if request.query_params.get('curator', False): - queryset_search_params['research_dataset__contains'] = \ - {'curator': [{ 'identifier': request.query_params['curator']}]} - - if request.query_params.get('owner_id', False): - queryset_search_params['editor__contains'] = { 'owner_id': request.query_params['owner_id'] } - - if request.query_params.get('user_created', False): - 
queryset_search_params['user_created'] = request.query_params['user_created'] - - if request.query_params.get('editor', False): - queryset_search_params['editor__contains'] = { 'identifier': request.query_params['editor'] } - - if request.query_params.get('metadata_provider_user', False): - queryset_search_params['metadata_provider_user'] = request.query_params['metadata_provider_user'] - - if request.query_params.get('metadata_owner_org', False): - queryset_search_params['metadata_owner_org__in'] = request.query_params['metadata_owner_org'].split(',') - - if request.query_params.get('contract_org_identifier', False): - if request.user.username not in ('metax', 'tpas'): - raise Http403({ 'detail': ['query parameter pas_filter is restricted']}) - queryset_search_params['contract__contract_json__organization__organization_identifier__iregex'] = \ - request.query_params['contract_org_identifier'] - - if request.query_params.get('pas_filter', False): + raise Http400({"preservation_state": ["Value '%s' is not an integer" % val]}) + queryset_search_params["preservation_state__in"] = state_vals + + if CommonService.get_boolean_query_param(request, "latest"): + queryset_search_params["next_dataset_version_id"] = None + + if request.query_params.get("deprecated", None) is not None: + queryset_search_params["deprecated"] = CommonService.get_boolean_query_param( + request, "deprecated" + ) + + if request.query_params.get("curator", False): + queryset_search_params["research_dataset__contains"] = { + "curator": [{"identifier": request.query_params["curator"]}] + } + + if request.query_params.get("owner_id", False): + queryset_search_params["editor__contains"] = { + "owner_id": request.query_params["owner_id"] + } + + if request.query_params.get("user_created", False): + queryset_search_params["user_created"] = request.query_params["user_created"] + + if request.query_params.get("editor", False): + queryset_search_params["editor__contains"] = { + "identifier": request.query_params["editor"] + } + + if request.query_params.get("metadata_provider_user", False): + queryset_search_params["metadata_provider_user"] = request.query_params[ + "metadata_provider_user" + ] + + if request.query_params.get("metadata_owner_org", False): + queryset_search_params["metadata_owner_org__in"] = request.query_params[ + "metadata_owner_org" + ].split(",") + + if request.query_params.get("contract_org_identifier", False): + if request.user.username not in ("metax", "tpas"): + raise Http403({"detail": ["query parameter pas_filter is restricted"]}) + queryset_search_params[ + "contract__contract_json__organization__organization_identifier__iregex" + ] = request.query_params["contract_org_identifier"] + + if request.query_params.get("pas_filter", False): cls.set_pas_filter(queryset_search_params, request) if CommonService.has_research_agent_query_params(request): cls.set_actor_filters(queryset_search_params, request) - if request.query_params.get('data_catalog', False): - queryset_search_params['data_catalog__catalog_json__identifier__iregex'] = \ - request.query_params['data_catalog'] + if request.query_params.get("data_catalog", False): + queryset_search_params[ + "data_catalog__catalog_json__identifier__iregex" + ] = request.query_params["data_catalog"] - if request.query_params.get('api_version', False): + if request.query_params.get("api_version", False): try: - value = int(request.query_params['api_version']) + value = int(request.query_params["api_version"]) except ValueError: - value = 
request.query_params['api_version'] - raise Http400({ 'api_version': ['Value \'%s\' is not an integer' % value] }) + value = request.query_params["api_version"] + raise Http400({"api_version": ["Value '%s' is not an integer" % value]}) - queryset_search_params['api_meta__contains'] = { 'version': value } + queryset_search_params["api_meta__contains"] = {"version": value} return queryset_search_params @staticmethod def filter_by_state(request, queryset_search_params): - ''' + """ Helper method to filter returning data by state: unauthenticated users get only published data, and end users get only published & their own drafts - ''' + """ state_filter = None - if request.user.username is None: # unauthenticated user - state_filter = Q(state='published') + if request.user.username is None: # unauthenticated user + state_filter = Q(state="published") elif request.user.is_service: # service account pass - else: # enduser api - state_filter = Q(state='published') | Q(state='draft', metadata_provider_user=request.user.username) + else: # enduser api + state_filter = Q(state="published") | Q( + state="draft", metadata_provider_user=request.user.username + ) if state_filter: - if 'q_filters' in queryset_search_params: - queryset_search_params['q_filters'].append(state_filter) + if "q_filters" in queryset_search_params: + queryset_search_params["q_filters"].append(state_filter) else: - queryset_search_params['q_filters'] = [state_filter] + queryset_search_params["q_filters"] = [state_filter] return queryset_search_params @@ -156,66 +171,85 @@ def set_actor_filters(queryset_search_params, request): Organization filters also search matches from person's "member_of" field. Q-filters from multiple queries are AND'ed together eventually. """ + def _get_person_filter(agent, person): name_filter = Q() # only one publisher possible - if agent == 'publisher': - name_filter |= Q(**{ f'research_dataset__{agent}__name__iregex': person }) + if agent == "publisher": + name_filter |= Q(**{f"research_dataset__{agent}__name__iregex": person}) else: # having same problem as in set_pas_filter below.. 
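# Illustrative sketch (editor's aside, outside the diff): the loop below ORs
# positional JSONField lookups for the first three list entries with a
# __contains fallback, apparently because a regex lookup cannot span list
# elements at arbitrary positions. Spelled out for 'curator' as a hedged,
# standalone sketch (the helper name is hypothetical):
from django.db.models import Q

def _example_curator_name_filter(person):
    q = Q()
    for i in range(3):  # positional lookups reach only the first 3 entries
        q |= Q(**{f"research_dataset__curator__{i}__name__iregex": person})
    q |= Q(research_dataset__curator__contains=[{"name": person}])
    return q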
                 for i in range(3):
-                    name_filter |= Q(**{ f'research_dataset__{agent}__{i}__name__iregex': person })
+                    name_filter |= Q(**{f"research_dataset__{agent}__{i}__name__iregex": person})

-                name_filter |= Q(**{ f'research_dataset__{agent}__contains': [{ 'name': person }] })
+                name_filter |= Q(**{f"research_dataset__{agent}__contains": [{"name": person}]})

             # regex will find matches from organization name fields so have to disable it
-            person_filter = Q(**{ f'research_dataset__{agent}__contains': [{ '@type': "Person" }] })
-            name_filter.add(person_filter, 'AND')
+            person_filter = Q(**{f"research_dataset__{agent}__contains": [{"@type": "Person"}]})
+            name_filter.add(person_filter, "AND")

             return name_filter

         def _get_org_filter(agent, org):
             name_filter = Q()
             # only one publisher possible
-            if agent == 'publisher':
-                name_filter |= (Q(**{ f'research_dataset__{agent}__name__en__iregex': org }))
-                name_filter |= (Q(**{ f'research_dataset__{agent}__name__fi__iregex': org }))
-                name_filter |= (Q(**{ f'research_dataset__{agent}__member_of__name__en__iregex': org }))
-                name_filter |= (Q(**{ f'research_dataset__{agent}__member_of__name__fi__iregex': org }))
+            if agent == "publisher":
+                name_filter |= Q(**{f"research_dataset__{agent}__name__en__iregex": org})
+                name_filter |= Q(**{f"research_dataset__{agent}__name__fi__iregex": org})
+                name_filter |= Q(**{f"research_dataset__{agent}__member_of__name__en__iregex": org})
+                name_filter |= Q(**{f"research_dataset__{agent}__member_of__name__fi__iregex": org})
             else:
                 for i in range(3):
-                    name_filter |= (Q(**{ f'research_dataset__{agent}__{i}__name__en__iregex': org }))
-                    name_filter |= (Q(**{ f'research_dataset__{agent}__{i}__name__fi__iregex': org }))
-                    name_filter |= (Q(**{ f'research_dataset__{agent}__{i}__member_of__name__en__iregex': org }))
-                    name_filter |= (Q(**{ f'research_dataset__{agent}__{i}__member_of__name__fi__iregex': org }))
-
-            name_filter |= (Q(**{ f'research_dataset__{agent}__contains': [{ 'name': {'en': org} }] }))
-            name_filter |= (Q(**{ f'research_dataset__{agent}__contains': [{ 'name': {'fi': org} }] }))
-            name_filter |= (
-                Q(**{ f'research_dataset__{agent}__contains': [{ 'member_of': {'name': {'en': org}} }] })
+                    name_filter |= Q(**{f"research_dataset__{agent}__{i}__name__en__iregex": org})
+                    name_filter |= Q(**{f"research_dataset__{agent}__{i}__name__fi__iregex": org})
+                    name_filter |= Q(
+                        **{f"research_dataset__{agent}__{i}__member_of__name__en__iregex": org}
+                    )
+                    name_filter |= Q(
+                        **{f"research_dataset__{agent}__{i}__member_of__name__fi__iregex": org}
+                    )
+
+            name_filter |= Q(
+                **{f"research_dataset__{agent}__contains": [{"name": {"en": org}}]}
+            )
+            name_filter |= Q(
+                **{f"research_dataset__{agent}__contains": [{"name": {"fi": org}}]}
             )
-            name_filter |= (
-                Q(**{ f'research_dataset__{agent}__contains': [{ 'member_of': {'name': {'fi': org}} }] })
+            name_filter |= Q(
+                **{
+                    f"research_dataset__{agent}__contains": [
+                        {"member_of": {"name": {"en": org}}}
+                    ]
+                }
+            )
+            name_filter |= Q(
+                **{
+                    f"research_dataset__{agent}__contains": [
+                        {"member_of": {"name": {"fi": org}}}
+                    ]
+                }
             )

             return name_filter

         q_filter = Q()
-        separator = 'OR' if request.query_params.get('condition_separator', '').upper() == 'OR' else 'AND'
+        separator = (
+            "OR" if request.query_params.get("condition_separator", "").upper() == "OR" else "AND"
+        )

-        for agent in ['creator', 'curator', 'publisher', 'rights_holder']:
-            if request.query_params.get(f'{agent}_person'):
-                person = urllib.parse.unquote(request.query_params[f'{agent}_person'])
+        for agent in ["creator", "curator", "publisher", "rights_holder"]:
+            if request.query_params.get(f"{agent}_person"):
+                person = urllib.parse.unquote(request.query_params[f"{agent}_person"])
                 q_filter.add(_get_person_filter(agent, person), separator)

-            if request.query_params.get(f'{agent}_organization'):
-                org = urllib.parse.unquote(request.query_params[f'{agent}_organization'])
+            if request.query_params.get(f"{agent}_organization"):
+                org = urllib.parse.unquote(request.query_params[f"{agent}_organization"])
                 q_filter.add(_get_org_filter(agent, org), separator)

-        if 'q_filters' in queryset_search_params: # pragma: no cover
-            queryset_search_params['q_filters'].append(q_filter)
+        if "q_filters" in queryset_search_params:  # pragma: no cover
+            queryset_search_params["q_filters"].append(q_filter)
         else:
-            queryset_search_params['q_filters'] = [q_filter]
+            queryset_search_params["q_filters"] = [q_filter]

     @staticmethod
     def set_pas_filter(queryset_search_params, request):
@@ -223,10 +257,10 @@ def set_pas_filter(queryset_search_params, request):
         A somewhat specific filter for PAS needs... The below OR query is AND'ed
         with any other possible filters from other query parameters.
         """
-        if request.user.username not in ('metax', 'tpas'):
-            raise Http403({ 'detail': ['query parameter pas_filter is restricted']})
+        if request.user.username not in ("metax", "tpas"):
+            raise Http403({"detail": ["query parameter pas_filter is restricted"]})

-        search_string = urllib.parse.unquote(request.query_params.get('pas_filter', ''))
+        search_string = urllib.parse.unquote(request.query_params.get("pas_filter", ""))

         # dataset title, from various languages...
         q1 = Q(research_dataset__title__en__iregex=search_string)
@@ -244,17 +278,17 @@ def set_pas_filter(queryset_search_params, request):
         q4 = Q(research_dataset__curator__0__name__iregex=search_string)
         q5 = Q(research_dataset__curator__1__name__iregex=search_string)
         q6 = Q(research_dataset__curator__2__name__iregex=search_string)
-        q7 = Q(research_dataset__curator__contains=[{ 'name': search_string }])
+        q7 = Q(research_dataset__curator__contains=[{"name": search_string}])

         q_filter = q1 | q2 | q3 | q4 | q5 | q6 | q7

-        if 'q_filters' in queryset_search_params: # pragma: no cover
+        if "q_filters" in queryset_search_params:  # pragma: no cover
             # no usecase yet but leaving comment for future reference... if the need arises to
             # include Q-filters from multiple sources (query params), probably AND them together
             # by appending to list
-            queryset_search_params['q_filters'].append(q_filter)
+            queryset_search_params["q_filters"].append(q_filter)
         else:
-            queryset_search_params['q_filters'] = [q_filter]
+            queryset_search_params["q_filters"] = [q_filter]

     @staticmethod
     def populate_file_details(cr_json, request):
@@ -269,10 +303,13 @@ def populate_file_details(cr_json, request):
         Note: Some of these results may be very useful to cache, or cache the entire dataset
         if feasible.
         """
-        from metax_api.api.rest.base.serializers import LightDirectorySerializer, LightFileSerializer
+        from metax_api.api.rest.base.serializers import (
+            LightDirectorySerializer,
+            LightFileSerializer,
+        )

-        rd = cr_json['research_dataset']
-        file_identifiers = [f['identifier'] for f in rd.get('files', [])]
+        rd = cr_json["research_dataset"]
+        file_identifiers = [f["identifier"] for f in rd.get("files", [])]

         # these fields must be retrieved from the db in order to do the mapping, even if they are not
         # requested when using ?file_fields=... or ?directory_fields=... ditch those fields
@@ -283,113 +320,141 @@
         directory_fields, file_fields = FileService._get_requested_file_browsing_fields(request)

-        if 'identifier' not in directory_fields:
-            directory_fields.append('identifier')
+        if "identifier" not in directory_fields:
+            directory_fields.append("identifier")
             dir_identifier_requested = False

-        if 'identifier' not in file_fields:
-            file_fields.append('identifier')
+        if "identifier" not in file_fields:
+            file_fields.append("identifier")
             file_identifier_requested = False

         for file in File.objects.filter(identifier__in=file_identifiers).values(*file_fields):
-            for f in rd['files']:
-                if f['identifier'] == file['identifier']:
-                    f['details'] = LightFileSerializer.serialize(file)
+            for f in rd["files"]:
+                if f["identifier"] == file["identifier"]:
+                    f["details"] = LightFileSerializer.serialize(file)
                     continue

-        dir_identifiers = [dr['identifier'] for dr in rd.get('directories', [])]
+        dir_identifiers = [dr["identifier"] for dr in rd.get("directories", [])]

-        for directory in Directory.objects.filter(identifier__in=dir_identifiers).values(*directory_fields):
-            for dr in rd['directories']:
-                if dr['identifier'] == directory['identifier']:
-                    dr['details'] = LightDirectorySerializer.serialize(directory)
+        for directory in Directory.objects.filter(identifier__in=dir_identifiers).values(
+            *directory_fields
+        ):
+            for dr in rd["directories"]:
+                if dr["identifier"] == directory["identifier"]:
+                    dr["details"] = LightDirectorySerializer.serialize(directory)
                     continue

         if not dir_identifiers:
             return

-        if not directory_fields or ('byte_size' in directory_fields or 'file_count' in directory_fields):
+        if not directory_fields or (
+            "byte_size" in directory_fields or "file_count" in directory_fields
+        ):
             # no specific fields requested -> retrieve,
             # OR byte_size or file_count among requested fields -> retrieve
-            _directory_data = CatalogRecord.objects.values_list('_directory_data', flat=True) \
-                .get(pk=cr_json['id'])
+            _directory_data = CatalogRecord.objects.values_list("_directory_data", flat=True).get(
+                pk=cr_json["id"]
+            )

-            for dr in rd['directories']:
+            for dr in rd["directories"]:

-                if 'details' not in dr:
+                if "details" not in dr:
                     # probably the directory did not have its details populated
                     # because the dataset is deprecated and the directory no longer exists
                     continue

-                FileService.retrieve_directory_byte_sizes_and_file_counts_for_cr(dr['details'],
-                    not_cr_id=None, directory_fields=directory_fields, cr_directory_data=_directory_data)
+                FileService.retrieve_directory_byte_sizes_and_file_counts_for_cr(
+                    dr["details"],
+                    not_cr_id=None,
+                    directory_fields=directory_fields,
+                    cr_directory_data=_directory_data,
+                )

         # cleanup identifiers, if they were not actually requested
         if not dir_identifier_requested:
-            for dr in rd['directories']:
-                del dr['details']['identifier']
+            for dr in rd["directories"]:
+                del dr["details"]["identifier"]

         if not file_identifier_requested:
-            for f in rd['files']:
-                del f['details']['identifier']
+            for f in rd["files"]:
+                del f["details"]["identifier"]

     @classmethod
-    def transform_datasets_to_format(cls, catalog_records_json, target_format,
-            include_xml_declaration=True, request=None):
+    def transform_datasets_to_format(
+        cls,
+        catalog_records_json,
+        target_format,
+        include_xml_declaration=True,
+        request=None,
+    ):
         """
         params:
         catalog_records: a list of catalog record dicts, or a single dict
         """
-        if target_format in ('datacite', 'fairdata_datacite'):
+        if target_format in ("datacite", "fairdata_datacite"):

-            is_strict = target_format == 'datacite'
+            is_strict = target_format == "datacite"

             dummy_doi = False
             if request:
-                dummy_doi = CommonService.get_boolean_query_param(request, 'dummy_doi')
+                dummy_doi = CommonService.get_boolean_query_param(request, "dummy_doi")

             return DataciteService().convert_catalog_record_to_datacite_xml(
-                catalog_records_json, include_xml_declaration, is_strict, dummy_doi=dummy_doi)
+                catalog_records_json,
+                include_xml_declaration,
+                is_strict,
+                dummy_doi=dummy_doi,
+            )

         def _preprocess_list(key, value):
             """
             Helper function to get right structure for list values. This function is called
             recursively.
             """
-            if key not in ['item', 'researchdataset'] and isinstance(value, list) and len(value) > 1:
-                value = {'item': value}
+            if (
+                key not in ["item", "researchdataset"]
+                and isinstance(value, list)
+                and len(value) > 1
+            ):
+                value = {"item": value}
             return key, value

         if isinstance(catalog_records_json, dict):
-            content_to_transform = { 'researchdataset': catalog_records_json['research_dataset'] }
+            content_to_transform = {"researchdataset": catalog_records_json["research_dataset"]}
         else:
-            rd_list = { 'researchdataset': (cr['research_dataset'] for cr in catalog_records_json) }
-            content_to_transform = { 'researchdatasets': rd_list }
+            rd_list = {"researchdataset": (cr["research_dataset"] for cr in catalog_records_json)}
+            content_to_transform = {"researchdatasets": rd_list}

         xml_str = xmltodict.unparse(content_to_transform, preprocessor=_preprocess_list)
-        xml_str = xml_str.replace('\n', '', 1)
+        xml_str = xml_str.replace("\n", "", 1)

         # This is a bit ugly way to put the metax data to the datacite namespace,
         # which allows us to use the default namespace in xquery files.
-        xml_str = xml_str.replace('<researchdataset>',
-            '<researchdataset xmlns="http://datacite.org/schema/kernel-4">')
+        xml_str = xml_str.replace(
+            "<researchdataset>",
+            '<researchdataset xmlns="http://datacite.org/schema/kernel-4">',
+        )

-        if target_format == 'metax':
+        if target_format == "metax":
             # mostly for debugging purposes, the 'metax xml' can be returned as well
             return xml_str

-        target_xslt_file_path = join(dirname(dirname(__file__)), 'api/rest/base/xslt/%s.xslt' % target_format)
+        target_xslt_file_path = join(
+            dirname(dirname(__file__)), "api/rest/base/xslt/%s.xslt" % target_format
+        )

         try:
             with open(target_xslt_file_path) as f:
                 xslt = f.read()
         except OSError:
-            raise Http400('Requested format \'%s\' is not available' % target_format)
+            raise Http400("Requested format '%s' is not available" % target_format)

         try:
             transformed_xml = sxq.execute(xslt, xml_str)
         except:
-            _logger.exception('Something is wrong with the xslt file at %s:' % target_xslt_file_path)
-            raise Http503('Requested format \'%s\' is currently unavailable' % target_format)
+            _logger.exception(
+                "Something is wrong with the xslt file at %s:" % target_xslt_file_path
+            )
+            raise Http503("Requested format '%s' is currently unavailable" % target_format)

         if include_xml_declaration:
             return '<?xml version="1.0" encoding="UTF-8"?>%s' % transformed_xml
@@ -412,249 +477,414 @@ def validate_reference_data(cls, research_dataset, cache):
         """
         reference_data = cls.get_reference_data(cache)
         # ic(reference_data)
-        refdata = reference_data['reference_data']
-        orgdata = reference_data['organization_data']['organization']
+        refdata = reference_data["reference_data"]
+        orgdata = reference_data["organization_data"]["organization"]

         errors = defaultdict(list)

-        for theme in research_dataset.get('theme', []):
-            ref_entry = cls.check_ref_data(refdata['keyword'], theme['identifier'],
-                'research_dataset.theme.identifier', errors)
+        for theme in research_dataset.get("theme", []):
+            ref_entry = cls.check_ref_data(
refdata["keyword"], + theme["identifier"], + "research_dataset.theme.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, theme, label_field='pref_label') - - for fos in research_dataset.get('field_of_science', []): - ref_entry = cls.check_ref_data(refdata['field_of_science'], fos['identifier'], - 'research_dataset.field_of_science.identifier', errors) + cls.populate_from_ref_data(ref_entry, theme, label_field="pref_label") + + for fos in research_dataset.get("field_of_science", []): + ref_entry = cls.check_ref_data( + refdata["field_of_science"], + fos["identifier"], + "research_dataset.field_of_science.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, fos, label_field='pref_label') + cls.populate_from_ref_data(ref_entry, fos, label_field="pref_label") - for remote_resource in research_dataset.get('remote_resources', []): + for remote_resource in research_dataset.get("remote_resources", []): - for license in remote_resource.get('license', []): - license_id = license.get('identifier', False) - license_url = license.get('license', False) + for license in remote_resource.get("license", []): + license_id = license.get("identifier", False) + license_url = license.get("license", False) if license_id: - ref_entry = cls.check_ref_data(refdata['license'], license['identifier'], - 'research_dataset.remote_resources.license.identifier', errors) + ref_entry = cls.check_ref_data( + refdata["license"], + license["identifier"], + "research_dataset.remote_resources.license.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, license, label_field='title', add_in_scheme=False) + cls.populate_from_ref_data( + ref_entry, license, label_field="title", add_in_scheme=False + ) # Populate license field from reference data only if it is empty, i.e. 
not provided by the user # and when the reference data license has a same_as entry - if not license_url and ref_entry.get('same_as', False): - license_url = ref_entry['same_as'] + if not license_url and ref_entry.get("same_as", False): + license_url = ref_entry["same_as"] if license_url: - license['license'] = license_url - - if remote_resource.get('resource_type', False): - ref_entry = cls.check_ref_data(refdata['resource_type'], remote_resource['resource_type']['identifier'], - 'research_dataset.remote_resources.resource_type.identifier', errors) + license["license"] = license_url + + if remote_resource.get("resource_type", False): + ref_entry = cls.check_ref_data( + refdata["resource_type"], + remote_resource["resource_type"]["identifier"], + "research_dataset.remote_resources.resource_type.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, remote_resource['resource_type'], label_field='pref_label') - - if remote_resource.get('file_type', False): - ref_entry = cls.check_ref_data(refdata['file_type'], remote_resource['file_type']['identifier'], - 'research_dataset.remote_resources.file_type.identifier', errors) + cls.populate_from_ref_data( + ref_entry, + remote_resource["resource_type"], + label_field="pref_label", + ) + + if remote_resource.get("file_type", False): + ref_entry = cls.check_ref_data( + refdata["file_type"], + remote_resource["file_type"]["identifier"], + "research_dataset.remote_resources.file_type.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, remote_resource['file_type'], label_field='pref_label') - - if remote_resource.get('use_category', False): - ref_entry = cls.check_ref_data(refdata['use_category'], remote_resource['use_category']['identifier'], - 'research_dataset.remote_resources.use_category.identifier', errors) + cls.populate_from_ref_data( + ref_entry, + remote_resource["file_type"], + label_field="pref_label", + ) + + if remote_resource.get("use_category", False): + ref_entry = cls.check_ref_data( + refdata["use_category"], + remote_resource["use_category"]["identifier"], + "research_dataset.remote_resources.use_category.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, remote_resource['use_category'], label_field='pref_label') - - for language in research_dataset.get('language', []): - ref_entry = cls.check_ref_data(refdata['language'], language['identifier'], - 'research_dataset.language.identifier', errors) + cls.populate_from_ref_data( + ref_entry, + remote_resource["use_category"], + label_field="pref_label", + ) + + for language in research_dataset.get("language", []): + ref_entry = cls.check_ref_data( + refdata["language"], + language["identifier"], + "research_dataset.language.identifier", + errors, + ) if ref_entry: - label_field = 'title' - cls.populate_from_ref_data(ref_entry, language, label_field=label_field, add_in_scheme=False) + label_field = "title" + cls.populate_from_ref_data( + ref_entry, language, label_field=label_field, add_in_scheme=False + ) cls.remove_language_obj_irrelevant_titles(language, label_field) - access_rights = research_dataset.get('access_rights', None) + access_rights = research_dataset.get("access_rights", None) if access_rights: - if 'access_type' in access_rights: - ref_entry = cls.check_ref_data(refdata['access_type'], access_rights['access_type']['identifier'], - 'research_dataset.access_rights.access_type.identifier', errors) + if "access_type" in access_rights: + ref_entry = cls.check_ref_data( + refdata["access_type"], 
+ access_rights["access_type"]["identifier"], + "research_dataset.access_rights.access_type.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, access_rights['access_type'], label_field='pref_label') - - for rg in access_rights.get('restriction_grounds', []): - ref_entry = cls.check_ref_data(refdata['restriction_grounds'], rg['identifier'], - 'research_dataset.access_rights.restriction_grounds.identifier', errors) + cls.populate_from_ref_data( + ref_entry, + access_rights["access_type"], + label_field="pref_label", + ) + + for rg in access_rights.get("restriction_grounds", []): + ref_entry = cls.check_ref_data( + refdata["restriction_grounds"], + rg["identifier"], + "research_dataset.access_rights.restriction_grounds.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, rg, label_field='pref_label') + cls.populate_from_ref_data(ref_entry, rg, label_field="pref_label") - for license in access_rights.get('license', []): - license_id = license.get('identifier', False) - license_url = license.get('license', False) + for license in access_rights.get("license", []): + license_id = license.get("identifier", False) + license_url = license.get("license", False) if license_id: - ref_entry = cls.check_ref_data(refdata['license'], license_id, - 'research_dataset.access_rights.license.identifier', errors) + ref_entry = cls.check_ref_data( + refdata["license"], + license_id, + "research_dataset.access_rights.license.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, license, label_field='title', add_in_scheme=False) + cls.populate_from_ref_data( + ref_entry, license, label_field="title", add_in_scheme=False + ) # Populate license field from reference data only if it is empty, i.e. 
not provided by the user # and when the reference data license has a same_as entry - if not license_url and ref_entry.get('same_as', False): - license_url = ref_entry['same_as'] + if not license_url and ref_entry.get("same_as", False): + license_url = ref_entry["same_as"] if license_url: - license['license'] = license_url - - for project in research_dataset.get('is_output_of', []): - for org_obj in project.get('source_organization', []): - cls.process_org_obj_against_ref_data(orgdata, org_obj, - 'research_dataset.is_output_of.source_organization', - refdata=refdata, errors=errors) - - for org_obj in project.get('has_funding_agency', []): - cls.process_org_obj_against_ref_data(orgdata, org_obj, - 'research_dataset.is_output_of.has_funding_agency', - refdata=refdata, errors=errors) - - if project.get('funder_type', False): - ref_entry = cls.check_ref_data(refdata['funder_type'], project['funder_type']['identifier'], - 'research_dataset.is_output_of.funder_type.identifier', errors) - if ref_entry: - cls.populate_from_ref_data(ref_entry, project['funder_type'], label_field='pref_label') + license["license"] = license_url + + for project in research_dataset.get("is_output_of", []): + for org_obj in project.get("source_organization", []): + cls.process_org_obj_against_ref_data( + orgdata, + org_obj, + "research_dataset.is_output_of.source_organization", + refdata=refdata, + errors=errors, + ) - for other_identifier in research_dataset.get('other_identifier', []): - if other_identifier.get('type', False): - ref_entry = cls.check_ref_data(refdata['identifier_type'], other_identifier['type']['identifier'], - 'research_dataset.other_identifier.type.identifier', errors) - if ref_entry: - cls.populate_from_ref_data(ref_entry, other_identifier['type'], label_field='pref_label') + for org_obj in project.get("has_funding_agency", []): + cls.process_org_obj_against_ref_data( + orgdata, + org_obj, + "research_dataset.is_output_of.has_funding_agency", + refdata=refdata, + errors=errors, + ) - if other_identifier.get('provider', False): - cls.process_org_obj_against_ref_data(orgdata, other_identifier['provider'], - 'research_dataset.other_identifier.provider', refdata=refdata, - errors=errors) + if project.get("funder_type", False): + ref_entry = cls.check_ref_data( + refdata["funder_type"], + project["funder_type"]["identifier"], + "research_dataset.is_output_of.funder_type.identifier", + errors, + ) + if ref_entry: + cls.populate_from_ref_data( + ref_entry, project["funder_type"], label_field="pref_label" + ) + + for other_identifier in research_dataset.get("other_identifier", []): + if other_identifier.get("type", False): + ref_entry = cls.check_ref_data( + refdata["identifier_type"], + other_identifier["type"]["identifier"], + "research_dataset.other_identifier.type.identifier", + errors, + ) + if ref_entry: + cls.populate_from_ref_data( + ref_entry, other_identifier["type"], label_field="pref_label" + ) + + if other_identifier.get("provider", False): + cls.process_org_obj_against_ref_data( + orgdata, + other_identifier["provider"], + "research_dataset.other_identifier.provider", + refdata=refdata, + errors=errors, + ) - for spatial in research_dataset.get('spatial', []): - as_wkt = spatial.get('as_wkt', []) + for spatial in research_dataset.get("spatial", []): + as_wkt = spatial.get("as_wkt", []) - if spatial.get('place_uri', False): - place_uri = spatial.get('place_uri') - ref_entry = cls.check_ref_data(refdata['location'], place_uri['identifier'], - 'research_dataset.spatial.place_uri.identifier', 
errors) + if spatial.get("place_uri", False): + place_uri = spatial.get("place_uri") + ref_entry = cls.check_ref_data( + refdata["location"], + place_uri["identifier"], + "research_dataset.spatial.place_uri.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, place_uri, label_field='pref_label') + cls.populate_from_ref_data(ref_entry, place_uri, label_field="pref_label") # Populate as_wkt field from reference data only if it is empty, i.e. not provided by the user # and when the coordinates are available in the reference data - if not as_wkt and ref_entry.get('wkt', False): - as_wkt.append(ref_entry.get('wkt')) + if not as_wkt and ref_entry.get("wkt", False): + as_wkt.append(ref_entry.get("wkt")) if as_wkt: - spatial['as_wkt'] = as_wkt - - for file in research_dataset.get('files', []): - if file.get('file_type', False): - ref_entry = cls.check_ref_data(refdata['file_type'], file['file_type']['identifier'], - 'research_dataset.files.file_type.identifier', errors) + spatial["as_wkt"] = as_wkt + + for file in research_dataset.get("files", []): + if file.get("file_type", False): + ref_entry = cls.check_ref_data( + refdata["file_type"], + file["file_type"]["identifier"], + "research_dataset.files.file_type.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, file['file_type'], label_field='pref_label') - - if file.get('use_category', False): - ref_entry = cls.check_ref_data(refdata['use_category'], file['use_category']['identifier'], - 'research_dataset.files.use_category.identifier', errors) + cls.populate_from_ref_data( + ref_entry, file["file_type"], label_field="pref_label" + ) + + if file.get("use_category", False): + ref_entry = cls.check_ref_data( + refdata["use_category"], + file["use_category"]["identifier"], + "research_dataset.files.use_category.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, file['use_category'], label_field='pref_label') - - for directory in research_dataset.get('directories', []): - if directory.get('use_category', False): - ref_entry = cls.check_ref_data(refdata['use_category'], directory['use_category']['identifier'], - 'research_dataset.directories.use_category.identifier', errors) + cls.populate_from_ref_data( + ref_entry, file["use_category"], label_field="pref_label" + ) + + for directory in research_dataset.get("directories", []): + if directory.get("use_category", False): + ref_entry = cls.check_ref_data( + refdata["use_category"], + directory["use_category"]["identifier"], + "research_dataset.directories.use_category.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, directory['use_category'], label_field='pref_label') - - for contributor in research_dataset.get('contributor', []): - cls.process_research_agent_obj_with_type(orgdata, refdata, errors, contributor, - 'research_dataset.contributor') - - if research_dataset.get('publisher', False): - cls.process_research_agent_obj_with_type(orgdata, refdata, errors, research_dataset['publisher'], - 'research_dataset.publisher') - - for curator in research_dataset.get('curator', []): - cls.process_research_agent_obj_with_type(orgdata, refdata, errors, curator, 'research_dataset.curator') - - for creator in research_dataset.get('creator', []): - cls.process_research_agent_obj_with_type(orgdata, refdata, errors, creator, 'research_dataset.creator') - - for rights_holder in research_dataset.get('rights_holder', []): - cls.process_research_agent_obj_with_type(orgdata, refdata, errors, 
rights_holder, - 'research_dataset.rights_holder') - - for activity in research_dataset.get('provenance', []): - for was_associated_with in activity.get('was_associated_with', []): - cls.process_research_agent_obj_with_type(orgdata, refdata, errors, was_associated_with, - 'research_dataset.provenance.was_associated_with') - - if activity.get('spatial', False): - spatial = activity['spatial'] - as_wkt = spatial.get('as_wkt', []) + cls.populate_from_ref_data( + ref_entry, directory["use_category"], label_field="pref_label" + ) + + for contributor in research_dataset.get("contributor", []): + cls.process_research_agent_obj_with_type( + orgdata, refdata, errors, contributor, "research_dataset.contributor" + ) + + if research_dataset.get("publisher", False): + cls.process_research_agent_obj_with_type( + orgdata, + refdata, + errors, + research_dataset["publisher"], + "research_dataset.publisher", + ) + + for curator in research_dataset.get("curator", []): + cls.process_research_agent_obj_with_type( + orgdata, refdata, errors, curator, "research_dataset.curator" + ) + + for creator in research_dataset.get("creator", []): + cls.process_research_agent_obj_with_type( + orgdata, refdata, errors, creator, "research_dataset.creator" + ) + + for rights_holder in research_dataset.get("rights_holder", []): + cls.process_research_agent_obj_with_type( + orgdata, + refdata, + errors, + rights_holder, + "research_dataset.rights_holder", + ) + + for activity in research_dataset.get("provenance", []): + for was_associated_with in activity.get("was_associated_with", []): + cls.process_research_agent_obj_with_type( + orgdata, + refdata, + errors, + was_associated_with, + "research_dataset.provenance.was_associated_with", + ) - if spatial.get('place_uri', False): - place_uri = spatial.get('place_uri') - ref_entry = cls.check_ref_data(refdata['location'], place_uri['identifier'], - 'research_dataset.provenance.spatial.place_uri.identifier', errors) + if activity.get("spatial", False): + spatial = activity["spatial"] + as_wkt = spatial.get("as_wkt", []) + + if spatial.get("place_uri", False): + place_uri = spatial.get("place_uri") + ref_entry = cls.check_ref_data( + refdata["location"], + place_uri["identifier"], + "research_dataset.provenance.spatial.place_uri.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, place_uri, label_field='pref_label') + cls.populate_from_ref_data(ref_entry, place_uri, label_field="pref_label") # Populate as_wkt field from reference data only if it is empty, i.e. 
not provided by the user # and when the coordinates are available in the reference data - if not as_wkt and ref_entry.get('wkt', False): - as_wkt.append(ref_entry.get('wkt')) + if not as_wkt and ref_entry.get("wkt", False): + as_wkt.append(ref_entry.get("wkt")) if as_wkt: - spatial['as_wkt'] = as_wkt - - if activity.get('lifecycle_event', False): - ref_entry = cls.check_ref_data(refdata['lifecycle_event'], - activity['lifecycle_event']['identifier'], - 'research_dataset.provenance.lifecycle_event.identifier', errors) + spatial["as_wkt"] = as_wkt + + if activity.get("lifecycle_event", False): + ref_entry = cls.check_ref_data( + refdata["lifecycle_event"], + activity["lifecycle_event"]["identifier"], + "research_dataset.provenance.lifecycle_event.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, activity['lifecycle_event'], label_field='pref_label') - - if activity.get('preservation_event', False): - ref_entry = cls.check_ref_data(refdata['preservation_event'], - activity['preservation_event']['identifier'], - 'research_dataset.provenance.preservation_event.identifier', errors) + cls.populate_from_ref_data( + ref_entry, activity["lifecycle_event"], label_field="pref_label" + ) + + if activity.get("preservation_event", False): + ref_entry = cls.check_ref_data( + refdata["preservation_event"], + activity["preservation_event"]["identifier"], + "research_dataset.provenance.preservation_event.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, activity['preservation_event'], label_field='pref_label') - - if activity.get('event_outcome', False): - ref_entry = cls.check_ref_data(refdata['event_outcome'], - activity['event_outcome']['identifier'], - 'research_dataset.provenance.event_outcome.identifier', errors) + cls.populate_from_ref_data( + ref_entry, + activity["preservation_event"], + label_field="pref_label", + ) + + if activity.get("event_outcome", False): + ref_entry = cls.check_ref_data( + refdata["event_outcome"], + activity["event_outcome"]["identifier"], + "research_dataset.provenance.event_outcome.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, activity['event_outcome'], label_field='pref_label') - for infra in research_dataset.get('infrastructure', []): - ref_entry = cls.check_ref_data(refdata['research_infra'], infra['identifier'], - 'research_dataset.infrastructure.identifier', errors) + cls.populate_from_ref_data( + ref_entry, activity["event_outcome"], label_field="pref_label" + ) + for infra in research_dataset.get("infrastructure", []): + ref_entry = cls.check_ref_data( + refdata["research_infra"], + infra["identifier"], + "research_dataset.infrastructure.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, infra, label_field='pref_label') - - for relation in research_dataset.get('relation', []): - if relation.get('relation_type', False): - ref_entry = cls.check_ref_data(refdata['relation_type'], relation['relation_type']['identifier'], - 'research_dataset.relation.relation_type.identifier', errors) + cls.populate_from_ref_data(ref_entry, infra, label_field="pref_label") + + for relation in research_dataset.get("relation", []): + if relation.get("relation_type", False): + ref_entry = cls.check_ref_data( + refdata["relation_type"], + relation["relation_type"]["identifier"], + "research_dataset.relation.relation_type.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, relation['relation_type'], label_field='pref_label') + 
cls.populate_from_ref_data( + ref_entry, relation["relation_type"], label_field="pref_label" + ) - if relation.get('entity', False) and relation.get('entity').get('type', False): + if relation.get("entity", False) and relation.get("entity").get("type", False): - ref_entry = cls.check_ref_data(refdata['resource_type'], relation['entity']['type']['identifier'], - 'research_dataset.relation.entity.type.identifier', errors) + ref_entry = cls.check_ref_data( + refdata["resource_type"], + relation["entity"]["type"]["identifier"], + "research_dataset.relation.entity.type.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, relation['entity']['type'], label_field='pref_label') + cls.populate_from_ref_data( + ref_entry, relation["entity"]["type"], label_field="pref_label" + ) if errors: raise ValidationError(errors) @@ -668,7 +898,7 @@ def remove_contact_info_metadata(cls, rd): :param rd: :return: research dataset with removed contact information """ - return remove_keys_recursively(rd, ['email', 'telephone', 'phone']) + return remove_keys_recursively(rd, ["email", "telephone", "phone"]) @classmethod def check_and_remove_metadata_based_on_access_type(cls, rd): @@ -681,21 +911,25 @@ def check_and_remove_metadata_based_on_access_type(cls, rd): """ access_type_id = cls.get_research_dataset_access_type(rd) - if access_type_id == ACCESS_TYPES['open']: + if access_type_id == ACCESS_TYPES["open"]: pass - elif access_type_id == ACCESS_TYPES['login']: + elif access_type_id == ACCESS_TYPES["login"]: pass - elif access_type_id == ACCESS_TYPES['permit']: + elif access_type_id == ACCESS_TYPES["permit"]: # TODO: # If user does not have rems permission for the catalog record, strip it: # cls._strip_file_and_directory_metadata(rd) # strip always for now. 
Remove this part when rems checking is implemented cls._strip_file_and_directory_metadata(rd) - elif access_type_id == ACCESS_TYPES['embargo']: + elif access_type_id == ACCESS_TYPES["embargo"]: try: - embargo_time_passed = get_tz_aware_now_without_micros() >= \ - parse_timestamp_string_to_tz_aware_datetime(cls.get_research_dataset_embargo_available(rd)) + embargo_time_passed = ( + get_tz_aware_now_without_micros() + >= parse_timestamp_string_to_tz_aware_datetime( + cls.get_research_dataset_embargo_available(rd) + ) + ) except Exception as e: _logger.error(e) embargo_time_passed = False @@ -723,13 +957,13 @@ def _strip_file_metadata(cls, rd): :param rd: """ - file_keys_to_leave = set(['title', 'use_category', 'file_type', 'details']) - details_keys_to_leave = set(['byte_size']) + file_keys_to_leave = set(["title", "use_category", "file_type", "details"]) + details_keys_to_leave = set(["byte_size"]) - for file in rd.get('files', []): + for file in rd.get("files", []): leave_keys_in_dict(file, file_keys_to_leave) - if 'details' in file: - leave_keys_in_dict(file['details'], details_keys_to_leave) + if "details" in file: + leave_keys_in_dict(file["details"], details_keys_to_leave) @classmethod def _strip_directory_metadata(cls, rd): @@ -741,33 +975,33 @@ def _strip_directory_metadata(cls, rd): :param rd: """ - dir_keys_to_leave = set(['title', 'use_category', 'details']) - details_keys_to_leave = set(['byte_size']) + dir_keys_to_leave = set(["title", "use_category", "details"]) + details_keys_to_leave = set(["byte_size"]) - for dir in rd.get('directories', []): + for dir in rd.get("directories", []): leave_keys_in_dict(dir, dir_keys_to_leave) - if 'details' in dir: - leave_keys_in_dict(dir['details'], details_keys_to_leave) + if "details" in dir: + leave_keys_in_dict(dir["details"], details_keys_to_leave) @staticmethod def get_research_dataset_access_type(rd): - return rd.get('access_rights', {}).get('access_type', {}).get('identifier', '') + return rd.get("access_rights", {}).get("access_type", {}).get("identifier", "") @staticmethod def get_research_dataset_embargo_available(rd): - return rd.get('access_rights', {}).get('available', '') + return rd.get("access_rights", {}).get("available", "") @staticmethod def get_research_dataset_license_url(rd): """ Return identifier of the first license if there is a license at all """ - if not rd.get('access_rights', {}).get('license'): + if not rd.get("access_rights", {}).get("license"): return {} - license = rd['access_rights']['license'][0] + license = rd["access_rights"]["license"][0] - return license.get('identifier') or license.get('license') + return license.get("identifier") or license.get("license") @classmethod def destroy_bulk(cls, request): @@ -775,7 +1009,7 @@ def destroy_bulk(cls, request): Mark datasets as deleted en masse. Parameter cr_identifiers can be a list of pk's (integers), or file identifiers (strings). 
""" - _logger.info('Begin bulk delete datasets') + _logger.info("Begin bulk delete datasets") cr_ids = cls.identifiers_to_ids(request.data) cr_deleted = [] @@ -791,10 +1025,10 @@ def destroy_bulk(cls, request): pass if sorted(no_access) == sorted(cr_ids): - raise Http403({ 'detail': ['None of datasets exists or are permitted for users']}) + raise Http403({"detail": ["None of datasets exists or are permitted for users"]}) if not cr_deleted: return Response(cr_deleted, status=status.HTTP_404_NOT_FOUND) - _logger.info(f'Marked datasets {cr_deleted} as deleted') + _logger.info(f"Marked datasets {cr_deleted} as deleted") return Response(cr_deleted, status=status.HTTP_200_OK) diff --git a/src/metax_api/services/catalog_record_service_v2.py b/src/metax_api/services/catalog_record_service_v2.py index 92ca473f..a51c6b75 100755 --- a/src/metax_api/services/catalog_record_service_v2.py +++ b/src/metax_api/services/catalog_record_service_v2.py @@ -12,25 +12,24 @@ class CatalogRecordServiceV2(CatalogRecordService): - @classmethod def get_queryset_search_params(cls, request): """ v1 API has query params state and preservation_state doing the same thing. v2 API will properly use state to filter by field state. """ - state = request.query_params.get('state', None) + state = request.query_params.get("state", None) if state: # note: request.query_params is a @property of the request object. can not be directly edited. # see rest_framework/request.py request._request.GET._mutable = True - request.query_params.pop('state', None) + request.query_params.pop("state", None) request._request.GET._mutable = False queryset_search_params = super().get_queryset_search_params(request) if state: - queryset_search_params['state'] = state + queryset_search_params["state"] = state return queryset_search_params diff --git a/src/metax_api/services/common_service.py b/src/metax_api/services/common_service.py index 44e01d3b..9be5abdd 100755 --- a/src/metax_api/services/common_service.py +++ b/src/metax_api/services/common_service.py @@ -17,13 +17,15 @@ from metax_api.exceptions import Http400, Http412 from metax_api.models import CatalogRecord as cr, File -from metax_api.utils import get_tz_aware_now_without_micros, parse_timestamp_string_to_tz_aware_datetime +from metax_api.utils import ( + get_tz_aware_now_without_micros, + parse_timestamp_string_to_tz_aware_datetime, +) _logger = logging.getLogger(__name__) -class CommonService(): - +class CommonService: @staticmethod def is_primary_key(received_lookup_value): if not received_lookup_value: @@ -47,14 +49,14 @@ def get_boolean_query_param(request, param_name): else: # if method is called before a view's dispatch() method, the only request available # is a low level WSGIRequest. - for query_param in (val for val in request.environ['QUERY_STRING'].split('&')): + for query_param in (val for val in request.environ["QUERY_STRING"].split("&")): try: - param, val = query_param.split('=') + param, val = query_param.split("=") except ValueError: # probably error was 'cant unpack tuple, not enough values' -> was a # param without value specified, such as ?recursive, instead of ?recursive=true. # if flag is specified without value, default value is to be considered True. 
-                    param, val = query_param, 'true'
+                    param, val = query_param, "true"

                 if param == param_name:
                     value = val
@@ -62,16 +64,16 @@
             else:
                 return False

-        if value in ('', 'true'):
+        if value in ("", "true"):
             # flag was specified without value (?recursive), or with value (?recursive=true)
             return True
-        elif value in (None, 'false'):
+        elif value in (None, "false"):
             # flag was not present, or its value was ?recursive=false
             return False
         else:
-            raise ValidationError({ param_name: [
-                'boolean value must be true or false. received value was %s' % value
-            ]})
+            raise ValidationError(
+                {param_name: ["boolean value must be true or false. received value was %s" % value]}
+            )

     @staticmethod
     def get_list_query_param(request, param_name):
@@ -87,10 +89,10 @@ def get_list_query_param(request, param_name):
         if value is None:
             return None
-        elif value in ('', ','):
+        elif value in ("", ","):
             return set()

-        values_set = set( v.strip() for v in value.split(',') )
+        values_set = set(v.strip() for v in value.split(","))

         if values_set:
             return values_set
@@ -104,10 +106,10 @@ def has_research_agent_query_params(request):
         Queries are for example 'creator_person' or 'publisher_organization'
         Returns boolean
         """
-        fields = ['creator', 'curator', 'publisher', 'rights_holder']
-        types = ['organization', 'person']
+        fields = ["creator", "curator", "publisher", "rights_holder"]
+        types = ["organization", "person"]
         for field in fields:
-            if any(request.query_params.get(f'{field}_{type}') for type in types):
+            if any(request.query_params.get(f"{field}_{type}") for type in types):
                 return True
         return False
@@ -125,26 +127,26 @@ def create_bulk(cls, request, serializer_class, **kwargs):
         serializer_class: does the actual saving, knows what kind of object is in question
         """
         common_info = cls.update_common_info(request, return_only=True)
-        kwargs['context']['request'] = request
+        kwargs["context"]["request"] = request

         results = None

         if not request.data:
-            raise Http400('Request body is required')
+            raise Http400("Request body is required")

         if isinstance(request.data, list):

             if len(request.data) == 0:
-                raise ValidationError(['the received object list is empty'])
+                raise ValidationError(["the received object list is empty"])

             # dont fail the entire request if only some inserts fail.
             # successfully created rows are added to 'successful', and
             # failed inserts are added to 'failed', with a related error message.
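# ---------------------------------------------------------------------------
# A sketch of the partial-success bookkeeping described in the comment above:
# every row is validated independently and lands in "success" or "failed"
# rather than aborting the whole batch. The validate callable stands in for
# serializer.is_valid()/save(); names here are illustrative assumptions.
from typing import Callable, Dict, List

def create_bulk_rows(rows: List[dict], validate: Callable[[dict], dict]) -> Dict[str, list]:
    results = {"success": [], "failed": []}
    for row in rows:
        try:
            results["success"].append({"object": validate(row)})
        except ValueError as e:
            # a failed row is recorded with its error, and processing continues
            results["failed"].append({"object": row, "errors": str(e)})
    return results

# the request-level status follows the same rule as the code below:
# overall success if even one insert succeeded
def status_for(results: Dict[str, list]) -> int:
    return 201 if results["success"] else 400
# ---------------------------------------------------------------------------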
-            results = { 'success': [], 'failed': []}
+            results = {"success": [], "failed": []}

             cls._create_bulk(common_info, request.data, results, serializer_class, **kwargs)

-            if results['success']:
+            if results["success"]:
                 # if even one insert was successful, general status of the request is success
                 http_status = status.HTTP_201_CREATED
             else:
@@ -152,9 +154,11 @@
                 http_status = status.HTTP_400_BAD_REQUEST

         else:
-            results, http_status = cls._create_single(common_info, request.data, serializer_class, **kwargs)
+            results, http_status = cls._create_single(
+                common_info, request.data, serializer_class, **kwargs
+            )

-        if 'failed' in results:
+        if "failed" in results:
             cls._check_and_raise_atomic_error(request, results)

         return results, http_status
@@ -184,7 +188,7 @@ def _create_bulk(cls, common_info, initial_data_list, results, serializer_class,
                 cls._append_error(results, serializer, e)
             else:
                 serializer.save(**common_info)
-                results['success'].append({ 'object': serializer.data })
+                results["success"].append({"object": serializer.data})

     @staticmethod
     def get_json_schema(schema_folder_path, model_name, data_catalog_prefix=False):
@@ -195,27 +199,27 @@
         For datasets, a data catalog prefix can be given, in which case it will be
         the prefix for the schema file name.
         """
-        schema_name = ''
+        schema_name = ""

-        if model_name == 'dataset':
+        if model_name == "dataset":
             if data_catalog_prefix:
                 schema_name = data_catalog_prefix
             else:
-                schema_name = 'ida'
+                schema_name = "ida"

-            schema_name += '_'
+            schema_name += "_"

-        schema_name += '%s_schema.json' % model_name
+        schema_name += "%s_schema.json" % model_name

         try:
-            with open('%s/%s' % (schema_folder_path, schema_name), encoding='utf-8') as f:
+            with open("%s/%s" % (schema_folder_path, schema_name), encoding="utf-8") as f:
                 return json_load(f)
         except IOError as e:
-            if model_name != 'dataset':
+            if model_name != "dataset":
                 # only datasets have a default schema
                 raise
             _logger.warning(e)
-            with open('%s/ida_dataset_schema.json' % schema_folder_path, encoding='utf-8') as f:
+            with open("%s/ida_dataset_schema.json" % schema_folder_path, encoding="utf-8") as f:
                 return json_load(f)

     @classmethod
@@ -242,15 +246,20 @@ def update_bulk(cls, request, model_obj, serializer_class, **kwargs):
         """
         if not isinstance(request.data, list):
-            raise ValidationError({ 'detail': ['request.data is not a list'] })
+            raise ValidationError({"detail": ["request.data is not a list"]})

         common_info = cls.update_common_info(request, return_only=True)
-        results = { 'success': [], 'failed': []}
+        results = {"success": [], "failed": []}

         for row in request.data:

-            instance = cls._get_object_for_update(request, model_obj, row, results,
-                cls._request_has_header(request, 'HTTP_IF_UNMODIFIED_SINCE'))
+            instance = cls._get_object_for_update(
+                request,
+                model_obj,
+                row,
+                results,
+                cls._request_has_header(request, "HTTP_IF_UNMODIFIED_SINCE"),
+            )

             if not instance:
                 continue
@@ -264,15 +273,15 @@
                 cls._append_error(results, serializer, e)
             else:
                 serializer.save(**common_info)
-                results['success'].append({ 'object': serializer.data })
+                results["success"].append({"object": serializer.data})

         # if even one operation was successful, general status of the request is success
-        if len(results.get('success', [])) > 0:
+        if len(results.get("success", [])) > 0:
             http_status = status.HTTP_200_OK
         else:
             http_status = status.HTTP_400_BAD_REQUEST

-        if 'failed' in results:
+        if "failed" in results:
             cls._check_and_raise_atomic_error(request, results)

         return results, http_status
@@ -288,31 +297,33 @@ def update_common_info(request, return_only=False):
         return the common info, so that its info can be used manually, instead of updating
         request.data here automatically. For that purpose, use the return_only flag.
         """
-        if not request.user.username: # pragma: no cover
+        if not request.user.username:  # pragma: no cover
             # should never happen: update_common_info is executed only on update operations,
             # which requires authorization, which should put the username into the request obj.
-            ValidationError({
-                'detail': 'request.user.username not set; unknown service or user. '
-                'how did you get here without passing authorization...?'
-            })
+            ValidationError(
+                {
+                    "detail": "request.user.username not set; unknown service or user. "
+                    "how did you get here without passing authorization...?"
+                }
+            )

         method = request.stream and request.stream.method or False
         current_time = get_tz_aware_now_without_micros()
         common_info = {}

-        if method in ('PUT', 'PATCH', 'DELETE'):
-            common_info['date_modified'] = current_time
+        if method in ("PUT", "PATCH", "DELETE"):
+            common_info["date_modified"] = current_time
             if request.user.is_service:
-                common_info['service_modified'] = request.user.username
+                common_info["service_modified"] = request.user.username
             else:
-                common_info['user_modified'] = request.user.username
-                common_info['service_modified'] = None
-        elif method == 'POST':
-            common_info['date_created'] = current_time
+                common_info["user_modified"] = request.user.username
+                common_info["service_modified"] = None
+        elif method == "POST":
+            common_info["date_created"] = current_time
             if request.user.is_service:
-                common_info['service_created'] = request.user.username
+                common_info["service_created"] = request.user.username
             else:
-                common_info['user_created'] = request.user.username
+                common_info["user_created"] = request.user.username
         else:
             pass
@@ -323,18 +334,23 @@

     @staticmethod
     def _check_and_raise_atomic_error(request, results):
-        if 'success' in results and not len(results['success']):
+        if "success" in results and not len(results["success"]):
             # everything failed anyway, so return normal route even if atomic was used
             return
-        if len(results.get('failed', [])) > 0 and request.query_params.get('atomic', None) in ('', 'true'):
-            raise ValidationError({
-                'success': [],
-                'failed': results['failed'],
-                'detail': [
-                    'request was failed due to parameter atomic=true. all changes were rolled back. '
-                    'actual failed rows are listed in the field \"failed\".'
-                ]
-            })
+        if len(results.get("failed", [])) > 0 and request.query_params.get("atomic", None) in (
+            "",
+            "true",
+        ):
+            raise ValidationError(
+                {
+                    "success": [],
+                    "failed": results["failed"],
+                    "detail": [
+                        "request was failed due to parameter atomic=true. all changes were rolled back. "
+                        'actual failed rows are listed in the field "failed".'
+                    ],
+                }
+            )

     @staticmethod
     def _append_error(results, serializer, error):
@@ -346,16 +362,18 @@
         is still a crash, and should be fixed.
         """
         try:
-            results['failed'].append({ 'object': serializer.initial_data, 'errors': serializer.errors })
+            results["failed"].append(
+                {"object": serializer.initial_data, "errors": serializer.errors}
+            )
         except AssertionError:
             _logger.exception(
-                'Looks like serializer.is_valid() tripped - could not access serializer.errors. '
-                'Returning str(e) instead. THIS SHOULD BE FIXED. YES, IM TALKING TO YOU'
+                "Looks like serializer.is_valid() tripped - could not access serializer.errors. "
+                "Returning str(e) instead. THIS SHOULD BE FIXED. YES, IM TALKING TO YOU"
             )
             # note that all cases where this happens should be fixed - this is a programming error.
             # str(e) might show dicts or lists as strings, which would look silly to receiving
             # humans
-            results['failed'].append({ 'object': serializer.initial_data, 'errors': str(error) })
+            results["failed"].append({"object": serializer.initial_data, "errors": str(error)})

     @staticmethod
     def _get_object_for_update(request, model_obj, row, results, check_unmodified_since):
@@ -374,39 +392,48 @@ def _get_object_for_update(request, model_obj, row, results, check_unmodified_si
         try:
             instance = model_obj.objects.get(using_dict=row)
         except model_obj.DoesNotExist:
-            results['failed'].append({ 'object': row, 'errors': { 'detail': ['object not found'] }})
+            results["failed"].append({"object": row, "errors": {"detail": ["object not found"]}})
         except ValidationError as e:
-            results['failed'].append({ 'object': row, 'errors': { 'detail': e.detail } })
+            results["failed"].append({"object": row, "errors": {"detail": e.detail}})

         if instance and not instance.user_has_access(request):
             # dont reveal anything from the actual instance
             ret = {}
-            if 'id' in row:
-                ret['id'] = row['id']
-            if 'identifier' in row:
-                ret['identifier'] = row['identifier']
+            if "id" in row:
+                ret["id"] = row["id"]
+            if "identifier" in row:
+                ret["identifier"] = row["identifier"]

-            results['failed'].append({
-                'object': ret,
-                'errors': {
-                    'detail': ['You are not permitted to access this resource.']
+            results["failed"].append(
+                {
+                    "object": ret,
+                    "errors": {"detail": ["You are not permitted to access this resource."]},
                 }
-            })
+            )

             instance = None

         if instance and check_unmodified_since:
-            if 'date_modified' not in row:
-                results['failed'].append({
-                    'object': row,
-                    'errors': {
-                        'detail': ['Field date_modified is required when the header If-Unmodified-Since is set']
+            if "date_modified" not in row:
+                results["failed"].append(
+                    {
+                        "object": row,
+                        "errors": {
+                            "detail": [
+                                "Field date_modified is required when the header If-Unmodified-Since is set"
+                            ]
+                        },
+                    }
+                )
+            elif instance.modified_since(row["date_modified"]):
+                results["failed"].append(
+                    {
+                        "object": row,
+                        "errors": {"detail": ["Resource has been modified"]},
                     }
-                })
-            elif instance.modified_since(row['date_modified']):
-                results['failed'].append({ 'object': row, 'errors': { 'detail': ['Resource has been modified'] } })
+                )
             else:
                 # good case - all is good
                 pass
@@ -416,22 +443,22 @@

     @classmethod
     def _validate_and_get_if_unmodified_since_header_as_tz_aware_datetime(cls, request):
         try:
-            return cls._validate_http_date_header(request, 'HTTP_IF_UNMODIFIED_SINCE')
+            return cls._validate_http_date_header(request, "HTTP_IF_UNMODIFIED_SINCE")
         except:
-            raise Http400('Bad If-Unmodified-Since header')
+            raise Http400("Bad If-Unmodified-Since header")

     @classmethod
     def validate_and_get_if_modified_since_header_as_tz_aware_datetime(cls, request):
         try:
-            return cls._validate_http_date_header(request, 'HTTP_IF_MODIFIED_SINCE')
+            return cls._validate_http_date_header(request, "HTTP_IF_MODIFIED_SINCE")
         except:
-            raise Http400('Bad If-Modified-Since header')
+            raise Http400("Bad If-Modified-Since header")

     @staticmethod
     def _validate_http_date_header(request, header_name):
-        timestamp = request.META.get(header_name, '')
+        timestamp = request.META.get(header_name, "")
         # According to RFC 7232, Http date should always be expressed in 'GMT'. Forcing its use makes this explicit
-        if not timestamp.endswith('GMT'):
+        if not timestamp.endswith("GMT"):
             raise Exception
         return parse_timestamp_string_to_tz_aware_datetime(timestamp)
@@ -441,7 +468,7 @@

     @staticmethod
     def _request_is_write_operation(request):
-        return request.method in ('POST', 'PUT', 'PATCH', 'DELETE')
+        return request.method in ("POST", "PUT", "PATCH", "DELETE")

     @staticmethod
     def request_is_create_operation(request):
@@ -451,17 +478,23 @@
         files to the dataset thus, not creating the dataset. This might not work for
         other datatypes out of the box.
         """
-        return request.method in ('POST') and 'files' not in request.path
+        return request.method in ("POST") and "files" not in request.path

     @classmethod
     def check_if_unmodified_since(cls, request, obj):
-        if cls._request_is_write_operation(request) and \
-                cls._request_has_header(request, 'HTTP_IF_UNMODIFIED_SINCE'):
+        if cls._request_is_write_operation(request) and cls._request_has_header(
+            request, "HTTP_IF_UNMODIFIED_SINCE"
+        ):

-            header_timestamp = cls._validate_and_get_if_unmodified_since_header_as_tz_aware_datetime(request)
+            header_timestamp = (
+                cls._validate_and_get_if_unmodified_since_header_as_tz_aware_datetime(request)
+            )

             if obj.modified_since(header_timestamp):
-                raise Http412('Resource has been modified since {0} (timezone: {1})'.format(
-                    str(header_timestamp), timezone.get_default_timezone_name()))
+                raise Http412(
+                    "Resource has been modified since {0} (timezone: {1})".format(
+                        str(header_timestamp), timezone.get_default_timezone_name()
+                    )
+                )

     @classmethod
     def set_if_modified_since_filter(cls, request, filter_obj):
@@ -475,16 +508,18 @@
         :param filter_obj
         :return:
         """
-        if not cls._request_is_write_operation(request) and cls._request_has_header(request, 'HTTP_IF_MODIFIED_SINCE'):
+        if not cls._request_is_write_operation(request) and cls._request_has_header(
+            request, "HTTP_IF_MODIFIED_SINCE"
+        ):

             ts = cls.validate_and_get_if_modified_since_header_as_tz_aware_datetime(request)

             flter = Q(date_modified__gt=ts) | (Q(date_modified=None) & Q(date_created__gt=ts))

-            if 'q_filters' in filter_obj:
-                filter_obj['q_filters'].append(flter)
+            if "q_filters" in filter_obj:
+                filter_obj["q_filters"].append(flter)
             else:
-                filter_obj['q_filters'] = [flter]
+                filter_obj["q_filters"] = [flter]

     @staticmethod
     def identifiers_to_ids(identifiers: List[any], params=None):
@@ -493,16 +528,24 @@
         do a query to get a list of pk's instead, since they will be used quite a few times.
         """
         if not isinstance(identifiers, list):
-            raise Http400('Received identifiers is not a list')
+            raise Http400("Received identifiers is not a list")
         elif not identifiers:
-            _logger.info('Received empty list of identifiers. Aborting')
-            raise Http400('Received empty list of identifiers')
+            _logger.info("Received empty list of identifiers. Aborting")
+            raise Http400("Received empty list of identifiers")
         elif all(isinstance(x, int) for x in identifiers):
             return identifiers

-        if params in ['files', 'noparams']:
-            identifiers = [ id for id in File.objects.filter(identifier__in=identifiers).values_list('id', flat=True) ]
+        if params in ["files", "noparams"]:
+            identifiers = [
+                id
+                for id in File.objects.filter(identifier__in=identifiers).values_list(
+                    "id", flat=True
+                )
+            ]
         else:
-            identifiers = [ id for id in cr.objects.filter(identifier__in=identifiers).values_list('id', flat=True) ]
+            identifiers = [
+                id
+                for id in cr.objects.filter(identifier__in=identifiers).values_list("id", flat=True)
+            ]

         return identifiers

diff --git a/src/metax_api/services/data_catalog_service.py b/src/metax_api/services/data_catalog_service.py
index 72330430..6c3f333d 100755
--- a/src/metax_api/services/data_catalog_service.py
+++ b/src/metax_api/services/data_catalog_service.py
@@ -16,7 +16,6 @@

 class DataCatalogService(ReferenceDataMixin):
-
     @classmethod
     def validate_reference_data(cls, data_catalog, cache):
         """
@@ -33,50 +32,84 @@
         reference_data = cls.get_reference_data(cache)

-        refdata = reference_data['reference_data']
+        refdata = reference_data["reference_data"]
         # ic(refdata.keys())
-        orgdata = reference_data['organization_data']
+        orgdata = reference_data["organization_data"]
         # ic(orgdata.keys())
         errors = defaultdict(list)

-        for language in data_catalog.get('language', []):
-            ref_entry = cls.check_ref_data(refdata['language'], language['identifier'],
-                'data_catalog_json.language.identifier', errors)
+        for language in data_catalog.get("language", []):
+            ref_entry = cls.check_ref_data(
+                refdata["language"],
+                language["identifier"],
+                "data_catalog_json.language.identifier",
+                errors,
+            )
             if ref_entry:
-                label_field = 'title'
-                cls.populate_from_ref_data(ref_entry, language, label_field=label_field, add_in_scheme=False)
+                label_field = "title"
+                cls.populate_from_ref_data(
+                    ref_entry, language, label_field=label_field, add_in_scheme=False
+                )
                 cls.remove_language_obj_irrelevant_titles(language, label_field)

-        for fos in data_catalog.get('field_of_science', []):
-            ref_entry = cls.check_ref_data(refdata['field_of_science'], fos['identifier'],
-                'data_catalog_json.field_of_science.identifier', errors)
+        for fos in data_catalog.get("field_of_science", []):
+            ref_entry = cls.check_ref_data(
+                refdata["field_of_science"],
+                fos["identifier"],
+                "data_catalog_json.field_of_science.identifier",
+                errors,
+            )
             if ref_entry:
-                cls.populate_from_ref_data(ref_entry, fos, label_field='pref_label', add_in_scheme=False)
+                cls.populate_from_ref_data(
+                    ref_entry, fos, label_field="pref_label", add_in_scheme=False
+                )

-        access_rights = data_catalog.get('access_rights', None)
+        access_rights = data_catalog.get("access_rights", None)
         if access_rights:
-            for access_type in access_rights.get('access_type', []):
-                ref_entry = cls.check_ref_data(refdata['access_type'], access_type['identifier'],
-                    'data_catalog_json.access_rights.access_type.identifier', errors)
+            for access_type in access_rights.get("access_type", []):
+                ref_entry = cls.check_ref_data(
+                    refdata["access_type"],
+                    access_type["identifier"],
+                    "data_catalog_json.access_rights.access_type.identifier",
+                    errors,
+                )
                 if ref_entry:
-                    cls.populate_from_ref_data(ref_entry, access_type, label_field='pref_label', add_in_scheme=False)
-
-            for license in access_rights.get('license', []):
-                ref_entry = cls.check_ref_data(refdata['license'], license['identifier'],
-                    'data_catalog_json.access_rights.license.identifier', errors)
+                    cls.populate_from_ref_data(
+                        ref_entry,
+                        access_type,
+                        label_field="pref_label",
+                        add_in_scheme=False,
+                    )
+
+            for license in access_rights.get("license", []):
+                ref_entry = cls.check_ref_data(
+                    refdata["license"],
+                    license["identifier"],
+                    "data_catalog_json.access_rights.license.identifier",
+                    errors,
+                )
                 if ref_entry:
-                    cls.populate_from_ref_data(ref_entry, license, label_field='title', add_in_scheme=False)
-
-            if 'has_rights_related_agent' in access_rights:
-                for agent in access_rights.get('has_rights_related_agent', []):
-                    cls.process_org_obj_against_ref_data(orgdata['organization'], agent,
-                        'data_catalog_json.access_rights.has_rights_related_agent',
-                        errors=errors)
-
-        publisher = data_catalog.get('publisher', None)
+                    cls.populate_from_ref_data(
+                        ref_entry, license, label_field="title", add_in_scheme=False
+                    )
+
+            if "has_rights_related_agent" in access_rights:
+                for agent in access_rights.get("has_rights_related_agent", []):
+                    cls.process_org_obj_against_ref_data(
+                        orgdata["organization"],
+                        agent,
+                        "data_catalog_json.access_rights.has_rights_related_agent",
+                        errors=errors,
+                    )
+
+        publisher = data_catalog.get("publisher", None)
         if publisher:
-            cls.process_org_obj_against_ref_data(orgdata['organization'], publisher, 'data_catalog_json.publisher',
-                errors=errors)
+            cls.process_org_obj_against_ref_data(
+                orgdata["organization"],
+                publisher,
+                "data_catalog_json.publisher",
+                errors=errors,
+            )

         if errors:
             raise ValidationError(errors)
@@ -86,4 +119,4 @@ def is_harvested(data_catalog):
         if not data_catalog:
             return False

-        return DataCatalog.objects.get(id=data_catalog).catalog_json['harvested']
+        return DataCatalog.objects.get(id=data_catalog).catalog_json["harvested"]

diff --git a/src/metax_api/services/datacite_service.py b/src/metax_api/services/datacite_service.py
index aa24c362..144a8155 100755
--- a/src/metax_api/services/datacite_service.py
+++ b/src/metax_api/services/datacite_service.py
@@ -28,11 +28,11 @@

 def convert_cr_to_datacite_cr_json(cr):
-    cr_json = {'research_dataset': cr.research_dataset}
+    cr_json = {"research_dataset": cr.research_dataset}
     if cr.date_created:
-        cr_json['date_created'] = datetime_to_str(cr.date_created)
+        cr_json["date_created"] = datetime_to_str(cr.date_created)
     if cr.preservation_identifier:
-        cr_json['preservation_identifier'] = cr.preservation_identifier
+        cr_json["preservation_identifier"] = cr.preservation_identifier

     return cr_json

@@ -42,7 +42,7 @@ def DataciteService(*args, **kwargs):
     A factory for the Datacite service, which is capable of interacting with Datacite API
     and converting catalog records into datacite format.
     """
-    if executing_test_case() or kwargs.pop('dummy', False):
+    if executing_test_case() or kwargs.pop("dummy", False):
         return _DataciteServiceDummy(*args, **kwargs)
     else:
         return _DataciteService(*args, **kwargs)

@@ -60,27 +60,28 @@ class _DataciteService(CommonService):

     def __init__(self, settings=django_settings):
         if not isinstance(settings, dict):
-            if hasattr(settings, 'DATACITE'):
+            if hasattr(settings, "DATACITE"):
                 settings = settings.DATACITE
             else:
-                raise Exception('Missing configuration from settings.py: DATACITE')
+                raise Exception("Missing configuration from settings.py: DATACITE")

-        if not settings.get('USERNAME', None):
-            raise Exception('Missing configuration from settings for DATACITE: USERNAME')
-        if not settings.get('PASSWORD', None):
-            raise Exception('Missing configuration from settings for DATACITE: PASSWORD')
-        if not settings.get('PREFIX', None):
-            raise Exception('Missing configuration from settings for DATACITE: PREFIX')
+        if not settings.get("USERNAME", None):
+            raise Exception("Missing configuration from settings for DATACITE: USERNAME")
+        if not settings.get("PASSWORD", None):
+            raise Exception("Missing configuration from settings for DATACITE: PASSWORD")
+        if not settings.get("PREFIX", None):
+            raise Exception("Missing configuration from settings for DATACITE: PREFIX")

-        self.user = settings['USERNAME']
-        self.pw = settings['PASSWORD']
-        self.url = settings['URL']
+        self.user = settings["USERNAME"]
+        self.pw = settings["PASSWORD"]
+        self.url = settings["URL"]

         self.mds = DataCiteMDSClient(
             username=self.user,
             password=self.pw,
-            prefix=settings['PREFIX'],
-            url=self.url)
+            prefix=settings["PREFIX"],
+            url=self.url,
+        )

     def create_doi_metadata(self, datacite_xml_metadata):
         """
@@ -124,87 +125,101 @@ def delete_draft_doi(self, doi):
         """
         try:
             requests.delete(
-                '{0}/doi/{1}'.format(self.url, doi),
-                headers={'Content-Type': 'application/plain;charset=UTF-8'},
-                auth=(self.user, self.pw)
+                "{0}/doi/{1}".format(self.url, doi),
+                headers={"Content-Type": "application/plain;charset=UTF-8"},
+                auth=(self.user, self.pw),
             )
         except Exception as e:
-            _logger.warning('Could not delete doi in draft state')
+            _logger.warning("Could not delete doi in draft state")
             _logger.warning(e)

     def get_validated_datacite_json(self, cr_json, is_strict, dummy_doi=False):
         if isinstance(cr_json, list):
-            raise DataciteException('Datacite conversion can only be done to individual datasets, not lists.')
+            raise DataciteException(
+                "Datacite conversion can only be done to individual datasets, not lists."
+            )

         if not cr_json:
-            raise DataciteException("Catalog record containing research_dataset required to convert anything "
-                "to datacite format")
+            raise DataciteException(
+                "Catalog record containing research_dataset required to convert anything "
+                "to datacite format"
+            )

-        rd = cr_json['research_dataset']
+        rd = cr_json["research_dataset"]

         # Figure out main lang for the dataset, if applicable
-        if 'language' in rd and len(rd['language']) > 0:
+        if "language" in rd and len(rd["language"]) > 0:
             # used when trying to get most relevant name from langString when only one value is allowed.
             # note: language contains a three-letter language. we need a two-letter language, in order to
             # access the langString translations.
this may not work as intended for some languages - lid = rd['language'][0]['identifier'] - start_idx = lid.rindex('/') + 1 - main_lang = lid[start_idx:start_idx + 2] + lid = rd["language"][0]["identifier"] + start_idx = lid.rindex("/") + 1 + main_lang = lid[start_idx : start_idx + 2] else: main_lang = None # Creators - if rd.get('creator', False): - creators = self._creators(rd['creator'], main_lang=main_lang) + if rd.get("creator", False): + creators = self._creators(rd["creator"], main_lang=main_lang) else: - raise DataciteException('Dataset does not have a creator (field: research_dataset.creator), which is a ' - 'required value for datacite format') + raise DataciteException( + "Dataset does not have a creator (field: research_dataset.creator), which is a " + "required value for datacite format" + ) # Titles - if rd.get('title', False): - titles = [{'lang': lang, 'title': title} for lang, title in rd['title'].items()] + if rd.get("title", False): + titles = [{"lang": lang, "title": title} for lang, title in rd["title"].items()] else: - raise DataciteException('Dataset does not have a title (field: research_dataset.title), which is ' - 'a required value for datacite format') + raise DataciteException( + "Dataset does not have a title (field: research_dataset.title), which is " + "a required value for datacite format" + ) # Publisher - if rd.get('publisher', False): - publisher = self._main_lang_or_default(rd['publisher']['name'], main_lang) + if rd.get("publisher", False): + publisher = self._main_lang_or_default(rd["publisher"]["name"], main_lang) elif is_strict: - raise DataciteException('Dataset does not have a publisher (field: research_dataset.publisher), which ' - 'is a required value for datacite format') + raise DataciteException( + "Dataset does not have a publisher (field: research_dataset.publisher), which " + "is a required value for datacite format" + ) else: - publisher = self._main_lang_or_default(rd['creator'][0]['name'], main_lang) + publisher = self._main_lang_or_default(rd["creator"][0]["name"], main_lang) # Publication year - if rd.get('issued', False): - publication_year = rd['issued'][0:4] + if rd.get("issued", False): + publication_year = rd["issued"][0:4] elif is_strict: - raise DataciteException('Dataset does not have a date of issuance (field: research_dataset.issued), which ' - 'is a required value for datacite format') + raise DataciteException( + "Dataset does not have a date of issuance (field: research_dataset.issued), which " + "is a required value for datacite format" + ) else: - publication_year = cr_json['date_created'][0:4] + publication_year = cr_json["date_created"][0:4] # Identifier - pref_id = rd['preferred_identifier'] - identifier = cr_json.get('preservation_identifier', None) or pref_id + pref_id = rd["preferred_identifier"] + identifier = cr_json.get("preservation_identifier", None) or pref_id is_metax_doi = is_metax_generated_doi_identifier(identifier) is_metax_urn = is_metax_generated_urn_identifier(identifier) is_remote_doi = is_remote_doi_identifier(identifier) if is_metax_doi or is_remote_doi: identifier_value = extract_doi_from_doi_identifier(identifier) - identifier_type = 'DOI' + identifier_type = "DOI" elif dummy_doi: - identifier_value = '10.0/%s' % identifier - identifier_type = 'DOI' + identifier_value = "10.0/%s" % identifier + identifier_type = "DOI" elif not is_strict and is_metax_urn: identifier_value = identifier - identifier_type = 'URN' + identifier_type = "URN" else: - raise DataciteException('Dataset does not have a valid 
preferred identifier (field: ' - 'research_dataset.preferred_identifier), which should contain a Metax ' - 'generated DOI, which is a required value for datacite format') + raise DataciteException( + "Dataset does not have a valid preferred identifier (field: " + "research_dataset.preferred_identifier), which should contain a Metax " + "generated DOI, which is a required value for datacite format" + ) """ Required fields, as specified by datacite: @@ -218,79 +233,97 @@ def get_validated_datacite_json(self, cr_json, is_strict, dummy_doi=False): """ datacite_json = { - 'identifier': { - 'identifier': identifier_value, - 'identifierType': identifier_type + "identifier": { + "identifier": identifier_value, + "identifierType": identifier_type, }, - 'creators': creators, - 'titles': titles, - 'publisher': publisher, - 'publicationYear': publication_year, - 'resourceType': { - 'resourceTypeGeneral': self._resource_type_general(rd) - } + "creators": creators, + "titles": titles, + "publisher": publisher, + "publicationYear": publication_year, + "resourceType": {"resourceTypeGeneral": self._resource_type_general(rd)}, } # Optional fields if is_metax_generated_urn_identifier(pref_id) and pref_id != identifier: - datacite_json['alternateIdentifiers'] = [{ - 'alternateIdentifier': pref_id, - 'alternateIdentifierType': 'URN' - }] - - if 'modified' in rd or 'available' in rd.get('access_rights', {}): - datacite_json['dates'] = [] - if 'modified' in rd: - datacite_json['dates'].append({'dateType': 'Updated', 'date': rd['modified']}) - if 'available' in rd.get('access_rights', {}): - datacite_json['dates'].append({'dateType': 'Available', 'date': rd['access_rights']['available']}) - - if 'keyword' in rd or 'field_of_science' in rd or 'theme' in rd: - datacite_json['subjects'] = [] - for kw in rd.get('keyword', []): - datacite_json['subjects'].append({'subject': kw}) - for fos in rd.get('field_of_science', []): - datacite_json['subjects'].extend(self._subjects(fos)) - for theme in rd.get('theme', []): - datacite_json['subjects'].extend(self._subjects(theme)) - - if 'total_files_byte_size' in rd: - datacite_json['sizes'] = [str(rd['total_files_byte_size'])] - - if rd.get('description', False): - datacite_json['descriptions'] = [ - {'lang': lang, 'description': desc, 'descriptionType': 'Abstract'} - for lang, desc in rd['description'].items() + datacite_json["alternateIdentifiers"] = [ + {"alternateIdentifier": pref_id, "alternateIdentifierType": "URN"} + ] + + if "modified" in rd or "available" in rd.get("access_rights", {}): + datacite_json["dates"] = [] + if "modified" in rd: + datacite_json["dates"].append({"dateType": "Updated", "date": rd["modified"]}) + if "available" in rd.get("access_rights", {}): + datacite_json["dates"].append( + {"dateType": "Available", "date": rd["access_rights"]["available"]} + ) + + if "keyword" in rd or "field_of_science" in rd or "theme" in rd: + datacite_json["subjects"] = [] + for kw in rd.get("keyword", []): + datacite_json["subjects"].append({"subject": kw}) + for fos in rd.get("field_of_science", []): + datacite_json["subjects"].extend(self._subjects(fos)) + for theme in rd.get("theme", []): + datacite_json["subjects"].extend(self._subjects(theme)) + + if "total_files_byte_size" in rd: + datacite_json["sizes"] = [str(rd["total_files_byte_size"])] + + if rd.get("description", False): + datacite_json["descriptions"] = [ + {"lang": lang, "description": desc, "descriptionType": "Abstract"} + for lang, desc in rd["description"].items() ] - if 'license' in 
rd['access_rights']: - datacite_json['rightsList'] = self._licenses(rd['access_rights']) - - if rd.get('language', False): - lid = rd['language'][0]['identifier'] - datacite_json['language'] = lid[lid.rindex('/') + 1:] - - if 'curator' in rd or 'contributor' in rd or 'creator' in rd or 'rights_holder' in rd or 'publisher' in rd: - datacite_json['contributors'] = [] - if 'curator' in rd: - datacite_json['contributors'].extend(self._contributors(rd['curator'], main_lang=main_lang)) - if 'contributor' in rd: - datacite_json['contributors'].extend(self._contributors(rd['contributor'], main_lang=main_lang)) - if 'creator' in rd: - datacite_json['contributors'].extend(self._contributors(rd['creator'], main_lang=main_lang)) - if 'rights_holder' in rd: - datacite_json['contributors'].extend(self._contributors(rd['rights_holder'], main_lang=main_lang)) - if 'publisher' in rd: - datacite_json['contributors'].extend(self._contributors(rd['publisher'], main_lang=main_lang)) - if 'spatial' in rd: - datacite_json['geoLocations'] = self._spatials(rd['spatial']) + if "license" in rd["access_rights"]: + datacite_json["rightsList"] = self._licenses(rd["access_rights"]) + + if rd.get("language", False): + lid = rd["language"][0]["identifier"] + datacite_json["language"] = lid[lid.rindex("/") + 1 :] + + if ( + "curator" in rd + or "contributor" in rd + or "creator" in rd + or "rights_holder" in rd + or "publisher" in rd + ): + datacite_json["contributors"] = [] + if "curator" in rd: + datacite_json["contributors"].extend( + self._contributors(rd["curator"], main_lang=main_lang) + ) + if "contributor" in rd: + datacite_json["contributors"].extend( + self._contributors(rd["contributor"], main_lang=main_lang) + ) + if "creator" in rd: + datacite_json["contributors"].extend( + self._contributors(rd["creator"], main_lang=main_lang) + ) + if "rights_holder" in rd: + datacite_json["contributors"].extend( + self._contributors(rd["rights_holder"], main_lang=main_lang) + ) + if "publisher" in rd: + datacite_json["contributors"].extend( + self._contributors(rd["publisher"], main_lang=main_lang) + ) + if "spatial" in rd: + datacite_json["geoLocations"] = self._spatials(rd["spatial"]) if is_strict: try: jsonschema.validate( datacite_json, - self.get_json_schema(join(dirname(dirname(__file__)), 'api/rest/base/schemas'), 'datacite_4.1') + self.get_json_schema( + join(dirname(dirname(__file__)), "api/rest/base/schemas"), + "datacite_4.1", + ), ) except Exception as e: _logger.error("Failed to validate catalog record against datacite schema") @@ -298,7 +331,9 @@ def get_validated_datacite_json(self, cr_json, is_strict, dummy_doi=False): return datacite_json - def convert_catalog_record_to_datacite_xml(self, cr_json, include_xml_declaration, is_strict, dummy_doi=False): + def convert_catalog_record_to_datacite_xml( + self, cr_json, include_xml_declaration, is_strict, dummy_doi=False + ): """ Convert dataset from catalog record data model to datacite json data model. Validate the json against datacite schema. On success, convert and return as XML. Raise exceptions on errors. 
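A minimal usage sketch of the converter touched by these hunks, assuming `cr` is a CatalogRecord instance obtained elsewhere in metax_api; `dummy=True` selects the dummy client defined later in this file, so nothing is sent to the Datacite API:

    # hedged sketch -- `cr` is an assumed CatalogRecord instance, not defined here
    service = DataciteService(dummy=True)
    cr_json = convert_cr_to_datacite_cr_json(cr)
    xml = service.convert_catalog_record_to_datacite_xml(
        cr_json, include_xml_declaration=True, is_strict=False
    )
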
@@ -311,7 +346,7 @@ def convert_catalog_record_to_datacite_xml(self, cr_json, include_xml_declaratio
         output_xml = datacite_schema41.tostring(datacite_json)
         if not include_xml_declaration:
             # the +1 is linebreak character
-            output_xml = output_xml[len("<?xml version='1.0' encoding='utf-8'?>") + 1:]
+            output_xml = output_xml[len("<?xml version='1.0' encoding='utf-8'?>") + 1 :]
         return output_xml

     @staticmethod
@@ -322,7 +357,7 @@ def _main_lang_or_default(field, main_lang=None):
         Param 'field' may also be a standard str field, in which case the field's value is returned.
         """
         if isinstance(field, dict):
-            for lang in (main_lang, 'en', 'fi', 'und'):
+            for lang in (main_lang, "en", "fi", "und"):
                 try:
                     return field[lang]
                 except:
@@ -341,11 +376,13 @@ def _resource_type_general(rd):
     def _creators(self, research_agents, main_lang):
         creators = []
         for ra in research_agents:
-            cr = {'creatorName': self._main_lang_or_default(ra['name'], main_lang=main_lang)}
-            if 'identifier' in ra:
-                cr['nameIdentifiers'] = [{'nameIdentifier': ra['identifier'], 'nameIdentifierScheme': 'URI'}]
-            if 'member_of' in ra:
-                cr['affiliations'] = self._person_affiliations(ra, main_lang)
+            cr = {"creatorName": self._main_lang_or_default(ra["name"], main_lang=main_lang)}
+            if "identifier" in ra:
+                cr["nameIdentifiers"] = [
+                    {"nameIdentifier": ra["identifier"], "nameIdentifierScheme": "URI"}
+                ]
+            if "member_of" in ra:
+                cr["affiliations"] = self._person_affiliations(ra, main_lang)
             creators.append(cr)
         return creators

@@ -355,23 +392,27 @@ def _contributors(self, research_agents, main_lang=None):
         contributors = []

         for ra in research_agents:
-            if 'contributor_type' not in ra:
+            if "contributor_type" not in ra:
                 continue

-            cr_base = {'contributorName': self._main_lang_or_default(ra['name'], main_lang=main_lang)}
+            cr_base = {
+                "contributorName": self._main_lang_or_default(ra["name"], main_lang=main_lang)
+            }

-            if 'identifier' in ra:
-                cr_base['nameIdentifiers'] = [{'nameIdentifier': ra['identifier'], 'nameIdentifierScheme': 'URI'}]
+            if "identifier" in ra:
+                cr_base["nameIdentifiers"] = [
+                    {"nameIdentifier": ra["identifier"], "nameIdentifierScheme": "URI"}
+                ]

-            if 'member_of' in ra:
-                cr_base['affiliations'] = self._person_affiliations(ra, main_lang)
+            if "member_of" in ra:
+                cr_base["affiliations"] = self._person_affiliations(ra, main_lang)

-            for ct in ra.get('contributor_type', []):
+            for ct in ra.get("contributor_type", []):
                 # for example, extracts from initial value:
                 # http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor
                 # and produces value: Distributor
                 cr = dict(cr_base)
-                cr['contributorType'] = ct['identifier'].split('contributor_type/code/')[-1]
+                cr["contributorType"] = ct["identifier"].split("contributor_type/code/")[-1]
                 contributors.append(cr)

         return contributors
@@ -384,34 +425,38 @@ def _person_affiliations(person, main_lang):
                 # stuff the affiliation information in all its translations. the datacite spec is
                 # not specific at all regarding this, it does not even care about the lang of
                 # the given value.
- affs.append(person['member_of']['name'][main_lang]) + affs.append(person["member_of"]["name"][main_lang]) except KeyError: - for lang, name_translation in person['member_of']['name'].items(): + for lang, name_translation in person["member_of"]["name"].items(): affs.append(name_translation) return affs @staticmethod def _subjects(concept): subjects = [] - for lang in concept['pref_label'].keys(): + for lang in concept["pref_label"].keys(): item = {} - if lang in concept['pref_label']: - item['schemeURI'] = concept['in_scheme'] - item['subject'] = concept['pref_label'][lang] - item['lang'] = lang + if lang in concept["pref_label"]: + item["schemeURI"] = concept["in_scheme"] + item["subject"] = concept["pref_label"][lang] + item["lang"] = lang subjects.append(item) return subjects @staticmethod def _licenses(access_rights): licenses = [] - for license in access_rights['license']: - for lang in license['title'].keys(): - licenses.append({ - 'lang': lang, - 'rightsURI': license['license'] if 'license' in license else license['identifier'], - 'rights': license['title'][lang] - }) + for license in access_rights["license"]: + for lang in license["title"].keys(): + licenses.append( + { + "lang": lang, + "rightsURI": license["license"] + if "license" in license + else license["identifier"], + "rights": license["title"][lang], + } + ) return licenses @staticmethod @@ -420,29 +465,33 @@ def _spatials(spatials): for spatial in spatials: geo_location = {} - if 'geographic_name' in spatial: - geo_location['geoLocationPlace'] = spatial['geographic_name'] + if "geographic_name" in spatial: + geo_location["geoLocationPlace"] = spatial["geographic_name"] - for wkt in spatial.get('as_wkt', []): - if wkt.startswith('POINT'): - geo_location['geoLocationPoint'] = { - 'pointLongitude': float(re.search(r'POINT\((.*) ', wkt, re.IGNORECASE).group(1)), - 'pointLatitude': float(re.search(r' (.*)\)', wkt, re.IGNORECASE).group(1)), + for wkt in spatial.get("as_wkt", []): + if wkt.startswith("POINT"): + geo_location["geoLocationPoint"] = { + "pointLongitude": float( + re.search(r"POINT\((.*) ", wkt, re.IGNORECASE).group(1) + ), + "pointLatitude": float(re.search(r" (.*)\)", wkt, re.IGNORECASE).group(1)), } # only one point can be placed break - elif wkt.startswith('POLYGON'): - geo_location['geoLocationPolygon'] = { 'polygonPoints': [] } + elif wkt.startswith("POLYGON"): + geo_location["geoLocationPolygon"] = {"polygonPoints": []} # Split POLYGON in case it contains several polygon objects - for polygon in wkt.split('POLYGON')[1][2:-2].split('),('): - for point in polygon.split(','): - longitude, latitude = point.strip().split(' ') + for polygon in wkt.split("POLYGON")[1][2:-2].split("),("): + for point in polygon.split(","): + longitude, latitude = point.strip().split(" ") polygon_point = { - 'pointLongitude': float(longitude), - 'pointLatitude': float(latitude), + "pointLongitude": float(longitude), + "pointLatitude": float(latitude), } - geo_location['geoLocationPolygon']['polygonPoints'].append(polygon_point) + geo_location["geoLocationPolygon"]["polygonPoints"].append( + polygon_point + ) # Do not support for more than one polygon within one POLYGON value, for now break @@ -456,9 +505,9 @@ def _spatials(spatials): class _DataciteServiceDummy(_DataciteService): """ - A dummy Datacite service that doesn't connect to Datacite API but is able to convert catalog records to - datacite format. 
- """ + A dummy Datacite service that doesn't connect to Datacite API but is able to convert catalog records to + datacite format. + """ def __init__(self, settings=django_settings): pass diff --git a/src/metax_api/services/file_service.py b/src/metax_api/services/file_service.py index adb9b2e3..a1ebd0db 100755 --- a/src/metax_api/services/file_service.py +++ b/src/metax_api/services/file_service.py @@ -40,13 +40,18 @@ -mechanic, so that the imports are not littered in several methods in FileService where they would otherwise be needed. """ + + def DirectorySerializer(*args, **kwargs): from metax_api.api.rest.base.serializers import DirectorySerializer as DS + DirectorySerializer = DS return DirectorySerializer(*args, **kwargs) + def FileSerializer(*args, **kwargs): from metax_api.api.rest.base.serializers import FileSerializer as FS + FileSerializer = FS return FileSerializer(*args, **kwargs) @@ -62,7 +67,7 @@ class FileService(CommonService, ReferenceDataMixin): @staticmethod def check_user_belongs_to_project(request, project_identifier): if project_identifier not in AuthService.get_user_projects(request): - raise Http403({ 'detail': [ 'You do not have access to this project.' ]}) + raise Http403({"detail": ["You do not have access to this project."]}) @classmethod def get_queryset_search_params(cls, request): @@ -75,16 +80,18 @@ def get_queryset_search_params(cls, request): queryset_search_params = {} - if request.query_params.get('project_identifier', False): - project = request.query_params['project_identifier'] + if request.query_params.get("project_identifier", False): + project = request.query_params["project_identifier"] if not request.user.is_service: cls.check_user_belongs_to_project(request, project) - queryset_search_params['project_identifier'] = project + queryset_search_params["project_identifier"] = project - if request.query_params.get('file_path', False): - if not request.query_params.get('project_identifier', False): - raise Http400('query parameter project_identifier is required when using file_path filter') - queryset_search_params['file_path__contains'] = request.query_params['file_path'] + if request.query_params.get("file_path", False): + if not request.query_params.get("project_identifier", False): + raise Http400( + "query parameter project_identifier is required when using file_path filter" + ) + queryset_search_params["file_path__contains"] = request.query_params["file_path"] return queryset_search_params @@ -96,47 +103,52 @@ def restore_files(cls, request, file_identifier_list): Only restores the files; does not touch datasets that might have been previously deprecated when a particular file was marked as removed. """ - _logger.info('Restoring files') + _logger.info("Restoring files") if not file_identifier_list: - _logger.info('Received file identifier list is empty - doing nothing') - return Response({ 'files_restored_count': 0 }, status=status.HTTP_200_OK) + _logger.info("Received file identifier list is empty - doing nothing") + return Response({"files_restored_count": 0}, status=status.HTTP_200_OK) for id in file_identifier_list: if not isinstance(id, str): - raise Http400({ - 'detail': [ - 'identifier values must be strings. found value \'%s\', which is of type %s' - % (id, type(id)) - ] - }) + raise Http400( + { + "detail": [ + "identifier values must be strings. 
found value '%s', which is of type %s" + % (id, type(id)) + ] + } + ) - _logger.info('Retrieving file details...') + _logger.info("Retrieving file details...") - file_details_list = File.objects_unfiltered \ - .filter(active=True, removed=True, identifier__in=file_identifier_list) \ - .values('id', 'identifier', 'file_path', 'project_identifier') + file_details_list = File.objects_unfiltered.filter( + active=True, removed=True, identifier__in=file_identifier_list + ).values("id", "identifier", "file_path", "project_identifier") if not len(file_details_list): - _logger.info('None of the requested files were found') + _logger.info("None of the requested files were found") raise Http404 elif len(file_details_list) < len(file_identifier_list): - _logger.info('Some of the requested files were not found. Aborting') + _logger.info("Some of the requested files were not found. Aborting") - found_pids = { f['identifier'] for f in file_details_list } - missing_identifiers = [ pid for pid in file_identifier_list if pid not in found_pids ] + found_pids = {f["identifier"] for f in file_details_list} + missing_identifiers = [pid for pid in file_identifier_list if pid not in found_pids] - raise Http400({ - 'detail': [ - 'not all requested file identifiers could be found. list of missing identifiers: %s' - % '\n'.join(missing_identifiers) - ] - }) + raise Http400( + { + "detail": [ + "not all requested file identifiers could be found. list of missing identifiers: %s" + % "\n".join(missing_identifiers) + ] + } + ) elif len(file_details_list) > len(file_identifier_list): - raise Http400({ - 'detail': [ - ''' + raise Http400( + { + "detail": [ + """ Found more files than were requested to be restored! Looks like there are some duplicates within the removed files (same identifier exists more than once). @@ -145,46 +157,51 @@ def restore_files(cls, request, file_identifier_list): attempting to restore. At this point, it is unclear which particular file of the many available ones should be restored. If feasible, in this situation it may be best to properly re-freeze those files entirely (so that new identifiers are generated). - ''' - ] - }) + """ + ] + } + ) else: # good case pass - _logger.info('Validating restore is targeting only one project...') + _logger.info("Validating restore is targeting only one project...") - projects = { f['project_identifier'] for f in file_details_list } + projects = {f["project_identifier"] for f in file_details_list} if len(projects) > 1: - raise Http400({ - 'detail': [ - 'restore operation should target one project at a time. the following projects were found: %s' - % ', '.join([ p for p in projects]) - ] - }) + raise Http400( + { + "detail": [ + "restore operation should target one project at a time. the following projects were found: %s" + % ", ".join([p for p in projects]) + ] + } + ) # note: sets do not support indexing. getting the first (only) item here project_identifier = next(iter(projects)) - _logger.info('Restoring files in project %s. Files to restore: %d' - % (project_identifier, len(file_identifier_list))) + _logger.info( + "Restoring files in project %s. Files to restore: %d" + % (project_identifier, len(file_identifier_list)) + ) # when files were deleted, any empty directories were deleted as well. check # and re-create directories, and assign new parent_directory_id to files being # restored as necessary. 
common_info = cls.update_common_info(request, return_only=True) - file_details_with_dirs = cls._create_directories_from_file_list(common_info, file_details_list) + file_details_with_dirs = cls._create_directories_from_file_list( + common_info, file_details_list + ) # note, the purpose of %%s: request.user.username is inserted into the query in cr.execute() as # a separate parameter, in order to properly escape it. update_statements = [ - '(%d, %%s, %d)' - % (f['id'], f['parent_directory']) - for f in file_details_with_dirs + "(%d, %%s, %d)" % (f["id"], f["parent_directory"]) for f in file_details_with_dirs ] - sql_restore_files = ''' + sql_restore_files = """ update metax_api_file as file set service_modified = results.service_modified, parent_directory_id = results.parent_directory_id, @@ -196,17 +213,22 @@ def restore_files(cls, request, file_identifier_list): %s ) as results(id, service_modified, parent_directory_id) where results.id = file.id; - ''' % ','.join(update_statements) + """ % ",".join( + update_statements + ) with connection.cursor() as cr: - cr.execute(sql_restore_files, [request.user.username for i in range(len(file_details_list))]) + cr.execute( + sql_restore_files, + [request.user.username for i in range(len(file_details_list))], + ) affected_rows = cr.rowcount - _logger.info('Restored %d files in project %s' % (affected_rows, project_identifier)) + _logger.info("Restored %d files in project %s" % (affected_rows, project_identifier)) cls.calculate_project_directory_byte_sizes_and_file_counts(project_identifier) - return Response({ 'restored_files_count': affected_rows }, status=status.HTTP_200_OK) + return Response({"restored_files_count": affected_rows}, status=status.HTTP_200_OK) @classmethod def get_identifiers(cls, identifiers, params, keysonly): @@ -222,14 +244,16 @@ def get_identifiers(cls, identifiers, params, keysonly): Parameter identifiers can be a list of pk's (integers), or file/dataset identifiers (strings). 
""" - _logger.info('Retrieving detailed list of %s' % params) + _logger.info("Retrieving detailed list of %s" % params) ids = cls.identifiers_to_ids(identifiers, params) if not ids: return Response([], status=status.HTTP_200_OK) - _logger.info('Searching return for the following %s (printing first 10):\n%s' - % (params, '\n'.join(str(id) for id in ids[:10]))) + _logger.info( + "Searching return for the following %s (printing first 10):\n%s" + % (params, "\n".join(str(id) for id in ids[:10])) + ) noparams = """ SELECT cr.identifier @@ -254,7 +278,9 @@ def get_identifiers(cls, identifiers, params, keysonly): ORDER BY f.id ASC; """ - files_keysonly = files.replace(", json_agg(cr.research_dataset->>'preferred_identifier')", "") + files_keysonly = files.replace( + ", json_agg(cr.research_dataset->>'preferred_identifier')", "" + ) datasets = """ SELECT cr.identifier, json_agg(f.identifier) @@ -272,22 +298,28 @@ def get_identifiers(cls, identifiers, params, keysonly): datasets_keysonly = datasets.replace(", json_agg(f.identifier)", "") if keysonly: - sql = {'files': files_keysonly, 'datasets': datasets_keysonly, 'noparams': noparams} + sql = { + "files": files_keysonly, + "datasets": datasets_keysonly, + "noparams": noparams, + } else: - sql = {'files': files, 'datasets': datasets, 'noparams': noparams} + sql = {"files": files, "datasets": datasets, "noparams": noparams} with connection.cursor() as cr: cr.execute(sql[params], [tuple(ids)]) if cr.rowcount == 0: preferred_identifiers = [] - _logger.info('No %s found for list of input identifiers' % params) + _logger.info("No %s found for list of input identifiers" % params) else: preferred_identifiers = cr.fetchall() - _logger.info('Found following %s:\n%s' % (params, preferred_identifiers)) + _logger.info("Found following %s:\n%s" % (params, preferred_identifiers)) if keysonly: - list_of_keys = [] # This has to be here, cr.fetchall() returns a list of tuples which dict - for tuples in preferred_identifiers: # can't parse like below when second item is empty + list_of_keys = ( + [] + ) # This has to be here, cr.fetchall() returns a list of tuples which dict + for tuples in preferred_identifiers: # can't parse like below when second item is empty list_of_keys.append(tuples[0]) return Response(list_of_keys, status=status.HTTP_200_OK) else: @@ -299,24 +331,28 @@ def destroy_single(cls, file): Mark a single file as removed. Marks related datasets deprecated, and deletes any empty directories above the file. 
""" - _logger.info('Begin delete file') + _logger.info("Begin delete file") deleted_files_count, project_identifier = cls._mark_files_as_deleted([file.id]) cls._delete_empy_dir_chain_above(file.parent_directory) cls.calculate_project_directory_byte_sizes_and_file_counts(file.project_identifier) cls._mark_datasets_as_deprecated([file.id]) - CallableService.add_post_request_callable(DelayedLog( - event='files_deleted', - files={ - 'project_identifier': file.project_identifier, - 'file_storage': file.file_storage.file_storage_json['identifier'], - 'file_count': deleted_files_count, - } - )) + CallableService.add_post_request_callable( + DelayedLog( + event="files_deleted", + files={ + "project_identifier": file.project_identifier, + "file_storage": file.file_storage.file_storage_json["identifier"], + "file_count": deleted_files_count, + }, + ) + ) - _logger.info('Marked %d files as deleted from project %s' % (deleted_files_count, project_identifier)) - return Response({ 'deleted_files_count': deleted_files_count }, status=status.HTTP_200_OK) + _logger.info( + "Marked %d files as deleted from project %s" % (deleted_files_count, project_identifier) + ) + return Response({"deleted_files_count": deleted_files_count}, status=status.HTTP_200_OK) @classmethod def destroy_bulk(cls, file_identifiers): @@ -334,9 +370,9 @@ def destroy_bulk(cls, file_identifiers): The method returns a http response with the number of deleted files in the body. """ - _logger.info('Begin bulk delete files') + _logger.info("Begin bulk delete files") - file_ids = cls.identifiers_to_ids(file_identifiers, 'noparams') + file_ids = cls.identifiers_to_ids(file_identifiers, "noparams") if not file_ids: raise Http404 @@ -348,17 +384,21 @@ def destroy_bulk(cls, file_identifiers): file = File.objects_unfiltered.get(pk=file_ids[0]) - CallableService.add_post_request_callable(DelayedLog( - event='files_deleted', - files={ - 'project_identifier': file.project_identifier, - 'file_storage': file.file_storage.file_storage_json['identifier'], - 'file_count': deleted_files_count, - } - )) + CallableService.add_post_request_callable( + DelayedLog( + event="files_deleted", + files={ + "project_identifier": file.project_identifier, + "file_storage": file.file_storage.file_storage_json["identifier"], + "file_count": deleted_files_count, + }, + ) + ) - _logger.info('Marked %d files as deleted from project %s' % (deleted_files_count, project_identifier)) - return Response({ 'deleted_files_count': deleted_files_count }, status=status.HTTP_200_OK) + _logger.info( + "Marked %d files as deleted from project %s" % (deleted_files_count, project_identifier) + ) + return Response({"deleted_files_count": deleted_files_count}, status=status.HTTP_200_OK) @classmethod def delete_project(cls, project_id): @@ -367,9 +407,14 @@ def delete_project(cls, project_id): This method is called by FileRPC """ - _logger.info('Begin to delete project from database...') + _logger.info("Begin to delete project from database...") - file_ids = [ id for id in File.objects.filter(project_identifier=project_id).values_list('id', flat=True) ] + file_ids = [ + id + for id in File.objects.filter(project_identifier=project_id).values_list( + "id", flat=True + ) + ] deleted_files_count = 0 @@ -379,34 +424,43 @@ def delete_project(cls, project_id): cls.calculate_project_directory_byte_sizes_and_file_counts(project_id) cls._mark_datasets_as_deprecated(file_ids) else: - _logger.info('Project %s contained no files' % project_id) + _logger.info("Project %s contained no files" % project_id) 
- _logger.info('Deleted project %s successfully. %d files deleted' % (project_id, deleted_files_count)) + _logger.info( + "Deleted project %s successfully. %d files deleted" % (project_id, deleted_files_count) + ) - return Response({ 'deleted_files_count': deleted_files_count }, status=status.HTTP_200_OK) + return Response({"deleted_files_count": deleted_files_count}, status=status.HTTP_200_OK) @staticmethod def _mark_files_as_deleted(file_ids): """ Mark files designated by file_ids as deleted. """ - _logger.info('Marking files as removed...') + _logger.info("Marking files as removed...") - sql_delete_files = ''' + sql_delete_files = """ update metax_api_file set removed = true, file_deleted = CURRENT_TIMESTAMP, date_modified = CURRENT_TIMESTAMP where active = true and removed = false - and id in %s''' + and id in %s""" - sql_select_related_projects = 'select distinct(project_identifier) from metax_api_file where id in %s' + sql_select_related_projects = ( + "select distinct(project_identifier) from metax_api_file where id in %s" + ) with connection.cursor() as cr: cr.execute(sql_select_related_projects, [tuple(file_ids)]) if cr.rowcount == 0: - raise Http400({ 'detail': ['no files found for given identifiers'] }) + raise Http400({"detail": ["no files found for given identifiers"]}) elif cr.rowcount > 1: - raise Http400({ 'project_identifier': [ - 'deleting files from more than one project in a single request is not allowed'] }) + raise Http400( + { + "project_identifier": [ + "deleting files from more than one project in a single request is not allowed" + ] + } + ) project_identifier = cr.fetchone()[0] cr.execute(sql_delete_files, [tuple(file_ids)]) @@ -420,17 +474,21 @@ def _find_and_delete_empty_directories(cls, project_identifier): Find and delete, if feasible, all empty directories in a project. The only dir found in the project without a parent dir, is considered to be the root. """ - _logger.info('Finding and deleting empty directory chains...') + _logger.info("Finding and deleting empty directory chains...") - root_dir = Directory.objects.filter(project_identifier=project_identifier, parent_directory_id=None) + root_dir = Directory.objects.filter( + project_identifier=project_identifier, parent_directory_id=None + ) - if root_dir.count() > 1: # pragma: no cover - raise ValidationError({ - 'detail': [ - 'found more than one root dir (directories without parents: %s. unable to proceed' % - ', '.join(dr.directory_path for dr in root_dir) - ] - }) + if root_dir.count() > 1: # pragma: no cover + raise ValidationError( + { + "detail": [ + "found more than one root dir (directories without parents: %s. unable to proceed" + % ", ".join(dr.directory_path for dr in root_dir) + ] + } + ) cls._delete_empy_directories(root_dir[0]) @@ -491,31 +549,46 @@ def _mark_datasets_as_deprecated(file_ids): Get all CatalogRecords which have files set to them from file_ids, and set their deprecated flag to True. Then, publish update-messages to rabbitmq. 
""" - _logger.info('Marking related datasets as deprecated...') + _logger.info("Marking related datasets as deprecated...") deprecated_records = [] current_time = get_tz_aware_now_without_micros() - records = CatalogRecord.objects \ - .filter(files__in=file_ids, deprecated=False) \ - .exclude(data_catalog__catalog_json__identifier=settings.PAS_DATA_CATALOG_IDENTIFIER) \ - .distinct('id') + records = ( + CatalogRecord.objects.filter(files__in=file_ids, deprecated=False) + .exclude(data_catalog__catalog_json__identifier=settings.PAS_DATA_CATALOG_IDENTIFIER) + .distinct("id") + ) for cr in records: cr.deprecate(current_time) deprecated_records.append(cr) if not deprecated_records: - _logger.info('Files were not associated with any datasets.') + _logger.info("Files were not associated with any datasets.") return from metax_api.models.catalog_record import RabbitMQPublishRecord + for cr in deprecated_records: - cr.add_post_request_callable(RabbitMQPublishRecord(cr, 'update')) + cr.add_post_request_callable(RabbitMQPublishRecord(cr, "update")) @classmethod - def get_directory_contents(cls, identifier=None, path=None, project_identifier=None, - recursive=False, max_depth=1, dirs_only=False, include_parent=False, cr_identifier=None, - not_cr_identifier=None, file_name=None, directory_name=None, paginate=None, request=None): + def get_directory_contents( + cls, + identifier=None, + path=None, + project_identifier=None, + recursive=False, + max_depth=1, + dirs_only=False, + include_parent=False, + cr_identifier=None, + not_cr_identifier=None, + file_name=None, + directory_name=None, + paginate=None, + request=None, + ): """ Get files and directories contained by a directory. @@ -559,21 +632,24 @@ def get_directory_contents(cls, identifier=None, path=None, project_identifier=N request: the web request object. """ - assert request is not None, 'kw parameter request must be specified' + assert request is not None, "kw parameter request must be specified" from metax_api.api.rest.base.serializers import LightDirectorySerializer # get targeted directory if identifier: try: - params = { 'id': int(identifier) } + params = {"id": int(identifier)} except ValueError: - params = { 'identifier': identifier } + params = {"identifier": identifier} else: if path and project_identifier: - params = { 'directory_path': path, 'project_identifier': project_identifier } - else: # pragma: no cover - raise ValidationError({'detail': ['no parameters to query by']}) + params = { + "directory_path": path, + "project_identifier": project_identifier, + } + else: # pragma: no cover + raise ValidationError({"detail": ["no parameters to query by"]}) try: fields = LightDirectorySerializer.ls_field_list() @@ -586,18 +662,20 @@ def get_directory_contents(cls, identifier=None, path=None, project_identifier=N if cr_identifier: cr_id, cr_directory_data = cls._get_cr_if_relevant(cr_identifier, directory, request) elif not_cr_identifier: - not_cr_id, cr_directory_data = cls._get_cr_if_relevant(not_cr_identifier, directory, request) + not_cr_id, cr_directory_data = cls._get_cr_if_relevant( + not_cr_identifier, directory, request + ) else: # generally browsing the directory - NOT in the context of a cr! check user permissions if not request.user.is_service: - cls.check_user_belongs_to_project(request, directory['project_identifier']) + cls.check_user_belongs_to_project(request, directory["project_identifier"]) cr_directory_data = {} # get list of field names to retrieve. 
note: by default all fields are retrieved directory_fields, file_fields = cls._get_requested_file_browsing_fields(request) - if cr_id and recursive and max_depth == '*' and not dirs_only: + if cr_id and recursive and max_depth == "*" and not dirs_only: # optimized for downloading full file list of an entire directory files = cls._get_directory_file_list_recursively_for_cr(directory, cr_id, file_fields) if paginate: @@ -606,12 +684,12 @@ def get_directory_contents(cls, identifier=None, path=None, project_identifier=N return files exclude_id = False - if (recursive or not_cr_id) and directory_fields and 'id' not in directory_fields: + if (recursive or not_cr_id) and directory_fields and "id" not in directory_fields: exclude_id = True - directory_fields.append('id') + directory_fields.append("id") contents = cls._get_directory_contents( - directory['id'], + directory["id"], recursive=recursive, max_depth=max_depth, dirs_only=dirs_only, @@ -622,14 +700,14 @@ def get_directory_contents(cls, identifier=None, path=None, project_identifier=N file_name=file_name, directory_name=directory_name, paginate=paginate, - request=request + request=request, ) def _remove_id(dirs): - for dir in dirs['directories']: - if dir.get('directories'): + for dir in dirs["directories"]: + if dir.get("directories"): _remove_id(dir) - dir.pop('id') + dir.pop("id") if exclude_id: _remove_id(contents) @@ -637,25 +715,28 @@ def _remove_id(dirs): if recursive: if paginate: if dirs_only: - contents['directories'], contents['files'] = cls.dp.paginate_directory_data(contents['directories'], - None, request) - del contents['files'] + ( + contents["directories"], + contents["files"], + ) = cls.dp.paginate_directory_data(contents["directories"], None, request) + del contents["files"] else: - dirs, files = cls.dp.paginate_directory_data(None, contents['files'], request) + dirs, files = cls.dp.paginate_directory_data(None, contents["files"], request) return cls.dp.get_paginated_response(files) if dirs_only: # taken care of the in the called methods. 
can return the result as is # as a directory tree pass else: - return contents.get('files', []) + return contents.get("files", []) if include_parent: contents.update(LightDirectorySerializer.serialize(directory)) if cls._include_total_byte_sizes_and_file_counts(cr_id, not_cr_id, directory_fields): - cls.retrieve_directory_byte_sizes_and_file_counts_for_cr(contents, not_cr_id, - directory_fields, cr_directory_data) + cls.retrieve_directory_byte_sizes_and_file_counts_for_cr( + contents, not_cr_id, directory_fields, cr_directory_data + ) if paginate: contents = cls.dp.get_paginated_response(contents) @@ -666,27 +747,30 @@ def _remove_id(dirs): def _get_cr_if_relevant(cls, cr_identifier, directory, request): # browsing in the context of a cr try: - cr_params = { 'id': int(cr_identifier) } + cr_params = {"id": int(cr_identifier)} except ValueError: - cr_params = { 'identifier': cr_identifier } + cr_params = {"identifier": cr_identifier} try: - cr = CatalogRecord.objects.only('id', '_directory_data', 'editor', 'user_created', 'research_dataset').\ - get(**cr_params) + cr = CatalogRecord.objects.only( + "id", "_directory_data", "editor", "user_created", "research_dataset" + ).get(**cr_params) except CatalogRecord.DoesNotExist: # raise 400 instead of 404, to distinguish from the error # 'directory not found', which raises a 404 - raise ValidationError({ - 'detail': [ 'CatalogRecord with identifier %s does not exist' % cr_identifier ] - }) + raise ValidationError( + {"detail": ["CatalogRecord with identifier %s does not exist" % cr_identifier]} + ) if not cr.authorized_to_see_catalog_record_files(request): - raise Http403({ - 'detail': [ - 'You do not have permission to see this information because the dataset access type is ' - 'not open and you are not the owner of the catalog record.' - ] - }) + raise Http403( + { + "detail": [ + "You do not have permission to see this information because the dataset access type is " + "not open and you are not the owner of the catalog record." + ] + } + ) cr_id = cr.id cr_directory_data = cr._directory_data or {} @@ -699,15 +783,19 @@ def _get_requested_file_browsing_fields(cls, request): Find out if only specific fields were requested to be returned, and return those fields for directories and files respectively. """ - from metax_api.api.rest.base.serializers import LightDirectorySerializer, LightFileSerializer + from metax_api.api.rest.base.serializers import ( + LightDirectorySerializer, + LightFileSerializer, + ) + directory_fields = [] file_fields = [] - if request.query_params.get('directory_fields', False): - directory_fields = request.query_params['directory_fields'].split(',') + if request.query_params.get("directory_fields", False): + directory_fields = request.query_params["directory_fields"].split(",") - if request.query_params.get('file_fields', False): - file_fields = request.query_params['file_fields'].split(',') + if request.query_params.get("file_fields", False): + file_fields = request.query_params["file_fields"].split(",") directory_fields = LightDirectorySerializer.ls_field_list(directory_fields) file_fields = LightFileSerializer.ls_field_list(file_fields) @@ -716,18 +804,19 @@ def _get_requested_file_browsing_fields(cls, request): @staticmethod def _get_directory_file_list_recursively_for_cr(directory, cr_id, file_fields): - ''' + """ Optimized for downloading full file list of an entire directory in a cr. Not a recursive method + no Model objects or normal serializers. 
- ''' + """ from metax_api.api.rest.base.serializers import LightFileSerializer - params = { 'project_identifier': directory['project_identifier'] } - if directory['directory_path'] == '/': + params = {"project_identifier": directory["project_identifier"]} + + if directory["directory_path"] == "/": # for root dir, simply omit file_path param to get all project files. pass else: - params['file_path__startswith'] = '%s/' % directory['directory_path'] + params["file_path__startswith"] = "%s/" % directory["directory_path"] files = CatalogRecord.objects.get(pk=cr_id).files.values(*file_fields).filter(**params) return LightFileSerializer.serialize(files) @@ -742,14 +831,27 @@ def _include_total_byte_sizes_and_file_counts(cr_id, not_cr_id, directory_fields if not directory_fields: # specific fields not specified -> all fields are returned return True - if 'byte_size' in directory_fields or 'file_count' in directory_fields: + if "byte_size" in directory_fields or "file_count" in directory_fields: return True return False @classmethod - def _get_directory_contents(cls, directory_id, request=None, recursive=False, max_depth=1, depth=0, dirs_only=False, - cr_id=None, not_cr_id=None, directory_fields=[], file_fields=[], file_name=None, directory_name=None, - paginate=None): + def _get_directory_contents( + cls, + directory_id, + request=None, + recursive=False, + max_depth=1, + depth=0, + dirs_only=False, + cr_id=None, + not_cr_id=None, + directory_fields=[], + file_fields=[], + file_name=None, + directory_name=None, + paginate=None, + ): """ Get files and directories contained by a directory. @@ -765,9 +867,9 @@ def _get_directory_contents(cls, directory_id, request=None, recursive=False, ma for directories and files respectively. """ - if recursive and max_depth != '*': + if recursive and max_depth != "*": if depth > max_depth: - raise MaxRecursionDepthExceeded('max depth is %d' % max_depth) + raise MaxRecursionDepthExceeded("max depth is %d" % max_depth) depth += 1 if cr_id or not_cr_id: @@ -781,17 +883,20 @@ def _get_directory_contents(cls, directory_id, request=None, recursive=False, ma recursive=recursive, dirs_only=dirs_only, directory_fields=directory_fields, - file_fields=file_fields + file_fields=file_fields, ) except Http404: if recursive: - return {'directories': []} + return {"directories": []} raise else: # browsing from ALL files, not cr specific - dirs = Directory.objects.filter(parent_directory_id=directory_id).order_by('directory_path').values( - *directory_fields) + dirs = ( + Directory.objects.filter(parent_directory_id=directory_id) + .order_by("directory_path") + .values(*directory_fields) + ) # icontains returns exception on None and with empty string does unnecessary db hits if directory_name: @@ -801,7 +906,11 @@ def _get_directory_contents(cls, directory_id, request=None, recursive=False, ma files = None else: - files = File.objects.filter(parent_directory_id=directory_id).order_by('file_path').values(*file_fields) + files = ( + File.objects.filter(parent_directory_id=directory_id) + .order_by("file_path") + .values(*file_fields) + ) if file_name: files = files.filter(file_name__icontains=file_name) @@ -809,19 +918,21 @@ def _get_directory_contents(cls, directory_id, request=None, recursive=False, ma dirs, files = cls.dp.paginate_directory_data(dirs, files, request) from metax_api.api.rest.base.serializers import LightDirectorySerializer - contents = { 'directories': LightDirectorySerializer.serialize(dirs) } + + contents = {"directories": 
LightDirectorySerializer.serialize(dirs)} if files or not dirs_only: # for normal file browsing (not with 'dirs_only'), the files-key should be present, # even if empty. from metax_api.api.rest.base.serializers import LightFileSerializer - contents['files'] = LightFileSerializer.serialize(files) + + contents["files"] = LightFileSerializer.serialize(files) if recursive: - for directory in contents['directories']: + for directory in contents["directories"]: try: sub_dir_contents = cls._get_directory_contents( - directory['id'], + directory["id"], recursive=recursive, max_depth=max_depth, depth=depth, @@ -833,28 +944,38 @@ def _get_directory_contents(cls, directory_id, request=None, recursive=False, ma file_name=file_name, directory_name=directory_name, paginate=paginate, - request=request + request=request, ) except MaxRecursionDepthExceeded: continue - directory['directories'] = sub_dir_contents['directories'] + directory["directories"] = sub_dir_contents["directories"] - if 'files' in sub_dir_contents: - contents['files'] += sub_dir_contents['files'] + if "files" in sub_dir_contents: + contents["files"] += sub_dir_contents["files"] return contents @classmethod - def _get_directory_contents_for_catalog_record(cls, directory_id, cr_id, not_cr_id, file_name, directory_name, - recursive, dirs_only=False, directory_fields=[], file_fields=[]): + def _get_directory_contents_for_catalog_record( + cls, + directory_id, + cr_id, + not_cr_id, + file_name, + directory_name, + recursive, + dirs_only=False, + directory_fields=[], + file_fields=[], + ): """ Browsing files in the context of a specific CR id. """ def _cr_belongin_to_directory(id): if recursive and not dirs_only: - return Directory.objects.filter(parent_directory_id=directory_id).values('id') + return Directory.objects.filter(parent_directory_id=directory_id).values("id") # select dirs which are contained by the directory, # AND which contain files belonging to the cr <-> files m2m relation table, @@ -876,13 +997,19 @@ def _cr_belongin_to_directory(id): for field in directory_fields: if field in allowed_fields: - directory_fields_sql.append('d.' + field) - elif 'parent_directory__' in field and field.split('parent_directory__')[1] in allowed_fields: - directory_fields_sql.append(field.replace('parent_directory__', 'parent_d.')) - directory_fields_string_sql = ', '.join(directory_fields_sql) - - dir_name_sql = '' if not directory_name or not_cr_id else \ - "AND d.directory_name LIKE ('%%' || %s || '%%')" + directory_fields_sql.append("d." 
+ field) + elif ( + "parent_directory__" in field + and field.split("parent_directory__")[1] in allowed_fields + ): + directory_fields_sql.append(field.replace("parent_directory__", "parent_d.")) + directory_fields_string_sql = ", ".join(directory_fields_sql) + + dir_name_sql = ( + "" + if not directory_name or not_cr_id + else "AND d.directory_name LIKE ('%%' || %s || '%%')" + ) sql_select_dirs_for_cr = """ SELECT {} @@ -903,9 +1030,14 @@ def _cr_belongin_to_directory(id): ORDER BY d.directory_path """ with connection.cursor() as cr: - sql_select_dirs_for_cr = sql_select_dirs_for_cr.format(directory_fields_string_sql, dir_name_sql) - sql_params = [directory_id, directory_name, id] if directory_name and not not_cr_id \ + sql_select_dirs_for_cr = sql_select_dirs_for_cr.format( + directory_fields_string_sql, dir_name_sql + ) + sql_params = ( + [directory_id, directory_name, id] + if directory_name and not not_cr_id else [directory_id, id] + ) cr.execute(sql_select_dirs_for_cr, sql_params) dirs = [dict(zip(directory_fields, row)) for row in cr.fetchall()] @@ -915,23 +1047,37 @@ def _cr_belongin_to_directory(id): if cr_id: dirs = _cr_belongin_to_directory(cr_id) - files = None if dirs_only else File.objects \ - .filter(record__pk=cr_id, parent_directory=directory_id).order_by('file_path').values(*file_fields) + files = ( + None + if dirs_only + else File.objects.filter(record__pk=cr_id, parent_directory=directory_id) + .order_by("file_path") + .values(*file_fields) + ) elif not_cr_id: dirs = _cr_belongin_to_directory(not_cr_id) if dirs_only or not recursive: - dirs = Directory.objects.filter(parent_directory=directory_id).exclude( - id__in=[dir['id'] for dir in dirs]).values(*directory_fields) + dirs = ( + Directory.objects.filter(parent_directory=directory_id) + .exclude(id__in=[dir["id"] for dir in dirs]) + .values(*directory_fields) + ) if directory_name: dirs = dirs.filter(directory_name__icontains=directory_name) if directory_name: dirs = dirs.filter(directory_name__icontains=directory_name) - files = None if dirs_only else File.objects.exclude(record__pk=not_cr_id) \ - .filter(parent_directory=directory_id).order_by('file_path').values(*file_fields) + files = ( + None + if dirs_only + else File.objects.exclude(record__pk=not_cr_id) + .filter(parent_directory=directory_id) + .order_by("file_path") + .values(*file_fields) + ) if not dirs and not files: # for this specific version of the record, the requested directory either @@ -944,8 +1090,9 @@ def _cr_belongin_to_directory(id): return dirs, files @classmethod - def retrieve_directory_byte_sizes_and_file_counts_for_cr(cls, directory, not_cr_id=None, - directory_fields=[], cr_directory_data={}): + def retrieve_directory_byte_sizes_and_file_counts_for_cr( + cls, directory, not_cr_id=None, directory_fields=[], cr_directory_data={} + ): """ Retrieve total byte size and file counts of a directory, sub-directories included, in the context of a specific catalog record. 
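The helper reformatted in the next hunk consumes per-record directory statistics. A small sketch of the expected input shape, inferred from the lookups below: `cr_directory_data` appears to map a directory id (keyed as a string) to a `[byte_size, file_count]` pair; the id and sizes here are illustrative values only:

    # hedged sketch -- values are made up for illustration
    cr_directory_data = {"42": [1024, 3]}      # assumed shape: id -> [bytes, files]
    directory = {"id": 42, "directories": []}  # a bottom-level serialized directory
    FileService.retrieve_directory_byte_sizes_and_file_counts_for_cr(
        directory,
        directory_fields=["byte_size", "file_count"],
        cr_directory_data=cr_directory_data,
    )
    # directory is filled in place: byte_size == 1024, file_count == 3
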
@@ -954,32 +1101,35 @@ def retrieve_directory_byte_sizes_and_file_counts_for_cr(cls, directory, not_cr_ if not directory_fields: BYTE_SIZE = FILE_COUNT = True else: - BYTE_SIZE = 'byte_size' in directory_fields - FILE_COUNT = 'file_count' in directory_fields + BYTE_SIZE = "byte_size" in directory_fields + FILE_COUNT = "file_count" in directory_fields - if len(directory.get('directories', [])): - for sub_dir in directory.get('directories', []): - cls.retrieve_directory_byte_sizes_and_file_counts_for_cr(sub_dir, not_cr_id, - directory_fields, cr_directory_data) + if len(directory.get("directories", [])): + for sub_dir in directory.get("directories", []): + cls.retrieve_directory_byte_sizes_and_file_counts_for_cr( + sub_dir, not_cr_id, directory_fields, cr_directory_data + ) - if 'id' in directory: + if "id" in directory: # bottom dir - retrieve total byte_size and file_count for this cr - current_dir = cr_directory_data.get(str(directory['id']), [0, 0]) + current_dir = cr_directory_data.get(str(directory["id"]), [0, 0]) if not_cr_id: - dr_total = Directory.objects.values('id', 'byte_size', 'file_count').get(id=directory['id']) + dr_total = Directory.objects.values("id", "byte_size", "file_count").get( + id=directory["id"] + ) if BYTE_SIZE: if not_cr_id: - directory['byte_size'] = dr_total['byte_size'] - current_dir[0] + directory["byte_size"] = dr_total["byte_size"] - current_dir[0] else: - directory['byte_size'] = current_dir[0] + directory["byte_size"] = current_dir[0] if FILE_COUNT: if not_cr_id: - directory['file_count'] = dr_total['file_count'] - current_dir[1] + directory["file_count"] = dr_total["file_count"] - current_dir[1] else: - directory['file_count'] = current_dir[1] + directory["file_count"] = current_dir[1] @classmethod def get_project_root_directory(cls, project_identifier): @@ -987,6 +1137,7 @@ def get_project_root_directory(cls, project_identifier): Return root directory for a project, with its child directories and files. """ from metax_api.api.rest.base.serializers import LightDirectorySerializer + directory_fields = LightDirectorySerializer.ls_field_list() try: root_dir = Directory.objects.values(*directory_fields).get( @@ -994,12 +1145,14 @@ def get_project_root_directory(cls, project_identifier): ) except Directory.DoesNotExist: raise Http404 - except Directory.MultipleObjectsReturned: # pragma: no cover + except Directory.MultipleObjectsReturned: # pragma: no cover raise Exception( - 'Directory.MultipleObjectsReturned when looking for root directory. This should never happen' + "Directory.MultipleObjectsReturned when looking for root directory. This should never happen" ) root_dir_json = LightDirectorySerializer.serialize(root_dir) - root_dir_json.update(cls._get_directory_contents(root_dir['id'], directory_fields=directory_fields)) + root_dir_json.update( + cls._get_directory_contents(root_dir["id"], directory_fields=directory_fields) + ) return root_dir_json @classmethod @@ -1008,52 +1161,61 @@ def _create_single(cls, common_info, initial_data, serializer_class, **kwargs): Override the original _create_single from CommonService to also create directories, and setting them as parent_directory to approriate dirs and the file, before creating. 
""" - _logger.info('Begin create single file') + _logger.info("Begin create single file") cls._check_errors_before_creating_dirs([initial_data]) # the same initial data as received, except it has parent_directory set also - initial_data_with_dirs = cls._create_directories_from_file_list(common_info, [initial_data], **kwargs) + initial_data_with_dirs = cls._create_directories_from_file_list( + common_info, [initial_data], **kwargs + ) res = super(FileService, cls)._create_single( - common_info, initial_data_with_dirs[0], serializer_class, **kwargs) + common_info, initial_data_with_dirs[0], serializer_class, **kwargs + ) - cls.calculate_project_directory_byte_sizes_and_file_counts(initial_data['project_identifier']) + cls.calculate_project_directory_byte_sizes_and_file_counts( + initial_data["project_identifier"] + ) - CallableService.add_post_request_callable(DelayedLog( - event='files_created', - user_id=initial_data.get('user_created', res[0]['service_created']), - files={ - 'project_identifier': initial_data['project_identifier'], - 'file_storage': str(initial_data['file_storage']), - 'file_count': 1, - }, - )) + CallableService.add_post_request_callable( + DelayedLog( + event="files_created", + user_id=initial_data.get("user_created", res[0]["service_created"]), + files={ + "project_identifier": initial_data["project_identifier"], + "file_storage": str(initial_data["file_storage"]), + "file_count": 1, + }, + ) + ) - _logger.info('Created 1 new files') + _logger.info("Created 1 new files") return res @classmethod def check_allowed_projects(cls, request): - allowed_projects = CommonService.get_list_query_param(request, 'allowed_projects') + allowed_projects = CommonService.get_list_query_param(request, "allowed_projects") if allowed_projects is not None: if not isinstance(request.data, list): - raise Http400({ 'detail': [ 'request message body must be a single json object' ] }) + raise Http400({"detail": ["request message body must be a single json object"]}) try: - file_ids = [f['identifier'] for f in request.data] + file_ids = [f["identifier"] for f in request.data] except KeyError: - raise Http400({ 'detail': [ 'File identifier is missing' ] }) + raise Http400({"detail": ["File identifier is missing"]}) - project_ids = [ pid for pid in File.objects - .filter(identifier__in=file_ids) - .values_list('project_identifier', flat=True) - .distinct('project_identifier') ] + project_ids = [ + pid + for pid in File.objects.filter(identifier__in=file_ids) + .values_list("project_identifier", flat=True) + .distinct("project_identifier") + ] if not all(pid in allowed_projects for pid in project_ids): - raise Http403({ 'detail': [ 'You do not have permission to update this file' ] }) + raise Http403({"detail": ["You do not have permission to update this file"]}) @classmethod def _create_bulk(cls, common_info, initial_data_list, results, serializer_class, **kwargs): @@ -1061,50 +1223,56 @@ def _create_bulk(cls, common_info, initial_data_list, results, serializer_class, Override the original _create_bulk from CommonService to also create directories, and setting them as parent_directory to approriate files, before creating the files. 
""" - _logger.info('Begin bulk create files') + _logger.info("Begin bulk create files") cls._check_errors_before_creating_dirs(initial_data_list) - file_list_with_dirs = cls._create_directories_from_file_list(common_info, initial_data_list, **kwargs) + file_list_with_dirs = cls._create_directories_from_file_list( + common_info, initial_data_list, **kwargs + ) - _logger.info('Creating files...') + _logger.info("Creating files...") - cls._create_files( - common_info, file_list_with_dirs, results, serializer_class, **kwargs) + cls._create_files(common_info, file_list_with_dirs, results, serializer_class, **kwargs) - cls.calculate_project_directory_byte_sizes_and_file_counts(initial_data_list[0]['project_identifier']) + cls.calculate_project_directory_byte_sizes_and_file_counts( + initial_data_list[0]["project_identifier"] + ) - CallableService.add_post_request_callable(DelayedLog( - event='files_created', - user_id=initial_data_list[0].get('user_created', common_info['service_created']), - files={ - 'project_identifier': initial_data_list[0]['project_identifier'], - 'file_storage': str(initial_data_list[0]['file_storage']), - 'file_count': len(results.get('success', [])), - } - )) + CallableService.add_post_request_callable( + DelayedLog( + event="files_created", + user_id=initial_data_list[0].get("user_created", common_info["service_created"]), + files={ + "project_identifier": initial_data_list[0]["project_identifier"], + "file_storage": str(initial_data_list[0]["file_storage"]), + "file_count": len(results.get("success", [])), + }, + ) + ) - _logger.info('Created %d new files' % len(results.get('success', []))) + _logger.info("Created %d new files" % len(results.get("success", []))) @classmethod def _create_files(cls, common_info, initial_data_list, results, serializer_class, **kwargs): """ The actual part where the list is iterated and objects validated, and created. """ - project_identifier = initial_data_list[0]['project_identifier'] + project_identifier = initial_data_list[0]["project_identifier"] # pre-fetch all file_paths in the project, to spare an individual db fetch for each file # in serializer.save(), where they otherwise would check for path presence. - project_file_paths = File.objects.filter( - project_identifier=project_identifier).values_list('file_path', flat=True) + project_file_paths = File.objects.filter(project_identifier=project_identifier).values_list( + "file_path", flat=True + ) project_file_paths = set(project_file_paths) - project_dir_paths = set(dirname(f['file_path']) for f in initial_data_list) + project_dir_paths = set(dirname(f["file_path"]) for f in initial_data_list) - project_dir_data_list = Directory.objects \ - .filter(project_identifier=project_identifier, directory_path__in=project_dir_paths) \ - .values_list('id', 'identifier') + project_dir_data_list = Directory.objects.filter( + project_identifier=project_identifier, directory_path__in=project_dir_paths + ).values_list("id", "identifier") - project_dir_data = { dr[0]: dr[1] for dr in project_dir_data_list } + project_dir_data = {dr[0]: dr[1] for dr in project_dir_data_list} file_storage_id = None file_storage_identifier = None @@ -1114,34 +1282,34 @@ def to_model_format(entry, common_info): """ Format that is inserted into db. 
""" - del entry['checksum'] - entry['file_storage_id'] = entry['file_storage'] - del entry['file_storage'] - entry['parent_directory_id'] = entry['parent_directory'] - del entry['parent_directory'] - entry.update(**common_info) # add date_created, service_created etc fields + del entry["checksum"] + entry["file_storage_id"] = entry["file_storage"] + del entry["file_storage"] + entry["parent_directory_id"] = entry["parent_directory"] + del entry["parent_directory"] + entry.update(**common_info) # add date_created, service_created etc fields def to_repr(entry, common_info, project_dir_data, file_storage_identifier): """ Format that is returned in the response. """ - entry['file_storage'] = { - 'id': entry['file_storage_id'], - 'identifier': file_storage_identifier, + entry["file_storage"] = { + "id": entry["file_storage_id"], + "identifier": file_storage_identifier, } - entry['parent_directory'] = { - 'id': entry['parent_directory_id'], - 'identifier': project_dir_data[entry['parent_directory_id']], + entry["parent_directory"] = { + "id": entry["parent_directory_id"], + "identifier": project_dir_data[entry["parent_directory_id"]], } - del entry['file_storage_id'] - del entry['parent_directory_id'] + del entry["file_storage_id"] + del entry["parent_directory_id"] for field in common_info.keys(): # cast datetime objects into strings entry[field] = str(entry[field]) - entry['checksum'] = serializer_class.form_checksum(entry) + entry["checksum"] = serializer_class.form_checksum(entry) if DEBUG: start = time() @@ -1151,11 +1319,11 @@ def to_repr(entry, common_info, project_dir_data, file_storage_identifier): if file_storage_id: # saves a fetch to db in serializer.is_valid(), once file_storage_id has been retrieved # for one of the files. - row['file_storage'] = file_storage_id + row["file_storage"] = file_storage_id serializer = serializer_class(data=row, **kwargs) - if row['file_path'] not in project_file_paths: + if row["file_path"] not in project_file_paths: # saves a fetch to db in serializer.is_valid() serializer.file_path_checked = True else: @@ -1168,27 +1336,35 @@ def to_repr(entry, common_info, project_dir_data, file_storage_identifier): try: serializer.is_valid(raise_exception=True) except Exception as e: - if CommonService.get_boolean_query_param(kwargs['context']['request'], 'ignore_already_exists_errors'): + if CommonService.get_boolean_query_param( + kwargs["context"]["request"], "ignore_already_exists_errors" + ): if cls._error_is_already_exists(e): # add only a minuscule response informing of the situation... 
- results['success'].append({ 'object': { - 'identifier': row['identifier'], 'detail': ['already exists'] } - }) + results["success"].append( + { + "object": { + "identifier": row["identifier"], + "detail": ["already exists"], + } + } + ) continue cls._append_error(results, serializer, e) else: entry = serializer.initial_data to_model_format(entry, common_info) entries.append(File(**entry)) - file_storage_id = entry['file_storage_id'] # re-used for following loops + file_storage_id = entry["file_storage_id"] # re-used for following loops if file_storage_identifier is None: # re-used for following loops - file_storage_identifier = FileStorage.objects \ - .get(pk=file_storage_id).file_storage_json['identifier'] + file_storage_identifier = FileStorage.objects.get( + pk=file_storage_id + ).file_storage_json["identifier"] to_repr(entry, common_info, project_dir_data, file_storage_identifier) - results['success'].append({ 'object': entry }) + results["success"].append({"object": entry}) if i % 1000 == 0: # pros and cons of Model.objects.bulk_create(): @@ -1216,17 +1392,22 @@ def to_repr(entry, common_info, project_dir_data, file_storage_identifier): if DEBUG: end = time() - _logger.debug('processed %d files... (%.3f seconds per batch)' % (i, end - start)) + _logger.debug( + "processed %d files... (%.3f seconds per batch)" % (i, end - start) + ) start = time() if entries: - _logger.debug('a final dose of %d records still left to bulk_create...' % len(entries)) + _logger.debug("a final dose of %d records still left to bulk_create..." % len(entries)) File.objects.bulk_create(entries) - _logger.debug('done!') + _logger.debug("done!") if DEBUG: end = time() - _logger.debug('total time for inserting %d files: %d seconds' % (len(initial_data_list), (end - start))) + _logger.debug( + "total time for inserting %d files: %d seconds" + % (len(initial_data_list), (end - start)) + ) @staticmethod def _error_is_already_exists(e): @@ -1234,9 +1415,9 @@ def _error_is_already_exists(e): Check if the error 'identifier already exists' was raised. There may have been other errors included, but they may be a symptom of the record already existing, so we don't care about them. """ - if hasattr(e, 'detail'): + if hasattr(e, "detail"): for field_name, errors in e.detail.items(): - if field_name == 'identifier' and 'already exists' in errors[0]: + if field_name == "identifier" and "already exists" in errors[0]: return True return False @@ -1251,29 +1432,43 @@ def _check_errors_before_creating_dirs(initial_data_list): """ for row in initial_data_list: # ic(row) - if 'file_path' not in row: - raise Http400({ - 'file_path': ['file_path is a required parameter (file id: %s)' % row['identifier']] - }) + if "file_path" not in row: + raise Http400( + { + "file_path": [ + "file_path is a required parameter (file id: %s)" % row["identifier"] + ] + } + ) else: - if row['file_path'][0] != '/': - raise Http400({ - 'file_path': [ - "file path should start with '/' to point to the root. Now '%s'" % row['file_path'] + if row["file_path"][0] != "/": + raise Http400( + { + "file_path": [ + "file path should start with '/' to point to the root. 
Now '%s'" + % row["file_path"] + ] + } + ) + + if "project_identifier" not in row: + raise Http400( + { + "project_identifier": [ + "project_identifier is a required parameter (file id: %s)" + % row["identifier"] ] - }) + } + ) - if 'project_identifier' not in row: - raise Http400({ - 'project_identifier': [ - 'project_identifier is a required parameter (file id: %s)' % row['identifier'] + if len(set(f["project_identifier"] for f in initial_data_list)) > 1: + raise Http400( + { + "project_identifier": [ + "creating files for multiple projects in one request is not permitted." ] - }) - - if len(set(f['project_identifier'] for f in initial_data_list)) > 1: - raise Http400({ - 'project_identifier': ['creating files for multiple projects in one request is not permitted.'] - }) + } + ) @classmethod def _create_directories_from_file_list(cls, common_info, initial_data_list, **kwargs): @@ -1282,20 +1477,24 @@ def _create_directories_from_file_list(cls, common_info, initial_data_list, **kw as separate entities in the request, so they have to be created based on the paths in the list of files. """ - _logger.info('Checking and creating file hierarchy...') + _logger.info("Checking and creating file hierarchy...") - project_identifier = initial_data_list[0]['project_identifier'] - sorted_data = sorted(initial_data_list, key=lambda row: row['file_path']) - received_dir_paths = sorted(set(dirname(f['file_path']) for f in sorted_data)) + project_identifier = initial_data_list[0]["project_identifier"] + sorted_data = sorted(initial_data_list, key=lambda row: row["file_path"]) + received_dir_paths = sorted(set(dirname(f["file_path"]) for f in sorted_data)) - new_dir_paths, existing_dirs = cls._get_new_and_existing_dirs(received_dir_paths, project_identifier) + new_dir_paths, existing_dirs = cls._get_new_and_existing_dirs( + received_dir_paths, project_identifier + ) if new_dir_paths: - cls._create_directories(common_info, existing_dirs, new_dir_paths, project_identifier, **kwargs) + cls._create_directories( + common_info, existing_dirs, new_dir_paths, project_identifier, **kwargs + ) cls._assign_parents_to_files(existing_dirs, sorted_data) - _logger.info('Directory hierarchy in place') + _logger.info("Directory hierarchy in place") return sorted_data @classmethod @@ -1308,15 +1507,17 @@ def _get_new_and_existing_dirs(cls, received_dir_paths, project_identifier): # get existing dirs as a dict of { 'directory_path': 'id' } existing_dirs = dict( - (dr.directory_path, dr.id) for dr in - Directory.objects.filter( + (dr.directory_path, dr.id) + for dr in Directory.objects.filter( directory_path__in=received_dir_paths, - project_identifier=project_identifier - ).order_by('directory_path') + project_identifier=project_identifier, + ).order_by("directory_path") ) - new_dir_paths = [ path for path in received_dir_paths if path not in existing_dirs ] - _logger.info('Found %d existing, %d new directory paths' % (len(existing_dirs), len(new_dir_paths))) + new_dir_paths = [path for path in received_dir_paths if path not in existing_dirs] + _logger.info( + "Found %d existing, %d new directory paths" % (len(existing_dirs), len(new_dir_paths)) + ) return new_dir_paths, existing_dirs def _get_unique_dir_paths(received_dir_paths): @@ -1325,11 +1526,11 @@ def _get_unique_dir_paths(received_dir_paths): need to be weeded out from the list of received dir paths, to get all required directories that need to be created. 
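        (worked example of the walk below: received dirnames /p/a and /p/a/b yield the
        ancestor paths /p and /p/a, so the function returns ['/', '/p', '/p/a'])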
""" - all_paths = { '/': True } + all_paths = {"/": True} for path in received_dir_paths: parent_path = dirname(path) - while parent_path != '/': + while parent_path != "/": if parent_path not in all_paths: all_paths[parent_path] = True parent_path = dirname(parent_path) @@ -1338,7 +1539,9 @@ def _get_unique_dir_paths(received_dir_paths): return sorted(all_paths.keys()) @classmethod - def _create_directories(cls, common_info, existing_dirs, new_dir_paths, project_identifier, **kwargs): + def _create_directories( + cls, common_info, existing_dirs, new_dir_paths, project_identifier, **kwargs + ): """ Create to db directory hierarchy from directories extracted from the received file list. @@ -1348,29 +1551,31 @@ def _create_directories(cls, common_info, existing_dirs, new_dir_paths, project_ It is possible, that no new directories are created at all, when appending files to an existing dir. """ - _logger.info('Creating directories...') + _logger.info("Creating directories...") python_process_pid = str(getpid()) for i, path in enumerate(new_dir_paths): directory = { - 'directory_path': path, - 'directory_name': basename(path) if path != '/' else '/', + "directory_path": path, + "directory_name": basename(path) if path != "/" else "/", # identifier: uuid3 as hex, using as salt time in ms, idx of loop, and python process id - 'identifier': uuid3(UUID_NAMESPACE_DNS, '%d%d%s' - % (int(round(time() * 1000)), i, python_process_pid)).hex, - 'project_identifier': project_identifier + "identifier": uuid3( + UUID_NAMESPACE_DNS, + "%d%d%s" % (int(round(time() * 1000)), i, python_process_pid), + ).hex, + "project_identifier": project_identifier, } directory.update(common_info) - if path != '/': + if path != "/": cls._find_parent_dir_from_previously_created_dirs(directory, existing_dirs) serializer = DirectorySerializer(data=directory, **kwargs) serializer.is_valid(raise_exception=True) serializer.save() - existing_dirs[serializer.data['directory_path']] = serializer.data['id'] + existing_dirs[serializer.data["directory_path"]] = serializer.data["id"] @classmethod def _assign_parents_to_files(cls, existing_dirs, sorted_data): @@ -1381,10 +1586,10 @@ def _assign_parents_to_files(cls, existing_dirs, sorted_data): Assigning parent_directory is not allowed for requestors, so all existing parent_directories in the received files are purged. """ - _logger.info('Assigning parent directories to files...') + _logger.info("Assigning parent directories to files...") for row in sorted_data: - row.pop('parent_directory', None) + row.pop("parent_directory", None) for row in sorted_data: cls._find_parent_dir_from_previously_created_dirs(row, existing_dirs) @@ -1399,19 +1604,20 @@ def _find_parent_dir_from_previously_created_dirs(node, existing_dirs): Find the approriate directory id to use as parent_directory, using the node's dirname(file_path) or dirname(directory_path) as key. """ - node_path = node.get('file_path', node.get('directory_path', None)) + node_path = node.get("file_path", node.get("directory_path", None)) - if node_path == '/': + if node_path == "/": return expected_parent_dir_path = dirname(node_path) try: - node['parent_directory'] = existing_dirs[expected_parent_dir_path] - except KeyError: # pragma: no cover + node["parent_directory"] = existing_dirs[expected_parent_dir_path] + except KeyError: # pragma: no cover raise Exception( - 'No parent found for path %s, even though existing_dirs had stuff ' - 'in it. 
This should never happen' % node.get('file_path', node.get('directory_path', None)) + "No parent found for path %s, even though existing_dirs had stuff " + "in it. This should never happen" + % node.get("file_path", node.get("directory_path", None)) ) @staticmethod @@ -1420,7 +1626,9 @@ def calculate_project_directory_byte_sizes_and_file_counts(project_identifier): (Re-)calculate directory byte sizes and file counts in this project. """ try: - project_root_dir = Directory.objects.get(project_identifier=project_identifier, parent_directory_id=None) + project_root_dir = Directory.objects.get( + project_identifier=project_identifier, parent_directory_id=None + ) except Directory.DoesNotExist: # root directory does not exist - all project files have been deleted pass @@ -1429,39 +1637,43 @@ def calculate_project_directory_byte_sizes_and_file_counts(project_identifier): @classmethod def validate_file_characteristics_reference_data(cls, file_characteristics, cache): - reference_data = cls.get_reference_data(cache).get('reference_data', None) + reference_data = cls.get_reference_data(cache).get("reference_data", None) errors = defaultdict(list) - if 'file_format' in file_characteristics: - ff = file_characteristics['file_format'] - fv = file_characteristics.get('format_version', '') + if "file_format" in file_characteristics: + ff = file_characteristics["file_format"] + fv = file_characteristics.get("format_version", "") versions = cls._validate_file_format_and_get_versions_from_reference_data( - reference_data['file_format_version'], ff, errors) + reference_data["file_format_version"], ff, errors + ) # If the given file_format is a valid value, proceed to checking the given format_version value if not errors: # If the given file_format had several output_format_version values in refdata, but the given # format_version is not one of them, it's an error if versions and fv not in versions: - errors['file_characteristics.format_version'].\ - append('Value \'{0}\' for format_version does not match the allowed values for the given ' - 'file_format value \'{1}\' in reference data'.format(fv, ff)) + errors["file_characteristics.format_version"].append( + "Value '{0}' for format_version does not match the allowed values for the given " + "file_format value '{1}' in reference data".format(fv, ff) + ) # If the given file_format did not have any output_format_version values in refdata, but the given # format_version is non-empty, it's an error elif not versions and fv: - errors['file_characteristics.format_version']. 
\ - append('Any non-empty value for format_version not allowed for the given file_format value ' - '\'{0}\' in reference data'.format(ff)) + errors["file_characteristics.format_version"].append( + "Any non-empty value for format_version not allowed for the given file_format value " + "'{0}' in reference data".format(ff) + ) # If format_version was given but no file_format was given, it's an error - elif 'format_version' in file_characteristics: - errors['file_characteristics.file_format'].append('Value missing') + elif "format_version" in file_characteristics: + errors["file_characteristics.file_format"].append("Value missing") if errors: raise ValidationError(errors) @staticmethod - def _validate_file_format_and_get_versions_from_reference_data(file_format_version_refdata, - input_file_format, errors={}): + def _validate_file_format_and_get_versions_from_reference_data( + file_format_version_refdata, input_file_format, errors={} + ): """ Check if the input_file_format value exists in file_format_version reference data, and if it does, return a list of all possible output_format_version values for the particular input_file_format. @@ -1471,13 +1683,14 @@ def _validate_file_format_and_get_versions_from_reference_data(file_format_versi versions = [] iff_found = False for entry in file_format_version_refdata: - if input_file_format == entry['input_file_format']: + if input_file_format == entry["input_file_format"]: iff_found = True - if entry.get('output_format_version', False): - versions.append(entry['output_format_version']) + if entry.get("output_format_version", False): + versions.append(entry["output_format_version"]) if not iff_found: - errors['file_characteristics.file_format'].append( - 'Value for file_format \'%s\' not found in reference data' % input_file_format) + errors["file_characteristics.file_format"].append( + "Value for file_format '%s' not found in reference data" % input_file_format + ) return versions diff --git a/src/metax_api/services/pagination.py b/src/metax_api/services/pagination.py index 08c11c91..a164a3f3 100755 --- a/src/metax_api/services/pagination.py +++ b/src/metax_api/services/pagination.py @@ -4,13 +4,13 @@ class DirectoryPagination(LimitOffsetPagination): page_size = 10 - page_size_query_param = 'page_size' + page_size_query_param = "page_size" def paginate_directory_data(self, dirs, files, request, view=None): - ''' + """ Takes in directories and files as lists or querysets. Output is list tuple. 
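A worked example of the slicing branches further down, with assumed numbers (12 directories, 5 files, limit 10): page 1 is dirs[0:10]; on page 2 the offset still falls inside the directory list, so the page is the remaining directories topped up from the files:

    dirs, files = list(range(12)), list(range(5))
    limit, offset = 10, 10
    dir_len = len(dirs)
    # offset <= dir_len but offset + limit > dir_len, i.e. the middle branch below
    page = dirs[offset:] + files[: limit - (dir_len - offset)]
    assert len(page) == 7  # 2 remaining directories + all 5 files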
- ''' + """ self.count = self.get_count([dirs, files]) self.request = request @@ -38,11 +38,11 @@ def paginate_directory_data(self, dirs, files, request, view=None): dirs = [] offset = self.offset - dir_len if files: - files = files[offset:offset + self.limit] + files = files[offset : offset + self.limit] # if directories are not enough for one page limit elif (self.offset + self.limit) > dir_len: - dirs = dirs[self.offset:] + dirs = dirs[self.offset :] if files: files_to_show = self.limit - (dir_len - self.offset) if files_to_show > 0: @@ -50,7 +50,7 @@ def paginate_directory_data(self, dirs, files, request, view=None): # if enough directories for page limit else: - dirs = dirs[self.offset:self.offset + self.limit] + dirs = dirs[self.offset : self.offset + self.limit] if files: files = [] @@ -65,6 +65,8 @@ def get_count(self, contents): for content in contents: if content: - count = count + len(content) if isinstance(content, list) else count + content.count() + count = ( + count + len(content) if isinstance(content, list) else count + content.count() + ) return count diff --git a/src/metax_api/services/rabbitmq_service.py b/src/metax_api/services/rabbitmq_service.py index 3406e0b3..1ef10e90 100755 --- a/src/metax_api/services/rabbitmq_service.py +++ b/src/metax_api/services/rabbitmq_service.py @@ -18,6 +18,7 @@ _logger = logging.getLogger(__name__) + class _RabbitMQService: def __init__(self): if not hasattr(settings, "RABBITMQ"): @@ -36,27 +37,25 @@ def _connect(self): # Connection retries are needed as long as there is no load balancer in front of rabbitmq-server VMs sleep_time = 1 num_conn_retries = 5 - _logger.info( - f"connecting to RabbitMQ host: {self._hosts} port: {self._settings['PORT']}" - ) + _logger.info(f"connecting to RabbitMQ host: {self._hosts} port: {self._settings['PORT']}") for x in range(0, num_conn_retries): # Choose host randomly so that different hosts are tried out in case of connection problems host = random.choice(self._hosts) try: - kwarg_params = {"port": self._settings["PORT"], "credentials": self._credentials} + kwarg_params = { + "port": self._settings["PORT"], + "credentials": self._credentials, + } if settings.RABBIT_MQ_USE_VHOST: kwarg_params["virtual_host"] = self._settings["VHOST"] conn_params = pika.ConnectionParameters(host, **kwarg_params) - self._connection = pika.BlockingConnection( - conn_params - ) + self._connection = pika.BlockingConnection(conn_params) except Exception as e: _logger.error( - "Problem connecting to RabbitMQ server (%s), trying to reconnect..." - % str(e) + "Problem connecting to RabbitMQ server (%s), trying to reconnect..." % str(e) ) sleep(sleep_time) else: @@ -66,7 +65,7 @@ def _connect(self): else: raise Exception("Unable to connect to RabbitMQ") - def publish(self, body, routing_key='', exchange=None, persistent=True): + def publish(self, body, routing_key="", exchange=None, persistent=True): """ Publish a message to an exchange, which might or might not have queues bound to it. 
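For orientation, the publish path reformatted below rests on standard pika calls. A minimal sketch with placeholder connection details (host, port, credentials, exchange and routing key are illustrative, not the project's configuration; persistent delivery is assumed to map to delivery_mode=2):

    import json
    import pika

    credentials = pika.PlainCredentials("guest", "guest")
    params = pika.ConnectionParameters("localhost", 5672, credentials=credentials)
    connection = pika.BlockingConnection(params)
    channel = connection.channel()
    # delivery_mode=2 asks the broker to persist the message to disk
    channel.basic_publish(
        exchange="datasets",
        routing_key="create",
        body=json.dumps({"identifier": "example"}),
        properties=pika.BasicProperties(delivery_mode=2),
    )
    connection.close()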
@@ -95,10 +94,13 @@ def publish(self, body, routing_key='', exchange=None, persistent=True): try: for message in messages: if isinstance(message, dict): - message = json_dumps( - message, - cls=DjangoJSONEncoder) - self._channel.basic_publish(body=message, routing_key=routing_key, exchange=exchange, **additional_args) + message = json_dumps(message, cls=DjangoJSONEncoder) + self._channel.basic_publish( + body=message, + routing_key=routing_key, + exchange=exchange, + **additional_args, + ) except Exception as e: _logger.error(e) _logger.error("Unable to publish message to RabbitMQ") @@ -124,9 +126,7 @@ def init_exchanges(self): # declare queues in settings self._channel.queue_declare(queue["NAME"], durable=exchange["DURABLE"]) self._channel.queue_bind( - queue["NAME"], - exchange["NAME"], - queue.get("ROUTING_KEY") + queue["NAME"], exchange["NAME"], queue.get("ROUTING_KEY") ) except Exception as e: _logger.error(e) diff --git a/src/metax_api/services/redis_cache_service.py b/src/metax_api/services/redis_cache_service.py index e127c9b6..c51d0988 100755 --- a/src/metax_api/services/redis_cache_service.py +++ b/src/metax_api/services/redis_cache_service.py @@ -88,13 +88,9 @@ def __init__(self, db=0): if not settings.get("SENTINEL", None): raise Exception("Missing configuration from settings for REDIS: SENTINEL") if not settings["SENTINEL"].get("HOSTS", None): - raise Exception( - "Missing configuration from settings for REDIS.SENTINEL: HOSTS" - ) + raise Exception("Missing configuration from settings for REDIS.SENTINEL: HOSTS") if not settings["SENTINEL"].get("SERVICE", None): - raise Exception( - "Missing configuration from settings for REDIS.SENTINEL: SERVICE" - ) + raise Exception("Missing configuration from settings for REDIS.SENTINEL: SERVICE") if not settings.get("TEST_DB", None): raise Exception("Missing configuration from settings for REDIS: TEST_DB") if len(settings["SENTINEL"]["HOSTS"]) < 3: @@ -105,9 +101,7 @@ def __init__(self, db=0): if executing_test_case(): db = settings["TEST_DB"] elif db == settings["TEST_DB"]: - raise Exception( - "Invalid db: db index %d is reserved for test suite execution." % db - ) + raise Exception("Invalid db: db index %d is reserved for test suite execution." % db) self._redis_local = StrictRedis( host="localhost", @@ -245,9 +239,7 @@ def _get_from_master(self, key, **kwargs): res = master.get(key, **kwargs) except (TimeoutError, MasterNotFoundError): if self._DEBUG: - d( - "cache: master timed out also. no read instances available. returning None" - ) + d("cache: master timed out also. no read instances available. returning None") # uh oh, no master available either. 
either all redis instances have hit the bucket, # or there is a fail-over in process, and a new master will be in line in a moment return None @@ -273,8 +265,7 @@ def _get_slave(self): return self._sentinel.slave_for(self._service_name, socket_timeout=0.1) def _count_nodes(self): - return ( - len(self._sentinel.discover_slaves(self._service_name)) + 1 - ) # +1 is master + return len(self._sentinel.discover_slaves(self._service_name)) + 1 # +1 is master + RedisCacheService = RedisClient diff --git a/src/metax_api/services/reference_data_mixin.py b/src/metax_api/services/reference_data_mixin.py index b62a4e2d..fd80e63e 100755 --- a/src/metax_api/services/reference_data_mixin.py +++ b/src/metax_api/services/reference_data_mixin.py @@ -19,7 +19,7 @@ d = _logger.debug -class ReferenceDataMixin(): +class ReferenceDataMixin: """ Helper methods that other service classes can use when dealing with reference data @@ -30,7 +30,13 @@ class ReferenceDataMixin(): process_cached_reference_data = None @staticmethod - def check_ref_data(ref_data_type, field_to_check, relation_name, errors={}, value_not_found_is_error=True): + def check_ref_data( + ref_data_type, + field_to_check, + relation_name, + errors={}, + value_not_found_is_error=True, + ): """ Check if the given field exists in the reference data. The value of the field can be either the actual uri, or a shorthand code. @@ -46,11 +52,15 @@ def check_ref_data(ref_data_type, field_to_check, relation_name, errors={}, valu relation_name: the full relation path to the field to hand out in case of errors """ try: - return next(entry for entry in ref_data_type if field_to_check in (entry['uri'], entry['code'])) + return next( + entry for entry in ref_data_type if field_to_check in (entry["uri"], entry["code"]) + ) except StopIteration: if value_not_found_is_error: - _logger.error('Identifier \'%s\' not found in reference data' % field_to_check) - errors[relation_name].append('Identifier \'%s\' not found in reference data' % field_to_check) + _logger.error("Identifier '%s' not found in reference data" % field_to_check) + errors[relation_name].append( + "Identifier '%s' not found in reference data" % field_to_check + ) return None @classmethod @@ -68,14 +78,14 @@ def get_reference_data(cls, cache): if cls.process_cached_reference_data is not None: return cls.process_cached_reference_data - ref_data = cache.get('reference_data') + ref_data = cache.get("reference_data") # ref_data += cache.get('reference-data') if ref_data: cls.process_cached_reference_data = ref_data return cls.process_cached_reference_data else: - _logger.info('reference_data missing from cache - attempting to reload') + _logger.info("reference_data missing from cache - attempting to reload") try: state = ReferenceDataLoader.populate_cache_reference_data(cache) @@ -86,33 +96,35 @@ def get_reference_data(cls, cache): # when ref data was just populated, always retrieve from master to ensure # data is found, since there is a delay in data flow to slaves - ref_data = cache.get('reference_data', master=True) + ref_data = cache.get("reference_data", master=True) if ref_data: cls.process_cached_reference_data = ref_data return cls.process_cached_reference_data - elif state == 'reload_started_by_other' and retry < cls.REF_DATA_RELOAD_MAX_RETRIES: + elif state == "reload_started_by_other" and retry < cls.REF_DATA_RELOAD_MAX_RETRIES: sleep(1) - elif state == 'reload_started_by_other' and retry >= cls.REF_DATA_RELOAD_MAX_RETRIES: + elif state == "reload_started_by_other" and retry >= 
cls.REF_DATA_RELOAD_MAX_RETRIES:
                cls._raise_reference_data_reload_error(
-                    'Reload in progress by another request. Retried max times, and gave up'
+                    "Reload in progress by another request. Retried max times, and gave up"
                )
            else:
                cls._raise_reference_data_reload_error(
-                    'Current request tried to reload reference data, but apparently failed,'
-                    ' since key reference_data is still missing'
+                    "Current request tried to reload reference data, but apparently failed,"
+                    " since key reference_data is still missing"
                )

-    def populate_from_ref_data(ref_entry, obj, uri_field='identifier', label_field=None, add_in_scheme=True):
+    def populate_from_ref_data(
+        ref_entry, obj, uri_field="identifier", label_field=None, add_in_scheme=True
+    ):
        """
        Always populate at least the uri field (even if it was already there, no big deal).

        Label field population is necessary for some ref data types only.
        """
-        obj[uri_field] = ref_entry['uri']
-        if label_field and 'label' in ref_entry:
-            obj[label_field] = ref_entry['label']
-        if add_in_scheme and 'scheme' in ref_entry:
-            obj['in_scheme'] = ref_entry['scheme']
+        obj[uri_field] = ref_entry["uri"]
+        if label_field and "label" in ref_entry:
+            obj[label_field] = ref_entry["label"]
+        if add_in_scheme and "scheme" in ref_entry:
+            obj["in_scheme"] = ref_entry["scheme"]

    def _raise_reference_data_reload_error(error):
        """
@@ -120,16 +132,18 @@ def _raise_reference_data_reload_error(error):
        with HTTP 503.
        """
        _logger.exception(
-            'Failed to reload reference_data from ES - raising 503 temporarily unavailable.'
-            ' Details:\n%s' % error
+            "Failed to reload reference_data from ES - raising 503 temporarily unavailable."
+            " Details:\n%s" % error
        )
-        error_msg = 'Reference data temporarily unavailable. Please try again later'
+        error_msg = "Reference data temporarily unavailable. Please try again later"
        if django_settings.DEBUG:
-            error_msg += ' DEBUG: %s' % str(error)
+            error_msg += " DEBUG: %s" % str(error)
        raise Http503(error_msg)

    @classmethod
-    def process_org_obj_against_ref_data(cls, orgdata, org_obj, org_obj_relation_name, refdata=None, errors={}):
+    def process_org_obj_against_ref_data(
+        cls, orgdata, org_obj, org_obj_relation_name, refdata=None, errors={}
+    ):
        """
        First check if the org object contains an is_part_of relation, in which case recursively call
        this method until there is no is_part_of relation. After this, check whether the org object
        has a value in its identifier field.
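To make the two helpers above concrete, a small sketch using a made-up reference-data entry (the field names uri/code/label/scheme follow the code above; the values are illustrative only):

    ref_data_type = [
        {
            "uri": "http://example.org/codelist/license/CC-BY-4.0",
            "code": "CC-BY-4.0",
            "label": {"en": "Creative Commons Attribution 4.0"},
            "scheme": "http://example.org/codelist/license",
        }
    ]

    # check_ref_data matches on either the full uri or the shorthand code:
    entry = next(e for e in ref_data_type if "CC-BY-4.0" in (e["uri"], e["code"]))

    # populate_from_ref_data then writes the uri (and optionally label/scheme)
    # back onto the incoming object:
    obj = {"identifier": "CC-BY-4.0"}
    obj["identifier"] = entry["uri"]   # uri_field
    obj["title"] = entry["label"]      # label_field, when requested
    obj["in_scheme"] = entry["scheme"] # add_in_scheme=True
    assert obj["identifier"].endswith("CC-BY-4.0")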
@@ -146,67 +160,116 @@ def process_org_obj_against_ref_data(cls, orgdata, org_obj, org_obj_relation_nam if not orgdata or not org_obj: return - if org_obj.get('is_part_of', False): - nested_obj = org_obj.get('is_part_of') - cls.process_org_obj_against_ref_data(orgdata, nested_obj, - org_obj_relation_name + '.is_part_of', refdata=refdata, errors=errors) - - if org_obj.get('identifier', False): - ref_entry = cls.check_ref_data(orgdata, org_obj['identifier'], - org_obj_relation_name + '.identifier', value_not_found_is_error=False) + if org_obj.get("is_part_of", False): + nested_obj = org_obj.get("is_part_of") + cls.process_org_obj_against_ref_data( + orgdata, + nested_obj, + org_obj_relation_name + ".is_part_of", + refdata=refdata, + errors=errors, + ) + + if org_obj.get("identifier", False): + ref_entry = cls.check_ref_data( + orgdata, + org_obj["identifier"], + org_obj_relation_name + ".identifier", + value_not_found_is_error=False, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, org_obj, 'identifier', 'name', add_in_scheme=False) + cls.populate_from_ref_data( + ref_entry, org_obj, "identifier", "name", add_in_scheme=False + ) - if ref_entry.get('parent_org_code', False) and 'is_part_of' not in org_obj: - parent_ref_entry = cls.check_ref_data(orgdata, ref_entry['parent_org_code'], - org_obj_relation_name + '.is_part_of.identifier', - value_not_found_is_error=False) + if ref_entry.get("parent_org_code", False) and "is_part_of" not in org_obj: + parent_ref_entry = cls.check_ref_data( + orgdata, + ref_entry["parent_org_code"], + org_obj_relation_name + ".is_part_of.identifier", + value_not_found_is_error=False, + ) if parent_ref_entry: parent_org_obj = {} - if 'DataCatalog' not in cls.__name__: + if "DataCatalog" not in cls.__name__: # Datacatalogs does not allow @type field to be populated for organizations - parent_org_obj['@type'] = 'Organization' - - cls.populate_from_ref_data(parent_ref_entry, parent_org_obj, - 'identifier', 'name', add_in_scheme=False) - org_obj['is_part_of'] = parent_org_obj - - if refdata and 'contributor_type' in refdata: - for contributor_type in org_obj.get('contributor_type', []): - ref_entry = cls.check_ref_data(refdata['contributor_type'], contributor_type['identifier'], - org_obj_relation_name + '.contributor_type.identifier', errors) + parent_org_obj["@type"] = "Organization" + + cls.populate_from_ref_data( + parent_ref_entry, + parent_org_obj, + "identifier", + "name", + add_in_scheme=False, + ) + org_obj["is_part_of"] = parent_org_obj + + if refdata and "contributor_type" in refdata: + for contributor_type in org_obj.get("contributor_type", []): + ref_entry = cls.check_ref_data( + refdata["contributor_type"], + contributor_type["identifier"], + org_obj_relation_name + ".contributor_type.identifier", + errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, contributor_type, label_field='pref_label') + cls.populate_from_ref_data( + ref_entry, contributor_type, label_field="pref_label" + ) @classmethod - def process_research_agent_obj_with_type(cls, orgdata, refdata, errors, agent_obj, agent_obj_relation_name): - if agent_obj.get('@type') == 'Person': - member_of = agent_obj.get('member_of', None) + def process_research_agent_obj_with_type( + cls, orgdata, refdata, errors, agent_obj, agent_obj_relation_name + ): + if agent_obj.get("@type") == "Person": + member_of = agent_obj.get("member_of", None) if member_of: - cls.process_org_obj_against_ref_data(orgdata, member_of, agent_obj_relation_name + '.member_of', - refdata=refdata, 
errors=errors) + cls.process_org_obj_against_ref_data( + orgdata, + member_of, + agent_obj_relation_name + ".member_of", + refdata=refdata, + errors=errors, + ) - for contributor_role in agent_obj.get('contributor_role', []): - ref_entry = cls.check_ref_data(refdata['contributor_role'], contributor_role['identifier'], - agent_obj_relation_name + '.contributor_role.identifier', errors=errors) + for contributor_role in agent_obj.get("contributor_role", []): + ref_entry = cls.check_ref_data( + refdata["contributor_role"], + contributor_role["identifier"], + agent_obj_relation_name + ".contributor_role.identifier", + errors=errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, contributor_role, label_field='pref_label') - - for contributor_type in agent_obj.get('contributor_type', []): - ref_entry = cls.check_ref_data(refdata['contributor_type'], contributor_type['identifier'], - agent_obj_relation_name + '.contributor_type.identifier', errors=errors) + cls.populate_from_ref_data( + ref_entry, contributor_role, label_field="pref_label" + ) + + for contributor_type in agent_obj.get("contributor_type", []): + ref_entry = cls.check_ref_data( + refdata["contributor_type"], + contributor_type["identifier"], + agent_obj_relation_name + ".contributor_type.identifier", + errors=errors, + ) if ref_entry: - cls.populate_from_ref_data(ref_entry, contributor_type, label_field='pref_label') - - elif agent_obj.get('@type') == 'Organization': - cls.process_org_obj_against_ref_data(orgdata, agent_obj, agent_obj_relation_name, refdata=refdata, - errors=errors) + cls.populate_from_ref_data( + ref_entry, contributor_type, label_field="pref_label" + ) + + elif agent_obj.get("@type") == "Organization": + cls.process_org_obj_against_ref_data( + orgdata, + agent_obj, + agent_obj_relation_name, + refdata=refdata, + errors=errors, + ) @classmethod def remove_language_obj_irrelevant_titles(cls, lang_obj, title_label_field): title_obj = lang_obj.get(title_label_field, None) if title_obj: - to_delete = set(title_obj.keys()).difference(['fi', 'sv', 'en', 'und']) + to_delete = set(title_obj.keys()).difference(["fi", "sv", "en", "und"]) for d in to_delete: del title_obj[d] diff --git a/src/metax_api/services/rems_service.py b/src/metax_api/services/rems_service.py index 9cd7f9a4..ffcbeb9f 100755 --- a/src/metax_api/services/rems_service.py +++ b/src/metax_api/services/rems_service.py @@ -12,48 +12,47 @@ _logger = logging.getLogger(__name__) HANDLER_CLOSEABLE_APPLICATIONS = [ - 'application.state/approved', - 'application.state/returned', - 'application.state/submitted' + "application.state/approved", + "application.state/returned", + "application.state/submitted", ] -APPLICANT_CLOSEABLE_APPLICATIONS = [ - 'application.state/draft' -] +APPLICANT_CLOSEABLE_APPLICATIONS = ["application.state/draft"] + class REMSException(Exception): pass -class REMSService(): +class REMSService: def __init__(self): - if not hasattr(django_settings, 'REMS'): - raise Exception('Missing configuration from settings.py: REMS') + if not hasattr(django_settings, "REMS"): + raise Exception("Missing configuration from settings.py: REMS") settings = django_settings.REMS # only reporter_user is privileged to get all applications from REMS - self.api_key = settings['API_KEY'] - self.base_url = settings['BASE_URL'] - self.etsin_url = settings['ETSIN_URL_TEMPLATE'] - self.metax_user = settings['METAX_USER'] - self.reporter_user = settings['REPORTER_USER'] - self.auto_approver = settings['AUTO_APPROVER'] - self.form_id = settings['FORM_ID'] + 
self.api_key = settings["API_KEY"] + self.base_url = settings["BASE_URL"] + self.etsin_url = settings["ETSIN_URL_TEMPLATE"] + self.metax_user = settings["METAX_USER"] + self.reporter_user = settings["REPORTER_USER"] + self.auto_approver = settings["AUTO_APPROVER"] + self.form_id = settings["FORM_ID"] self.headers = { "x-rems-api-key": self.api_key, "x-rems-user-id": self.metax_user, - "Content-Type": "application/json" + "Content-Type": "application/json", } try: response = requests.get(f"{self.base_url}/health", headers=self.headers) except Exception as e: - raise Exception(f'Cannot connect to rems while checking its health. Error {e}') + raise Exception(f"Cannot connect to rems while checking its health. Error {e}") - if not response.json()['healthy'] is True: - raise REMSException('Rems is not healthy, request is aborted') + if not response.json()["healthy"] is True: + raise REMSException("Rems is not healthy, request is aborted") def create_rems_entity(self, cr, user_info): """ @@ -62,9 +61,9 @@ def create_rems_entity(self, cr, user_info): self.cr = cr # create user. Successful even if userid is already taken - self._post_rems('user', user_info) + self._post_rems("user", user_info) - wf_id = self._create_workflow(user_info['userid']) + wf_id = self._create_workflow(user_info["userid"]) license_id = self._create_license() res_id = self._create_resource(license_id) @@ -78,9 +77,9 @@ def close_rems_entity(self, old_rems_id, reason): self._close_applications(old_rems_id, reason) - self._close_entity('catalogue-item', rems_ci[0]['id']) - self._close_entity('workflow', rems_ci[0]['wfid']) - self._close_entity('resource', rems_ci[0]['resource-id']) + self._close_entity("catalogue-item", rems_ci[0]["id"]) + self._close_entity("workflow", rems_ci[0]["wfid"]) + self._close_entity("resource", rems_ci[0]["resource-id"]) def update_rems_entity(self, cr, old_rems_id, reason): """ @@ -95,22 +94,21 @@ def update_rems_entity(self, cr, old_rems_id, reason): self._close_applications(old_rems_id, reason) - self._close_entity('catalogue-item', rems_ci[0]['id']) - self._close_entity('resource', rems_ci[0]['resource-id']) + self._close_entity("catalogue-item", rems_ci[0]["id"]) + self._close_entity("resource", rems_ci[0]["resource-id"]) license_id = self._create_license() res_id = self._create_resource(license_id) - self._create_catalogue_item(res_id, rems_ci[0]['wfid']) + self._create_catalogue_item(res_id, rems_ci[0]["wfid"]) def _get_catalogue_item(self, rems_id): rems_ci = self._get_rems( - 'catalogue-item', - f'resource={rems_id}&archived=true&disabled=true' + "catalogue-item", f"resource={rems_id}&archived=true&disabled=true" ) - if len(rems_ci) < 1: # pragma: no cover + if len(rems_ci) < 1: # pragma: no cover # this should not happen - raise REMSException(f'Could not find catalogue-item for {rems_id} in REMS.') + raise REMSException(f"Could not find catalogue-item for {rems_id} in REMS.") return rems_ci @@ -121,126 +119,131 @@ def _close_applications(self, rems_id, reason): Furthermore, closed, rejected or revoked applications cannot be closed. 
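        (illustration: an application in application.state/submitted is closed while acting as the
        workflow's first handler, one in application.state/draft as its applicant, and anything
        already closed, rejected or revoked is skipped by the continue branch below)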
""" # REMS only allows reporter_user to get all applications - self.headers['x-rems-user-id'] = self.reporter_user + self.headers["x-rems-user-id"] = self.reporter_user - applications = self._get_rems('application', f'query=resource:\"{rems_id}\"') + applications = self._get_rems("application", f'query=resource:"{rems_id}"') for application in applications: - if application['application/state'] in HANDLER_CLOSEABLE_APPLICATIONS: - closing_user = application['application/workflow']['workflow.dynamic/handlers'][0]['userid'] - elif application['application/state'] in APPLICANT_CLOSEABLE_APPLICATIONS: - closing_user = application['application/applicant']['userid'] + if application["application/state"] in HANDLER_CLOSEABLE_APPLICATIONS: + closing_user = application["application/workflow"]["workflow.dynamic/handlers"][0][ + "userid" + ] + elif application["application/state"] in APPLICANT_CLOSEABLE_APPLICATIONS: + closing_user = application["application/applicant"]["userid"] else: continue - self.headers['x-rems-user-id'] = closing_user + self.headers["x-rems-user-id"] = closing_user - body = {"application-id": application['application/id'], "comment": f"Closed due to dataset {reason}"} + body = { + "application-id": application["application/id"], + "comment": f"Closed due to dataset {reason}", + } - self._post_rems('application', body, 'close') + self._post_rems("application", body, "close") - self.headers['x-rems-user-id'] = self.metax_user + self.headers["x-rems-user-id"] = self.metax_user def _close_entity(self, entity, id): - body_ar = {'id': id, 'archived': True} - body_en = {'id': id, 'enabled': False} + body_ar = {"id": id, "archived": True} + body_en = {"id": id, "enabled": False} - self._put_rems(entity, 'archived', body_ar) - self._put_rems(entity, 'enabled', body_en) + self._put_rems(entity, "archived", body_ar) + self._put_rems(entity, "enabled", body_en) def _create_workflow(self, user_id): body = { "organization": self.cr.metadata_owner_org, - "title": self.cr.research_dataset['preferred_identifier'], - "type": 'workflow/default', - "handlers": [user_id] + "title": self.cr.research_dataset["preferred_identifier"], + "type": "workflow/default", + "handlers": [user_id], } - response = self._post_rems('workflow', body) + response = self._post_rems("workflow", body) - return response['id'] + return response["id"] def _create_license(self): """ Checks if license is already found from REMS before creating new one """ - license = self.cr.research_dataset['access_rights']['license'][0] - license_url = license.get('identifier') or license['license'] + license = self.cr.research_dataset["access_rights"]["license"][0] + license_url = license.get("identifier") or license["license"] # no search parameter provided for license so have to check by hand - rems_licenses = self._get_rems('license', 'disabled=true&archived=true') + rems_licenses = self._get_rems("license", "disabled=true&archived=true") for lic in rems_licenses: - if any( [v['textcontent'] == license_url for v in lic['localizations'].values()] ): - return lic['id'] + if any([v["textcontent"] == license_url for v in lic["localizations"].values()]): + return lic["id"] - body = { - "licensetype": 'link', - "localizations": {} - } + body = {"licensetype": "link", "localizations": {}} - for lang in list(license['title'].keys()): - body['localizations'].update({ - lang: { - "title": license['title'][lang], - "textcontent": license_url - } - }) + for lang in list(license["title"].keys()): + body["localizations"].update( + {lang: {"title": 
license["title"][lang], "textcontent": license_url}} + ) - response = self._post_rems('license', body) + response = self._post_rems("license", body) - return response['id'] + return response["id"] def _create_resource(self, license_id): body = { "resid": self.cr.rems_identifier, "organization": self.cr.metadata_owner_org, - "licenses": [license_id] + "licenses": [license_id], } - response = self._post_rems('resource', body) + response = self._post_rems("resource", body) - return response['id'] + return response["id"] def _create_catalogue_item(self, res_id, wf_id): - rd_title = self.cr.research_dataset['title'] + rd_title = self.cr.research_dataset["title"] body = { "form": self.form_id, "resid": res_id, "wfid": wf_id, "localizations": {}, - "enabled": True + "enabled": True, } for lang in list(rd_title.keys()): - body['localizations'].update({ - lang: { - "title": rd_title[lang], - "infourl": self.etsin_url % self.cr.identifier + body["localizations"].update( + { + lang: { + "title": rd_title[lang], + "infourl": self.etsin_url % self.cr.identifier, + } } - }) + ) - response = self._post_rems('catalogue-item', body) + response = self._post_rems("catalogue-item", body) - return response['id'] + return response["id"] - def _post_rems(self, entity, body, action='create'): + def _post_rems(self, entity, body, action="create"): """ Send post to REMS. Action is needed as parameter because applications are closed with post. """ try: - response = requests.post(f"{self.base_url}/{entity}s/{action}", json=body, headers=self.headers) + response = requests.post( + f"{self.base_url}/{entity}s/{action}", json=body, headers=self.headers + ) except Exception as e: - raise Exception(f'Connection to REMS failed while creating {entity}. Error: {e}') + raise Exception(f"Connection to REMS failed while creating {entity}. Error: {e}") if response.status_code != 200: - raise REMSException(f'REMS returned bad status while creating {entity}. Error: {response.text}') + raise REMSException( + f"REMS returned bad status while creating {entity}. Error: {response.text}" + ) # operation status is in body resp = response.json() - if not resp['success']: + if not resp["success"]: raise REMSException(f'Could not {action} {entity} to REMS. Error: {resp["errors"]}') return resp @@ -250,31 +253,37 @@ def _put_rems(self, entity, action, body): Edit rems entity. Possible actions: [edit, archived, enabled]. """ try: - response = requests.put(f"{self.base_url}/{entity}s/{action}", json=body, headers=self.headers) + response = requests.put( + f"{self.base_url}/{entity}s/{action}", json=body, headers=self.headers + ) except Exception as e: - raise Exception(f'Connection to REMS failed while updating {entity}. Error: {e}') + raise Exception(f"Connection to REMS failed while updating {entity}. Error: {e}") if response.status_code != 200: - raise REMSException(f'REMS returned bad status while updating {entity}. Error: {response.text}') + raise REMSException( + f"REMS returned bad status while updating {entity}. Error: {response.text}" + ) # operation status is in body resp = response.json() - if not resp['success']: + if not resp["success"]: raise REMSException(f'Could not update {entity} to REMS. Error: {resp["errors"]}') return resp - def _get_rems(self, entity, params=''): + def _get_rems(self, entity, params=""): try: response = requests.get(f"{self.base_url}/{entity}s?{params}", headers=self.headers) except Exception as e: - raise Exception(f'Connection to REMS failed while getting {entity}. 
Error: {e}') + raise Exception(f"Connection to REMS failed while getting {entity}. Error: {e}") if response.status_code != 200: - raise REMSException(f'REMS returned bad status while getting {entity}. Error: {response.text}') + raise REMSException( + f"REMS returned bad status while getting {entity}. Error: {response.text}" + ) # operation should be successful if status code 200 return response.json() diff --git a/src/metax_api/services/schema_service.py b/src/metax_api/services/schema_service.py index 9efb5f5e..0d494c5d 100755 --- a/src/metax_api/services/schema_service.py +++ b/src/metax_api/services/schema_service.py @@ -18,8 +18,8 @@ _logger = logging.getLogger(__name__) d = logging.getLogger(__name__).debug -class SchemaService(): +class SchemaService: @classmethod def get_all_schemas(cls): schema_dir = cls._get_schema_dir() @@ -27,10 +27,15 @@ def get_all_schemas(cls): # The prefix is stripped from returned strings, # so that results can be used as is for retrieving specific # schema by reusing the get_json_schema function from CommonService. - schema_files = [f[0:f.rfind('_schema.json')] + schema_files = [ + f[0 : f.rfind("_schema.json")] for f in listdir(schema_dir) - if isfile(join(schema_dir, f)) and f.endswith('_schema.json')] - return Response(data={'count': len(schema_files), 'results': schema_files}, status=status.HTTP_200_OK) + if isfile(join(schema_dir, f)) and f.endswith("_schema.json") + ] + return Response( + data={"count": len(schema_files), "results": schema_files}, + status=status.HTTP_200_OK, + ) @classmethod def get_schema_content(cls, name): @@ -39,7 +44,7 @@ def get_schema_content(cls, name): # Trying to make sure that the absolute path of the requested file # starts with the schema folder in order to prevent dangerous path # traversals. 
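        # (illustration, hypothetical paths: abspath() collapses "..", so a request for
        #  name="../../settings/secrets" resolves outside schema_dir and fails the
        #  startswith() guard below; e.g. abspath("/app/schemas/../../etc") == "/etc")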
- if abspath(file_path).startswith(schema_dir) and isfile('%s_schema.json' % file_path): + if abspath(file_path).startswith(schema_dir) and isfile("%s_schema.json" % file_path): return Response(CS.get_json_schema(schema_dir, name), status=status.HTTP_200_OK) else: raise Http404 @@ -47,4 +52,4 @@ def get_schema_content(cls, name): @staticmethod def _get_schema_dir(): cur_dir = abspath(dirname(__file__)) - return abspath('%s/../api/rest/base/schemas' % cur_dir) + return abspath("%s/../api/rest/base/schemas" % cur_dir) diff --git a/src/metax_api/services/statistic_service.py b/src/metax_api/services/statistic_service.py index 21afeb8a..71d5af7b 100755 --- a/src/metax_api/services/statistic_service.py +++ b/src/metax_api/services/statistic_service.py @@ -16,14 +16,13 @@ _logger = logging.getLogger(__name__) -class StatisticService(): - +class StatisticService: @staticmethod def _get_access_types(): - sql_distinct_access_types = ''' + sql_distinct_access_types = """ select distinct(research_dataset->'access_rights'->'access_type'->>'identifier') from metax_api_catalogrecord - ''' + """ with connection.cursor() as cr: cr.execute(sql_distinct_access_types) @@ -32,26 +31,28 @@ def _get_access_types(): return access_types @classmethod - def count_datasets(cls, - access_type=None, - data_catalog=None, - deprecated=None, - from_date=None, - harvested=None, - latest=True, - legacy=None, - metadata_owner_org=None, - metadata_provider_org=None, - metadata_provider_user=None, - preservation_state=None, - removed=None, - to_date=None): + def count_datasets( + cls, + access_type=None, + data_catalog=None, + deprecated=None, + from_date=None, + harvested=None, + latest=True, + legacy=None, + metadata_owner_org=None, + metadata_provider_org=None, + metadata_provider_user=None, + preservation_state=None, + removed=None, + to_date=None, + ): """ Get simple total record count and total byte size according to given filters. 
""" - _logger.info('Retrieving total count and byte size...') + _logger.info("Retrieving total count and byte size...") - sql = ''' + sql = """ SELECT count(cr.id) AS count, COALESCE(SUM(COALESCE((research_dataset->>'total_files_byte_size')::bigint, 0)), 0) AS ida_byte_size @@ -59,40 +60,44 @@ def count_datasets(cls, join metax_api_datacatalog as dc on dc.id = cr.data_catalog_id where 1=1 %s - ''' + """ where_args = [] sql_args = [] where_args.append("and state = 'published'") if from_date: - where_args.append('and cr.date_created >= %s::date') + where_args.append("and cr.date_created >= %s::date") sql_args.append(from_date) if to_date: - where_args.append('and cr.date_created <= %s::date') + where_args.append("and cr.date_created <= %s::date") sql_args.append(to_date) if access_type: - where_args.append("and research_dataset->'access_rights'->'access_type'->>'identifier' = %s") + where_args.append( + "and research_dataset->'access_rights'->'access_type'->>'identifier' = %s" + ) sql_args.append(access_type) if data_catalog: try: - DataCatalog.objects.values('id').get(pk=int(data_catalog)) - where_args.append('and dc.id = %s') + DataCatalog.objects.values("id").get(pk=int(data_catalog)) + where_args.append("and dc.id = %s") except DataCatalog.DoesNotExist: - raise Http400({ 'detail': ['Data catalog identifier %s not found' % data_catalog] }) + raise Http400({"detail": ["Data catalog identifier %s not found" % data_catalog]}) except ValueError: try: - DataCatalog.objects.values('id').get(catalog_json__identifier=data_catalog) + DataCatalog.objects.values("id").get(catalog_json__identifier=data_catalog) where_args.append("and dc.catalog_json->>'identifier' = %s") except DataCatalog.DoesNotExist: - raise Http400({ 'detail': ['Data catalog identifier %s not found' % data_catalog] }) + raise Http400( + {"detail": ["Data catalog identifier %s not found" % data_catalog]} + ) sql_args.append(data_catalog) if deprecated is not None: - where_args.append('and deprecated = %s') + where_args.append("and deprecated = %s") sql_args.append(deprecated) if harvested: @@ -100,45 +105,52 @@ def count_datasets(cls, sql_args.append(harvested) if latest: - where_args.append('and next_dataset_version_id is null') + where_args.append("and next_dataset_version_id is null") if metadata_owner_org: - where_args.append('and metadata_owner_org = %s') + where_args.append("and metadata_owner_org = %s") sql_args.append(metadata_owner_org) if metadata_provider_org: - where_args.append('and metadata_provider_org = %s') + where_args.append("and metadata_provider_org = %s") sql_args.append(metadata_provider_org) if metadata_provider_user: - where_args.append('and metadata_provider_user = %s') + where_args.append("and metadata_provider_user = %s") sql_args.append(metadata_provider_user) if preservation_state: - where_args.append('and preservation_state = %s') + where_args.append("and preservation_state = %s") sql_args.append(preservation_state) if removed is not None: - where_args.append('and cr.removed = %s') + where_args.append("and cr.removed = %s") sql_args.append(removed) if legacy is not None: - where_args.append(''.join(["and dc.catalog_json->>'identifier'", " = " if legacy else " != ", "any(%s)"])) + where_args.append( + "".join( + [ + "and dc.catalog_json->>'identifier'", + " = " if legacy else " != ", + "any(%s)", + ] + ) + ) sql_args.append(settings.LEGACY_CATALOGS) - sql = sql % '\n'.join(where_args) + sql = sql % "\n".join(where_args) with connection.cursor() as cr: cr.execute(sql, sql_args) try: results = [ - 
dict(zip([col[0] for col in cr.description], row)) - for row in cr.fetchall() + dict(zip([col[0] for col in cr.description], row)) for row in cr.fetchall() ][0] except IndexError: results = {} - _logger.info('Done retrieving total count and byte size') + _logger.info("Done retrieving total count and byte size") return results @@ -147,9 +159,9 @@ def total_datasets(cls, from_date, to_date, latest=True, legacy=None, removed=No """ Retrieve dataset count and byte size per month and monthly cumulative from all datasets. """ - _logger.info('Retrieving total count and byte sizes for all datasets...') + _logger.info("Retrieving total count and byte sizes for all datasets...") - sql_all_datasets = ''' + sql_all_datasets = """ WITH cte AS ( SELECT date_trunc('month', cr.date_created) AS mon, @@ -182,36 +194,39 @@ def total_datasets(cls, from_date, to_date, latest=True, legacy=None, removed=No ) cr USING (mon) GROUP BY mon, c.mon_ida_byte_size, count ORDER BY mon; - ''' + """ filter_sql = [] filter_args = [] if latest: - filter_sql.append('and next_dataset_version_id is null') + filter_sql.append("and next_dataset_version_id is null") if removed is not None: - filter_sql.append('and cr.removed = %s') + filter_sql.append("and cr.removed = %s") filter_args.append(removed) if legacy is not None: - filter_sql.append(''.join(["and dc.catalog_json->>'identifier'", " = " if legacy else " != ", "any(%s)"])) + filter_sql.append( + "".join( + [ + "and dc.catalog_json->>'identifier'", + " = " if legacy else " != ", + "any(%s)", + ] + ) + ) filter_args.append(settings.LEGACY_CATALOGS) - sql_all_datasets = sql_all_datasets.replace( - 'OPTIONAL_WHERE_FILTERS', - '\n'.join(filter_sql)) + sql_all_datasets = sql_all_datasets.replace("OPTIONAL_WHERE_FILTERS", "\n".join(filter_sql)) - sql_args = filter_args + [from_date + '-01', to_date + '-01'] + filter_args + sql_args = filter_args + [from_date + "-01", to_date + "-01"] + filter_args with connection.cursor() as cr: cr.execute(sql_all_datasets, sql_args) - results = [ - dict(zip([col[0] for col in cr.description], row)) - for row in cr.fetchall() - ] + results = [dict(zip([col[0] for col in cr.description], row)) for row in cr.fetchall()] - _logger.info('Done retrieving total count and byte sizes') + _logger.info("Done retrieving total count and byte sizes") return results @@ -223,37 +238,39 @@ def total_data_catalog_datasets(cls, from_date, to_date, data_catalog=None): """ if data_catalog: try: - dc_params = { 'pk': int(data_catalog) } + dc_params = {"pk": int(data_catalog)} except ValueError: - dc_params = { 'catalog_json__identifier': data_catalog } + dc_params = {"catalog_json__identifier": data_catalog} try: dc = DataCatalog.objects.filter(**dc_params).values() except: - raise Http400({ 'detail': ['Data catalog identifier %s not found' % data_catalog] }) + raise Http400({"detail": ["Data catalog identifier %s not found" % data_catalog]}) _logger.info( - 'Retrieving total count and byte sizes for datasets in catalog: %s' % dc['catalog_json']['identifier'] + "Retrieving total count and byte sizes for datasets in catalog: %s" + % dc["catalog_json"]["identifier"] ) catalogs = [dc] else: - _logger.info('Retrieving total count and byte sizes for datasets in all catalogs') + _logger.info("Retrieving total count and byte sizes for datasets in all catalogs") catalogs = DataCatalog.objects.all().values() access_types = cls._get_access_types() results = {} for dc in catalogs: - results[dc['catalog_json']['identifier']] \ - = cls._total_data_catalog_datasets(from_date, 
to_date, access_types, dc['id']) + results[dc["catalog_json"]["identifier"]] = cls._total_data_catalog_datasets( + from_date, to_date, access_types, dc["id"] + ) - _logger.info('Done retrieving total count and byte sizes') + _logger.info("Done retrieving total count and byte sizes") return results @classmethod def _total_data_catalog_datasets(cls, from_date, to_date, access_types, dc_id): - sql = ''' + sql = """ WITH cte AS ( SELECT date_trunc('month', cr.date_created) AS mon, @@ -291,7 +308,7 @@ def _total_data_catalog_datasets(cls, from_date, to_date, access_types, dc_id): ) cr USING (mon) GROUP BY mon, c.mon_ida_byte_size, count, access_type ORDER BY mon; - ''' + """ # group results by access_type grouped = {} @@ -300,10 +317,9 @@ def _total_data_catalog_datasets(cls, from_date, to_date, access_types, dc_id): for access_type in access_types: cr.execute(sql, [dc_id, access_type, from_date, to_date, dc_id, access_type]) results = [ - dict(zip([col[0] for col in cr.description], row)) - for row in cr.fetchall() + dict(zip([col[0] for col in cr.description], row)) for row in cr.fetchall() ] - grouped[access_type.split('/')[-1]] = results + grouped[access_type.split("/")[-1]] = results total = [] @@ -315,14 +331,14 @@ def _total_data_catalog_datasets(cls, from_date, to_date, access_types, dc_id): except IndexError: total.append(stats) else: - last['count'] += stats['count'] - last['count_cumulative'] += stats['count_cumulative'] - last['ida_byte_size'] += stats['ida_byte_size'] - last['ida_byte_size_cumulative'] += stats['ida_byte_size_cumulative'] + last["count"] += stats["count"] + last["count_cumulative"] += stats["count_cumulative"] + last["ida_byte_size"] += stats["ida_byte_size"] + last["ida_byte_size_cumulative"] += stats["ida_byte_size_cumulative"] - grouped['total'] = total + grouped["total"] = total - _logger.info('Done retrieving total count and byte sizes') + _logger.info("Done retrieving total count and byte sizes") return grouped @@ -333,14 +349,18 @@ def total_organization_datasets(cls, from_date, to_date, metadata_owner_org=None or a given single organization, grouped by catalog. 
""" if metadata_owner_org: - _logger.info('Retrieving total count and byte sizes for datasets for organization: %s' % metadata_owner_org) + _logger.info( + "Retrieving total count and byte sizes for datasets for organization: %s" + % metadata_owner_org + ) metadata_owner_orgs = [metadata_owner_org] else: - _logger.info('Retrieving total count and byte sizes for datasets for all organizations') - metadata_owner_orgs = CatalogRecord.objects \ - .values_list('metadata_owner_org', flat=True) \ - .order_by('metadata_owner_org') \ - .distinct('metadata_owner_org') + _logger.info("Retrieving total count and byte sizes for datasets for all organizations") + metadata_owner_orgs = ( + CatalogRecord.objects.values_list("metadata_owner_org", flat=True) + .order_by("metadata_owner_org") + .distinct("metadata_owner_org") + ) results = {} for org in metadata_owner_orgs: @@ -350,7 +370,7 @@ def total_organization_datasets(cls, from_date, to_date, metadata_owner_org=None @classmethod def _total_organization_datasets(cls, from_date, to_date, metadata_owner_org): - sql = ''' + sql = """ WITH cte AS ( SELECT date_trunc('month', cr.date_created) AS mon, @@ -385,21 +405,32 @@ def _total_organization_datasets(cls, from_date, to_date, metadata_owner_org): ) cr USING (mon) GROUP BY mon, c.mon_ida_byte_size, count ORDER BY mon; - ''' + """ - catalogs = DataCatalog.objects.filter(catalog_json__research_dataset_schema__in=['ida', 'att']).values() + catalogs = DataCatalog.objects.filter( + catalog_json__research_dataset_schema__in=["ida", "att"] + ).values() # group results by catalogs grouped = {} with connection.cursor() as cr: for dc in catalogs: - cr.execute(sql, [dc['id'], metadata_owner_org, from_date, to_date, dc['id'], metadata_owner_org]) + cr.execute( + sql, + [ + dc["id"], + metadata_owner_org, + from_date, + to_date, + dc["id"], + metadata_owner_org, + ], + ) results = [ - dict(zip([col[0] for col in cr.description], row)) - for row in cr.fetchall() + dict(zip([col[0] for col in cr.description], row)) for row in cr.fetchall() ] - grouped[dc['catalog_json']['identifier']] = results + grouped[dc["catalog_json"]["identifier"]] = results total = [] @@ -411,14 +442,14 @@ def _total_organization_datasets(cls, from_date, to_date, metadata_owner_org): except IndexError: total.append(stats) else: - last['count'] += stats['count'] - last['count_cumulative'] += stats['count_cumulative'] - last['ida_byte_size'] += stats['ida_byte_size'] - last['ida_byte_size_cumulative'] += stats['ida_byte_size_cumulative'] + last["count"] += stats["count"] + last["count_cumulative"] += stats["count_cumulative"] + last["ida_byte_size"] += stats["ida_byte_size"] + last["ida_byte_size_cumulative"] += stats["ida_byte_size_cumulative"] - grouped['total'] = total + grouped["total"] = total - _logger.info('Done retrieving total count and byte sizes') + _logger.info("Done retrieving total count and byte sizes") return grouped @@ -428,9 +459,9 @@ def total_harvested_datasets(cls, from_date, to_date): For harvested datasets, retrieve dataset count per month and monthly cumulative, and grouped by access_type. 
""" - _logger.info('Retrieving total counts for harvested datasets') + _logger.info("Retrieving total counts for harvested datasets") - sql = ''' + sql = """ SELECT to_char(mon, 'YYYY-MM') as month, COALESCE(count, 0) as count, @@ -451,7 +482,7 @@ def total_harvested_datasets(cls, from_date, to_date): ) cr USING (mon) GROUP BY mon, count, access_type ORDER BY mon; - ''' + """ access_types = cls._get_access_types() @@ -462,10 +493,9 @@ def total_harvested_datasets(cls, from_date, to_date): for access_type in access_types: cr.execute(sql, [from_date, to_date, access_type]) results = [ - dict(zip([col[0] for col in cr.description], row)) - for row in cr.fetchall() + dict(zip([col[0] for col in cr.description], row)) for row in cr.fetchall() ] - grouped[access_type.split('/')[-1]] = results + grouped[access_type.split("/")[-1]] = results total = [] @@ -477,12 +507,12 @@ def total_harvested_datasets(cls, from_date, to_date): except IndexError: total.append(stats) else: - last['count'] += stats['count'] - last['count_cumulative'] += stats['count_cumulative'] + last["count"] += stats["count"] + last["count_cumulative"] += stats["count_cumulative"] - grouped['total'] = total + grouped["total"] = total - _logger.info('Done retrieving total counts') + _logger.info("Done retrieving total counts") return grouped @@ -491,9 +521,9 @@ def deprecated_datasets_cumulative(cls, from_date=None, to_date=None): """ Retrieve dataset count per month and monthly cumulative for deprecated datasets. """ - _logger.info('Retrieving total counts for deprecated datasets') + _logger.info("Retrieving total counts for deprecated datasets") - sql = ''' + sql = """ SELECT to_char(mon, 'YYYY-MM') as month, COALESCE(count, 0) as count, @@ -509,16 +539,13 @@ def deprecated_datasets_cumulative(cls, from_date=None, to_date=None): ) cr USING (mon) GROUP BY mon, count ORDER BY mon; - ''' + """ with connection.cursor() as cr: cr.execute(sql, [from_date, to_date]) - results = [ - dict(zip([col[0] for col in cr.description], row)) - for row in cr.fetchall() - ] + results = [dict(zip([col[0] for col in cr.description], row)) for row in cr.fetchall()] - _logger.info('Done retrieving total deprecated counts') + _logger.info("Done retrieving total deprecated counts") return results @@ -528,9 +555,9 @@ def total_end_user_datasets(cls, from_date, to_date): Retrieve dataset count per month and monthly cumulative for datasets which have been created by end users using End User API. 
""" - _logger.info('Retrieving total counts for datasets created using End User API') + _logger.info("Retrieving total counts for datasets created using End User API") - sql = ''' + sql = """ SELECT to_char(mon, 'YYYY-MM') as month, COALESCE(count, 0) as count, @@ -547,24 +574,21 @@ def total_end_user_datasets(cls, from_date, to_date): ) cr USING (mon) GROUP BY mon, count ORDER BY mon; - ''' + """ with connection.cursor() as cr: cr.execute(sql, [from_date, to_date]) - results = [ - dict(zip([col[0] for col in cr.description], row)) - for row in cr.fetchall() - ] + results = [dict(zip([col[0] for col in cr.description], row)) for row in cr.fetchall()] - _logger.info('Done retrieving total counts') + _logger.info("Done retrieving total counts") return results @classmethod def unused_files(cls): - _logger.info('Retrieving total counts of files which are not part of any datasets...') + _logger.info("Retrieving total counts of files which are not part of any datasets...") - sql_get_unused_files_by_project = ''' + sql_get_unused_files_by_project = """ select count(f.id) as count, project_identifier from metax_api_file as f where not exists ( @@ -573,14 +597,13 @@ def unused_files(cls): where file_id = f.id ) group by project_identifier; - ''' + """ with connection.cursor() as cr: cr.execute(sql_get_unused_files_by_project) file_stats = [ - dict(zip([col[0] for col in cr.description], row)) - for row in cr.fetchall() + dict(zip([col[0] for col in cr.description], row)) for row in cr.fetchall() ] - _logger.info('Done retrieving total counts') + _logger.info("Done retrieving total counts") - return file_stats \ No newline at end of file + return file_stats diff --git a/src/metax_api/settings/__init__.py b/src/metax_api/settings/__init__.py index 38530c6f..fd0410ea 100755 --- a/src/metax_api/settings/__init__.py +++ b/src/metax_api/settings/__init__.py @@ -14,9 +14,7 @@ from metax_api.settings.components import BASE_DIR # src # Managing environment via DJANGO_ENV variable: -REFDATA_INDEXER_PATH = join( - BASE_DIR, "metax_api", "tasks", "refdata", "refdata_indexer" -) +REFDATA_INDEXER_PATH = join(BASE_DIR, "metax_api", "tasks", "refdata", "refdata_indexer") env = environ.Env( # set casting, default value LOGGING_LEVEL=(str, "INFO"), @@ -26,7 +24,7 @@ API_USERS_PATH=(str, "/etc/fairdata-metax/api_users"), DEBUG=(bool, False), DJANGO_ENV=(str, "local"), - ELASTIC_SEARCH_HOSTS=(list, ['localhost']), + ELASTIC_SEARCH_HOSTS=(list, ["localhost"]), ELASTIC_SEARCH_PORT=(int, 9200), ELASTIC_SEARCH_USE_SSL=(bool, False), ENABLE_V1_ENDPOINTS=(bool, True), @@ -34,14 +32,20 @@ ENABLE_DJANGO_WATCHMAN=(bool, False), ERROR_FILES_PATH=(str, join("/var", "log", "metax-api", "errors")), ES_CONFIG_DIR=(str, join(REFDATA_INDEXER_PATH, "resources", "es-config/")), - LOCAL_REF_DATA_FOLDER=(str, join(REFDATA_INDEXER_PATH, "resources", "local-refdata/")), + LOCAL_REF_DATA_FOLDER=( + str, + join(REFDATA_INDEXER_PATH, "resources", "local-refdata/"), + ), LOGGING_PATH=(str, join("/var", "log", "metax-api")), METAX_DATABASE_HOST=(str, "localhost"), METAX_DATABASE_PORT=(str, 5432), - ORG_FILE_PATH=(str, join(REFDATA_INDEXER_PATH, "resources", "organizations", "organizations.csv"),), + ORG_FILE_PATH=( + str, + join(REFDATA_INDEXER_PATH, "resources", "organizations", "organizations.csv"), + ), OAI_BASE_URL=(str, "https://metax.fd-dev.csc.fi/oai/"), OAI_BATCH_SIZE=(int, 25), - OAI_REPOSITORY_NAME=(str, 'Metax'), + OAI_REPOSITORY_NAME=(str, "Metax"), RABBIT_MQ_HOSTS=(list, ["localhost"]), RABBIT_MQ_PORT=(int, 5672), 
RABBIT_MQ_PASSWORD=(str, "guest"), diff --git a/src/metax_api/settings/components/access_control.py b/src/metax_api/settings/components/access_control.py index 852d1b95..ff5c3045 100755 --- a/src/metax_api/settings/components/access_control.py +++ b/src/metax_api/settings/components/access_control.py @@ -4,23 +4,22 @@ from box import Box -api_permissions = Box({ - "rest": { - "apierrors": {}, - "datacatalogs": {}, - "datasets": {}, - "directories": {}, - "files": {}, - "filestorages": {}, - "schemas": {} +api_permissions = Box( + { + "rest": { + "apierrors": {}, + "datacatalogs": {}, + "datasets": {}, + "directories": {}, + "files": {}, + "filestorages": {}, + "schemas": {}, + }, + "rpc": {"datasets": {}, "elasticsearchs": {}, "files": {}, "statistics": {}}, }, - "rpc": { - "datasets": {}, - "elasticsearchs": {}, - "files": {}, - "statistics": {} - } -}, default_box_attr={}, default_box=True) + default_box_attr={}, + default_box=True, +) class Role(Enum): @@ -69,16 +68,53 @@ def __lt__(self, other): api_permissions.rest.datacatalogs["update"] = [Role.METAX, Role.ETSIN] api_permissions.rest.datacatalogs.delete = [Role.METAX, Role.ETSIN] -api_permissions.rest.datasets.create = [Role.METAX, Role.END_USERS, Role.TPAS, Role.QVAIN, Role.ETSIN] +api_permissions.rest.datasets.create = [ + Role.METAX, + Role.END_USERS, + Role.TPAS, + Role.QVAIN, + Role.ETSIN, +] api_permissions.rest.datasets.read = [Role.ALL] -api_permissions.rest.datasets["update"] = [Role.METAX, Role.END_USERS, Role.TPAS, Role.QVAIN, Role.ETSIN] -api_permissions.rest.datasets.delete = [Role.METAX, Role.END_USERS, Role.TPAS, Role.QVAIN, Role.ETSIN] - -api_permissions.rest.directories.read = [Role.METAX, Role.QVAIN, Role.ETSIN, Role.TPAS, Role.FDS, Role.END_USERS] +api_permissions.rest.datasets["update"] = [ + Role.METAX, + Role.END_USERS, + Role.TPAS, + Role.QVAIN, + Role.ETSIN, +] +api_permissions.rest.datasets.delete = [ + Role.METAX, + Role.END_USERS, + Role.TPAS, + Role.QVAIN, + Role.ETSIN, +] + +api_permissions.rest.directories.read = [ + Role.METAX, + Role.QVAIN, + Role.ETSIN, + Role.TPAS, + Role.FDS, + Role.END_USERS, +] api_permissions.rest.files.create = [Role.METAX, Role.IDA, Role.TPAS] -api_permissions.rest.files.read = [Role.METAX, Role.IDA, Role.FDS, Role.TPAS, Role.END_USERS] -api_permissions.rest.files["update"] = [Role.METAX, Role.IDA, Role.TPAS, Role.FDS, Role.END_USERS] +api_permissions.rest.files.read = [ + Role.METAX, + Role.IDA, + Role.FDS, + Role.TPAS, + Role.END_USERS, +] +api_permissions.rest.files["update"] = [ + Role.METAX, + Role.IDA, + Role.TPAS, + Role.FDS, + Role.END_USERS, +] api_permissions.rest.files.delete = [Role.METAX, Role.IDA, Role.TPAS] api_permissions.rest.filestorages.create = [Role.METAX] @@ -112,6 +148,7 @@ def __lt__(self, other): api_permissions.rpc.statistics.organization_datasets_cumulative.use = [Role.ALL] api_permissions.rpc.statistics.unused_files.use = [Role.ALL] + def prepare_perm_values(d): new_d = d if hasattr(d, "items"): @@ -130,4 +167,4 @@ def prepare_perm_values(d): return new_d -API_ACCESS = prepare_perm_values(api_permissions.to_dict()) \ No newline at end of file +API_ACCESS = prepare_perm_values(api_permissions.to_dict()) diff --git a/src/metax_api/settings/components/common.py b/src/metax_api/settings/components/common.py index 59bd1a31..cbf54286 100755 --- a/src/metax_api/settings/components/common.py +++ b/src/metax_api/settings/components/common.py @@ -17,7 +17,7 @@ ATT_DATA_CATALOG_IDENTIFIER, LEGACY_DATA_CATALOG_IDENTIFIER, DFT_DATA_CATALOG_IDENTIFIER, - 
PAS_DATA_CATALOG_IDENTIFIER + PAS_DATA_CATALOG_IDENTIFIER, ] # catalogs where uniqueness of dataset pids is not enforced. diff --git a/src/metax_api/settings/components/elasticsearch.py b/src/metax_api/settings/components/elasticsearch.py index ead9c790..8227bc75 100755 --- a/src/metax_api/settings/components/elasticsearch.py +++ b/src/metax_api/settings/components/elasticsearch.py @@ -3,7 +3,7 @@ ELASTICSEARCH = { "HOSTS": env("ELASTIC_SEARCH_HOSTS"), "PORT": env("ELASTIC_SEARCH_PORT"), - "USE_SSL": env("ELASTIC_SEARCH_USE_SSL") + "USE_SSL": env("ELASTIC_SEARCH_USE_SSL"), } ALWAYS_RELOAD_REFERENCE_DATA_ON_RESTART = env("ALWAYS_RELOAD_REFERENCE_DATA_ON_RESTART") diff --git a/src/metax_api/settings/components/rabbitmq.py b/src/metax_api/settings/components/rabbitmq.py index 8bf1809b..4e4a2f37 100755 --- a/src/metax_api/settings/components/rabbitmq.py +++ b/src/metax_api/settings/components/rabbitmq.py @@ -19,10 +19,10 @@ "QUEUES": [ { "NAME": "ttv-operations", - #"ROUTING_KEY": "some_key" + # "ROUTING_KEY": "some_key" } - ] - } + ], + }, ], } RABBIT_MQ_USE_VHOST = env("RABBIT_MQ_USE_VHOST") diff --git a/src/metax_api/settings/environments/local.py b/src/metax_api/settings/environments/local.py index 02899153..f0feecba 100755 --- a/src/metax_api/settings/environments/local.py +++ b/src/metax_api/settings/environments/local.py @@ -3,15 +3,13 @@ ALLOWED_HOSTS += ["*"] -if 'debug_toolbar' not in INSTALLED_APPS: - INSTALLED_APPS += ['debug_toolbar'] -if 'debug_toolbar.middleware.DebugToolbarMiddleware' not in MIDDLEWARE: - MIDDLEWARE = ['debug_toolbar.middleware.DebugToolbarMiddleware'] + MIDDLEWARE +if "debug_toolbar" not in INSTALLED_APPS: + INSTALLED_APPS += ["debug_toolbar"] +if "debug_toolbar.middleware.DebugToolbarMiddleware" not in MIDDLEWARE: + MIDDLEWARE = ["debug_toolbar.middleware.DebugToolbarMiddleware"] + MIDDLEWARE + +INTERNAL_IPS = ["127.0.0.1", "0.0.0.0"] -INTERNAL_IPS = [ - '127.0.0.1', - '0.0.0.0' -] def show_toolbar(request): if DEBUG: @@ -19,6 +17,7 @@ def show_toolbar(request): else: return False + DEBUG_TOOLBAR_CONFIG = { "SHOW_TOOLBAR_CALLBACK": show_toolbar, } diff --git a/src/metax_api/settings/environments/production.py b/src/metax_api/settings/environments/production.py index 9506ea86..cf5223cb 100644 --- a/src/metax_api/settings/environments/production.py +++ b/src/metax_api/settings/environments/production.py @@ -3,4 +3,4 @@ api_permissions.rpc.files.flush_project.use.clear() -API_ACCESS = prepare_perm_values(api_permissions.to_dict()) \ No newline at end of file +API_ACCESS = prepare_perm_values(api_permissions.to_dict()) diff --git a/src/metax_api/settings/environments/stable.py b/src/metax_api/settings/environments/stable.py index fa7e2520..ae2d77b6 100644 --- a/src/metax_api/settings/environments/stable.py +++ b/src/metax_api/settings/environments/stable.py @@ -1,4 +1,3 @@ - from metax_api.settings.components.access_control import Role, api_permissions, prepare_perm_values from metax_api.settings.environments.staging import API_USERS # noqa: F401 @@ -11,8 +10,23 @@ api_permissions.rest.files.read += [Role.QVAIN, Role.QVAIN_LIGHT] api_permissions.rest.files["update"] += [Role.QVAIN, Role.QVAIN_LIGHT] -api_permissions.rpc.datasets.change_cumulative_state.use = [Role.METAX, Role.QVAIN, Role.QVAIN_LIGHT, Role.END_USERS] -api_permissions.rpc.datasets.fix_deprecated.use = [Role.METAX, Role.QVAIN, Role.QVAIN_LIGHT, Role.END_USERS] -api_permissions.rpc.dataset.refresh_directory_content.use = [Role.METAX, Role.QVAIN, Role.QVAIN_LIGHT, Role.END_USERS] 
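# Aside: an illustrative sketch (not from this patch) of why the singular
# "rpc.dataset" key in the line above goes unnoticed even though
# access_control.py only defines "rpc.datasets": with default_box=True,
# python-box silently creates missing branches on chained assignment instead
# of raising. Simplified here to default_box alone; the real module also
# passes default_box_attr.
from box import Box

perms = Box({"rpc": {"datasets": {}}}, default_box=True)
perms.rpc.dataset.refresh_directory_content.use = ["METAX"]  # typo-like key: no error raised
assert "dataset" in perms.rpc and "datasets" in perms.rpc  # two separate branches now exist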
+api_permissions.rpc.datasets.change_cumulative_state.use = [ + Role.METAX, + Role.QVAIN, + Role.QVAIN_LIGHT, + Role.END_USERS, +] +api_permissions.rpc.datasets.fix_deprecated.use = [ + Role.METAX, + Role.QVAIN, + Role.QVAIN_LIGHT, + Role.END_USERS, +] +api_permissions.rpc.dataset.refresh_directory_content.use = [ + Role.METAX, + Role.QVAIN, + Role.QVAIN_LIGHT, + Role.END_USERS, +] -API_ACCESS = prepare_perm_values(api_permissions) \ No newline at end of file +API_ACCESS = prepare_perm_values(api_permissions) diff --git a/src/metax_api/tasks/refdata/refdata_indexer/domain/indexable_data.py b/src/metax_api/tasks/refdata/refdata_indexer/domain/indexable_data.py index 7fd35581..2f75158c 100755 --- a/src/metax_api/tasks/refdata/refdata_indexer/domain/indexable_data.py +++ b/src/metax_api/tasks/refdata/refdata_indexer/domain/indexable_data.py @@ -17,9 +17,7 @@ def __init__(self, doc_id, doc_type, label, uri, same_as, scheme): self.doc_type = doc_type self.doc_id = self._create_es_document_id(doc_id) - self.label = ( - label # { 'fi': 'value1', 'en': 'value2',..., 'und': 'default_value' } - ) + self.label = label # { 'fi': 'value1', 'en': 'value2',..., 'und': 'default_value' } self.same_as = same_as self.code = doc_id diff --git a/src/metax_api/tasks/refdata/refdata_indexer/es_index_data.py b/src/metax_api/tasks/refdata/refdata_indexer/es_index_data.py index 64e8d85f..9876bb4c 100755 --- a/src/metax_api/tasks/refdata/refdata_indexer/es_index_data.py +++ b/src/metax_api/tasks/refdata/refdata_indexer/es_index_data.py @@ -5,7 +5,9 @@ from metax_api.tasks.refdata.refdata_indexer.domain.indexable_data import IndexableData as IdxData from metax_api.tasks.refdata.refdata_indexer.domain.reference_data import ReferenceData as RefData -from metax_api.tasks.refdata.refdata_indexer.service.elasticsearch_service import ElasticSearchService as ESS +from metax_api.tasks.refdata.refdata_indexer.service.elasticsearch_service import ( + ElasticSearchService as ESS, +) from metax_api.tasks.refdata.refdata_indexer.service.finto_data_service import FintoDataService # from service.infra_data_service import InfraDataService @@ -58,9 +60,7 @@ def index_data(): for data_type in RefData.FINTO_REF_DATA_TYPES: finto_es_data_models = finto_service.get_data(data_type) if len(finto_es_data_models) == 0: - _logger.info( - "No data models to reindex for finto data type {0}".format(data_type) - ) + _logger.info("No data models to reindex for finto data type {0}".format(data_type)) continue es.delete_and_update_indexable_data( diff --git a/src/metax_api/tasks/refdata/refdata_indexer/service/finto_data_service.py b/src/metax_api/tasks/refdata/refdata_indexer/service/finto_data_service.py index 086e9c15..67f31e11 100755 --- a/src/metax_api/tasks/refdata/refdata_indexer/service/finto_data_service.py +++ b/src/metax_api/tasks/refdata/refdata_indexer/service/finto_data_service.py @@ -75,13 +75,11 @@ def _parse_finto_data(self, graph, data_type): ) # parents (broader) parent_ids = [ - self._get_uri_end_part(parent) - for parent in graph.objects(concept, SKOS.broader) + self._get_uri_end_part(parent) for parent in graph.objects(concept, SKOS.broader) ] # children (narrower) child_ids = [ - self._get_uri_end_part(child) - for child in graph.objects(concept, SKOS.narrower) + self._get_uri_end_part(child) for child in graph.objects(concept, SKOS.narrower) ] same_as = [] wkt = "" @@ -145,13 +143,11 @@ def _fetch_finto_data(self, data_type): if not str_error: return g else: - _logger.error( - "Failed to read Finto data of type %s, 
skipping.." % data_type - ) + _logger.error("Failed to read Finto data of type %s, skipping.." % data_type) return None def _get_uri_end_part(self, uri): - return uri[uri.rindex("/") + 1:].strip() + return uri[uri.rindex("/") + 1 :].strip() def _get_coordinates_for_location_from_url(self, url): sleep_time = 2 diff --git a/src/metax_api/tasks/refdata/refdata_indexer/service/organization_service.py b/src/metax_api/tasks/refdata/refdata_indexer/service/organization_service.py index 499d3e3f..fb75d0be 100755 --- a/src/metax_api/tasks/refdata/refdata_indexer/service/organization_service.py +++ b/src/metax_api/tasks/refdata/refdata_indexer/service/organization_service.py @@ -32,9 +32,7 @@ def get_data(self): same_as = org.get("same_as", []) org_csc = org.get("org_csc", "") index_data_models.append( - OrganizationData( - org["org_id"], org["label"], parent_id, same_as, org_csc - ) + OrganizationData(org["org_id"], org["label"], parent_id, same_as, org_csc) ) os.remove(self.INPUT_FILE) diff --git a/src/metax_api/tests/api/oaipmh/minimal_api.py b/src/metax_api/tests/api/oaipmh/minimal_api.py index 15fdae60..c2c5ef51 100755 --- a/src/metax_api/tests/api/oaipmh/minimal_api.py +++ b/src/metax_api/tests/api/oaipmh/minimal_api.py @@ -23,27 +23,29 @@ class OAIPMHReadTests(APITestCase, TestClassUtils): - _namespaces = {'o': 'http://www.openarchives.org/OAI/2.0/', - 'oai_dc': "http://www.openarchives.org/OAI/2.0/oai_dc/", - 'dc': "http://purl.org/dc/elements/1.1/", - 'dct': "http://purl.org/dc/terms/", - 'datacite': 'http://schema.datacite.org/oai/oai-1.0/'} + _namespaces = { + "o": "http://www.openarchives.org/OAI/2.0/", + "oai_dc": "http://www.openarchives.org/OAI/2.0/oai_dc/", + "dc": "http://purl.org/dc/elements/1.1/", + "dct": "http://purl.org/dc/terms/", + "datacite": "http://schema.datacite.org/oai/oai-1.0/", + } @classmethod def setUpClass(cls): """ Loaded only once for test cases inside this class. """ - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(OAIPMHReadTests, cls).setUpClass() def setUp(self): cr = CatalogRecord.objects.get(pk=1) - cr.data_catalog.catalog_json['identifier'] = ATT_CATALOG + cr.data_catalog.catalog_json["identifier"] = ATT_CATALOG cr.data_catalog.force_save() cr = CatalogRecord.objects.get(pk=14) - cr.data_catalog.catalog_json['identifier'] = IDA_CATALOG + cr.data_catalog.catalog_json["identifier"] = IDA_CATALOG cr.data_catalog.force_save() # some cr that has publisher set... 
@@ -65,21 +67,21 @@ def _get_single_result(self, data, xpath): def _set_dataset_as_draft(self, cr_id): cr = CatalogRecord.objects.get(pk=cr_id) - cr.state = 'draft' + cr.state = "draft" cr.force_save() -# VERB: Identify + # VERB: Identify def test_identify(self): - response = self.client.get('/oai/?verb=Identify') + response = self.client.get("/oai/?verb=Identify") self.assertEqual(response.status_code, status.HTTP_200_OK) -# VERB: ListMetadataFormats + # VERB: ListMetadataFormats def test_list_metadata_formats(self): - response = self.client.get('/oai/?verb=ListMetadataFormats') + response = self.client.get("/oai/?verb=ListMetadataFormats") self.assertEqual(response.status_code, status.HTTP_200_OK) - formats = self._get_single_result(response.content, '//o:ListMetadataFormats') + formats = self._get_single_result(response.content, "//o:ListMetadataFormats") self.assertEqual(len(formats), 4) metadataPrefix = self._get_results(formats, '//o:metadataPrefix[text() = "oai_dc"]') @@ -88,322 +90,378 @@ def test_list_metadata_formats(self): metadataPrefix = self._get_results(formats, '//o:metadataPrefix[text() = "oai_datacite"]') self.assertEqual(len(metadataPrefix), 1) - metadataPrefix = self._get_results(formats, '//o:metadataPrefix[text() = "oai_dc_urnresolver"]') + metadataPrefix = self._get_results( + formats, '//o:metadataPrefix[text() = "oai_dc_urnresolver"]' + ) self.assertEqual(len(metadataPrefix), 1) -# VERB: ListSets + # VERB: ListSets def test_list_sets(self): - response = self.client.get('/oai/?verb=ListSets') + response = self.client.get("/oai/?verb=ListSets") self.assertEqual(response.status_code, status.HTTP_200_OK) - sets = self._get_results(response.content, '//o:setSpec') + sets = self._get_results(response.content, "//o:setSpec") # urnresolver set should be hidden self.assertEquals(len(sets), 4) -# VERB: ListIdentifiers + # VERB: ListIdentifiers def test_list_identifiers(self): - ms = settings.OAI['BATCH_SIZE'] + ms = settings.OAI["BATCH_SIZE"] allRecords = CatalogRecord.objects.filter( - data_catalog__catalog_json__identifier__in=MetaxOAIServer._get_default_set_filter())[:ms] - response = self.client.get('/oai/?verb=ListIdentifiers&metadataPrefix=oai_dc') + data_catalog__catalog_json__identifier__in=MetaxOAIServer._get_default_set_filter() + )[:ms] + response = self.client.get("/oai/?verb=ListIdentifiers&metadataPrefix=oai_dc") self.assertEqual(response.status_code, status.HTTP_200_OK) - headers = self._get_results(response.content, '//o:header') + headers = self._get_results(response.content, "//o:header") self.assertTrue(len(headers) == len(allRecords), len(headers)) - response = self.client.get('/oai/?verb=ListIdentifiers&metadataPrefix=oai_datacite') + response = self.client.get("/oai/?verb=ListIdentifiers&metadataPrefix=oai_datacite") self.assertEqual(response.status_code, status.HTTP_200_OK) - headers = self._get_results(response.content, '//o:header') + headers = self._get_results(response.content, "//o:header") self.assertTrue(len(headers) == len(allRecords), len(headers)) - response = self.client.get('/oai/?verb=ListIdentifiers&metadataPrefix=oai_dc_urnresolver') + response = self.client.get("/oai/?verb=ListIdentifiers&metadataPrefix=oai_dc_urnresolver") self.assertEqual(response.status_code, status.HTTP_200_OK) errors = self._get_results(response.content, '//o:error[@code="badArgument"]') self.assertTrue(len(errors) == 1, response.content) def test_list_identifiers_for_drafts(self): - ''' Tests that drafts are not returned from ListIdentifiers ''' - ms = 
settings.OAI['BATCH_SIZE'] + """ Tests that drafts are not returned from ListIdentifiers """ + ms = settings.OAI["BATCH_SIZE"] allRecords = CatalogRecord.objects.filter( - data_catalog__catalog_json__identifier__in=MetaxOAIServer._get_default_set_filter())[:ms] + data_catalog__catalog_json__identifier__in=MetaxOAIServer._get_default_set_filter() + )[:ms] self._set_dataset_as_draft(25) self._set_dataset_as_draft(26) # headers should be reduced when some datasets are set as drafts - response = self.client.get('/oai/?verb=ListIdentifiers&metadataPrefix=oai_dc') + response = self.client.get("/oai/?verb=ListIdentifiers&metadataPrefix=oai_dc") self.assertEqual(response.status_code, status.HTTP_200_OK) - headers = self._get_results(response.content, '//o:header') + headers = self._get_results(response.content, "//o:header") self.assertFalse(len(headers) == len(allRecords), len(headers)) def test_list_identifiers_from_datacatalogs_set(self): - allRecords = DataCatalog.objects.all()[:settings.OAI['BATCH_SIZE']] - response = self.client.get('/oai/?verb=ListIdentifiers&metadataPrefix=oai_dc&set=datacatalogs') + allRecords = DataCatalog.objects.all()[: settings.OAI["BATCH_SIZE"]] + response = self.client.get( + "/oai/?verb=ListIdentifiers&metadataPrefix=oai_dc&set=datacatalogs" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - records = self._get_results(response.content, '//o:header') + records = self._get_results(response.content, "//o:header") self.assertTrue(len(records) == len(allRecords), len(records)) -# VERB: ListRecords + # VERB: ListRecords def test_list_records(self): - ms = settings.OAI['BATCH_SIZE'] + ms = settings.OAI["BATCH_SIZE"] allRecords = CatalogRecord.objects.filter( - data_catalog__catalog_json__identifier__in=MetaxOAIServer._get_default_set_filter())[:ms] + data_catalog__catalog_json__identifier__in=MetaxOAIServer._get_default_set_filter() + )[:ms] - response = self.client.get('/oai/?verb=ListRecords&metadataPrefix=oai_dc') + response = self.client.get("/oai/?verb=ListRecords&metadataPrefix=oai_dc") self.assertEqual(response.status_code, status.HTTP_200_OK) - records = self._get_results(response.content, '//o:record') + records = self._get_results(response.content, "//o:record") self.assertTrue(len(records) == len(allRecords)) - response = self.client.get('/oai/?verb=ListRecords&metadataPrefix=oai_fairdata_datacite') + response = self.client.get("/oai/?verb=ListRecords&metadataPrefix=oai_fairdata_datacite") self.assertEqual(response.status_code, status.HTTP_200_OK) - records = self._get_results(response.content, '//o:record') + records = self._get_results(response.content, "//o:record") self.assertTrue(len(records) == len(allRecords)) - response = self.client.get('/oai/?verb=ListRecords&metadataPrefix=oai_dc_urnresolver') + response = self.client.get("/oai/?verb=ListRecords&metadataPrefix=oai_dc_urnresolver") self.assertEqual(response.status_code, status.HTTP_200_OK) - records = self._get_results(response.content, '//o:record') + records = self._get_results(response.content, "//o:record") self.assertTrue(len(records) == len(allRecords)) def test_list_records_for_drafts(self): - ''' Tests that drafts are not returned from ListRecords ''' - ms = settings.OAI['BATCH_SIZE'] + """ Tests that drafts are not returned from ListRecords """ + ms = settings.OAI["BATCH_SIZE"] allRecords = CatalogRecord.objects.filter( - data_catalog__catalog_json__identifier__in=MetaxOAIServer._get_default_set_filter())[:ms] + 
data_catalog__catalog_json__identifier__in=MetaxOAIServer._get_default_set_filter() + )[:ms] self._set_dataset_as_draft(25) self._set_dataset_as_draft(26) - response = self.client.get('/oai/?verb=ListRecords&metadataPrefix=oai_fairdata_datacite') + response = self.client.get("/oai/?verb=ListRecords&metadataPrefix=oai_fairdata_datacite") self.assertEqual(response.status_code, status.HTTP_200_OK) - records = self._get_results(response.content, '//o:record') + records = self._get_results(response.content, "//o:record") self.assertFalse(len(records) == len(allRecords)) def test_list_records_urnresolver_from_datacatalogs_set(self): - response = self.client.get('/oai/?verb=ListRecords&metadataPrefix=oai_dc_urnresolver&set=datacatalogs') + response = self.client.get( + "/oai/?verb=ListRecords&metadataPrefix=oai_dc_urnresolver&set=datacatalogs" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) errors = self._get_results(response.content, '//o:error[@code="badArgument"]') self.assertTrue(len(errors) == 1, response.content) - response = self.client.get('/oai/?verb=ListRecords&metadataPrefix=oai_dc_urnresolver&set=ida_datasets') + response = self.client.get( + "/oai/?verb=ListRecords&metadataPrefix=oai_dc_urnresolver&set=ida_datasets" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) errors = self._get_results(response.content, '//o:error[@code="badArgument"]') self.assertTrue(len(errors) == 1, response.content) - response = self.client.get('/oai/?verb=ListRecords&metadataPrefix=oai_dc&set=invalid') + response = self.client.get("/oai/?verb=ListRecords&metadataPrefix=oai_dc&set=invalid") self.assertEqual(response.status_code, status.HTTP_200_OK) errors = self._get_results(response.content, '//o:error[@code="badArgument"]') self.assertTrue(len(errors) == 1, response.content) def test_list_records_urnresolver_for_datasets_set(self): - response = self.client.get('/oai/?verb=ListRecords&metadataPrefix=oai_dc_urnresolver&set=datasets') + response = self.client.get( + "/oai/?verb=ListRecords&metadataPrefix=oai_dc_urnresolver&set=datasets" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - headers = self._get_results(response.content, '//o:header') + headers = self._get_results(response.content, "//o:header") self.assertTrue(len(headers) > 0) - records = self._get_results(response.content, '//oai_dc:dc') + records = self._get_results(response.content, "//oai_dc:dc") for record_metadata in records: - urn_elements = self._get_results(record_metadata, 'dc:identifier[starts-with(text(), "urn")]') + urn_elements = self._get_results( + record_metadata, 'dc:identifier[starts-with(text(), "urn")]' + ) self.assertTrue(urn_elements is not None or len(urn_elements) > 0) - url_element = self._get_single_result(record_metadata, 'dc:identifier[starts-with(text(), "http")]') + url_element = self._get_single_result( + record_metadata, 'dc:identifier[starts-with(text(), "http")]' + ) self.assertTrue(url_element is not None) def test_list_records_from_datasets_set(self): - ms = settings.OAI['BATCH_SIZE'] + ms = settings.OAI["BATCH_SIZE"] allRecords = CatalogRecord.objects.filter( - data_catalog__catalog_json__identifier__in=MetaxOAIServer._get_default_set_filter())[:ms] + data_catalog__catalog_json__identifier__in=MetaxOAIServer._get_default_set_filter() + )[:ms] - response = self.client.get('/oai/?verb=ListRecords&metadataPrefix=oai_dc&set=datasets') + response = self.client.get("/oai/?verb=ListRecords&metadataPrefix=oai_dc&set=datasets") self.assertEqual(response.status_code, 
status.HTTP_200_OK) - records = self._get_results(response.content, '//o:record') + records = self._get_results(response.content, "//o:record") self.assertTrue(len(records) == len(allRecords), len(records)) def test_list_records_from_datacatalogs_set(self): - allRecords = DataCatalog.objects.all()[:settings.OAI['BATCH_SIZE']] + allRecords = DataCatalog.objects.all()[: settings.OAI["BATCH_SIZE"]] - response = self.client.get('/oai/?verb=ListRecords&metadataPrefix=oai_dc&set=datacatalogs') + response = self.client.get("/oai/?verb=ListRecords&metadataPrefix=oai_dc&set=datacatalogs") self.assertEqual(response.status_code, status.HTTP_200_OK) - records = self._get_results(response.content, '//o:record') + records = self._get_results(response.content, "//o:record") self.assertTrue(len(records) == len(allRecords), len(records)) def test_list_records_from_att_datasets_set(self): allRecords = CatalogRecord.objects.filter( - data_catalog__catalog_json__identifier__in=[ATT_CATALOG])[:settings.OAI['BATCH_SIZE']] + data_catalog__catalog_json__identifier__in=[ATT_CATALOG] + )[: settings.OAI["BATCH_SIZE"]] - response = self.client.get('/oai/?verb=ListRecords&metadataPrefix=oai_dc&set=att_datasets') + response = self.client.get("/oai/?verb=ListRecords&metadataPrefix=oai_dc&set=att_datasets") self.assertEqual(response.status_code, status.HTTP_200_OK) - records = self._get_results(response.content, '//o:record') + records = self._get_results(response.content, "//o:record") self.assertTrue(len(records) == len(allRecords), len(records)) def test_list_records_from_ida_datasets_set(self): allRecords = CatalogRecord.objects.filter( - data_catalog__catalog_json__identifier__in=[IDA_CATALOG])[:settings.OAI['BATCH_SIZE']] + data_catalog__catalog_json__identifier__in=[IDA_CATALOG] + )[: settings.OAI["BATCH_SIZE"]] - response = self.client.get('/oai/?verb=ListRecords&metadataPrefix=oai_dc&set=ida_datasets') + response = self.client.get("/oai/?verb=ListRecords&metadataPrefix=oai_dc&set=ida_datasets") self.assertEqual(response.status_code, status.HTTP_200_OK) - records = self._get_results(response.content, '//o:record') + records = self._get_results(response.content, "//o:record") self.assertTrue(len(records) == len(allRecords)) def test_list_records_using_invalid_metadata_prefix(self): - response = self.client.get('/oai/?verb=ListRecords&metadataPrefix=oai_notavailable') + response = self.client.get("/oai/?verb=ListRecords&metadataPrefix=oai_notavailable") self.assertEqual(response.status_code, status.HTTP_200_OK) errors = self._get_results(response.content, '//o:error[@code="cannotDisseminateFormat"]') self.assertTrue(len(errors) == 1, response.content) response = self.client.get( - '/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_notavailable' % self.identifier) + "/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_notavailable" % self.identifier + ) self.assertEqual(response.status_code, status.HTTP_200_OK) errors = self._get_results(response.content, '//o:error[@code="cannotDisseminateFormat"]') self.assertTrue(len(errors) == 1, response.content) def test_distinct_records_in_set(self): - att_resp = self.client.get('/oai/?verb=ListRecords&metadataPrefix=oai_dc&set=att_datasets') + att_resp = self.client.get("/oai/?verb=ListRecords&metadataPrefix=oai_dc&set=att_datasets") self.assertEqual(att_resp.status_code, status.HTTP_200_OK) - ida_resp = self.client.get('/oai/?verb=ListRecords&metadataPrefix=oai_dc&set=ida_datasets') + ida_resp = 
self.client.get("/oai/?verb=ListRecords&metadataPrefix=oai_dc&set=ida_datasets") self.assertEqual(ida_resp.status_code, status.HTTP_200_OK) - att_records = self._get_results(att_resp.content, '//o:record') + att_records = self._get_results(att_resp.content, "//o:record") att_identifier = att_records[0][0][0].text - ida_records = self._get_results(ida_resp.content, - '//o:record/o:header/o:identifier[text()="%s"]' % att_identifier) + ida_records = self._get_results( + ida_resp.content, + '//o:record/o:header/o:identifier[text()="%s"]' % att_identifier, + ) self.assertTrue(len(ida_records) == 0) -# VERB: GetRecord + # VERB: GetRecord def test_record_get_datacatalog(self): dc = DataCatalog.objects.get(pk=1) - dc_identifier = dc.catalog_json['identifier'] + dc_identifier = dc.catalog_json["identifier"] response = self.client.get( - '/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_dc' % dc_identifier) + "/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_dc" % dc_identifier + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - identifiers = self._get_results(response.content, - '//o:record/o:metadata/oai_dc:dc/dc:identifier[text()="%s"]' % - dc_identifier) + identifiers = self._get_results( + response.content, + '//o:record/o:metadata/oai_dc:dc/dc:identifier[text()="%s"]' % dc_identifier, + ) self.assertTrue(len(identifiers) == 1, response.content) def test_get_record(self): response = self.client.get( - '/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_dc' % self.identifier) + "/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_dc" % self.identifier + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - identifiers = self._get_results(response.content, - '//o:record/o:header/o:identifier[text()="%s"]' % self.identifier) + identifiers = self._get_results( + response.content, + '//o:record/o:header/o:identifier[text()="%s"]' % self.identifier, + ) self.assertTrue(len(identifiers) == 1, response.content) response = self.client.get( - '/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_fairdata_datacite' % self.identifier) - self.assertEqual(response.status_code, status.HTTP_200_OK) - identifiers = self._get_results(response.content, - '//o:record/o:metadata/datacite:oai_fairdata_datacite/' + - 'datacite:schemaVersion[text()="%s"]' % '4.1') + "/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_fairdata_datacite" + % self.identifier + ) + self.assertEqual(response.status_code, status.HTTP_200_OK) + identifiers = self._get_results( + response.content, + "//o:record/o:metadata/datacite:oai_fairdata_datacite/" + + 'datacite:schemaVersion[text()="%s"]' % "4.1", + ) self.assertTrue(len(identifiers) == 1, response.content) - identifiers = self._get_results(response.content, - '//o:record/o:header/o:identifier[text()="%s"]' % self.identifier) + identifiers = self._get_results( + response.content, + '//o:record/o:header/o:identifier[text()="%s"]' % self.identifier, + ) self.assertTrue(len(identifiers) == 1, response.content) def test_get_record_for_drafts(self): - ''' Tests that GetRecord doesn't return drafts ''' + """ Tests that GetRecord doesn't return drafts """ response = self.client.get( - '/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_dc' % self.identifier) + "/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_dc" % self.identifier + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - identifiers = self._get_results(response.content, - '//o:record/o:header/o:identifier[text()="%s"]' % self.identifier) + identifiers = 
self._get_results( + response.content, + '//o:record/o:header/o:identifier[text()="%s"]' % self.identifier, + ) self.assertTrue(len(identifiers) == 1, response.content) # Set same dataset as draft self._set_dataset_as_draft(self.id) response = self.client.get( - '/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_dc' % self.identifier) + "/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_dc" % self.identifier + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - identifiers = self._get_results(response.content, - '//o:record/o:header/o:identifier[text()="%s"]' % self.identifier) + identifiers = self._get_results( + response.content, + '//o:record/o:header/o:identifier[text()="%s"]' % self.identifier, + ) self.assertTrue(len(identifiers) == 0, response.content) def test_get_record_non_existing(self): - response = self.client.get('/oai/?verb=GetRecord&identifier=urn:non:existing&metadataPrefix=oai_dc') + response = self.client.get( + "/oai/?verb=GetRecord&identifier=urn:non:existing&metadataPrefix=oai_dc" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) errors = self._get_results(response.content, '//o:error[@code="idDoesNotExist"]') self.assertTrue(len(errors) == 1, response.content) def test_get_record_urnresolver(self): response = self.client.get( - '/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_dc_urnresolver' % self.identifier) + "/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_dc_urnresolver" % self.identifier + ) self.assertEqual(response.status_code, status.HTTP_200_OK) errors = self._get_results(response.content, '//o:error[@code="badArgument"]') self.assertTrue(len(errors) == 1, response.content) def test_get_record_datacatalog_unsupported_in_urnresolver(self): dc = DataCatalog.objects.get(pk=1) - dc_identifier = dc.catalog_json['identifier'] + dc_identifier = dc.catalog_json["identifier"] response = self.client.get( - '/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_dc_urnresolver' % dc_identifier) + "/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_dc_urnresolver" % dc_identifier + ) self.assertEqual(response.status_code, status.HTTP_200_OK) errors = self._get_results(response.content, '//o:error[@code="badArgument"]') self.assertTrue(len(errors) == 1, response.content) def test_get_record_datacatalog_unsupported_in_datacite(self): dc = DataCatalog.objects.get(pk=1) - dc_identifier = dc.catalog_json['identifier'] + dc_identifier = dc.catalog_json["identifier"] response = self.client.get( - '/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_datacite' % dc_identifier) + "/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_datacite" % dc_identifier + ) self.assertEqual(response.status_code, status.HTTP_200_OK) errors = self._get_results(response.content, '//o:error[@code="badArgument"]') self.assertTrue(len(errors) == 1, response.content) def test_get_record_legacy_catalog_datasets_are_not_urnresolved(self): cr = CatalogRecord.objects.get(identifier=self.identifier) - cr.data_catalog.catalog_json['identifier'] = settings.LEGACY_CATALOGS[0] + cr.data_catalog.catalog_json["identifier"] = settings.LEGACY_CATALOGS[0] cr.data_catalog.force_save() response = self.client.get( - '/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_dc_urnresolver&set=datasets' % self.identifier) - self.assertEqual(response.status_code, status.HTTP_200_OK) - identifiers = self._get_results(response.content, - '//o:record/o:metadata/oai_dc:dc/dc:identifier[text()="%s"]' % - self.preferred_identifier) + 
"/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_dc_urnresolver&set=datasets" + % self.identifier + ) + self.assertEqual(response.status_code, status.HTTP_200_OK) + identifiers = self._get_results( + response.content, + '//o:record/o:metadata/oai_dc:dc/dc:identifier[text()="%s"]' + % self.preferred_identifier, + ) self.assertTrue(len(identifiers) == 0, response.content) -# OAI-PMH Utilities & functionalities + # OAI-PMH Utilities & functionalities def test_write_oai_dc_with_lang(self): from metax_api.api.oaipmh.base.view import oai_dc_writer_with_lang + e = Element("Test") md = { - 'title': [{'value': 'title1', 'lang': 'en'}, {'value': 'title2', 'lang': 'fi'}], - 'description': [{'value': 'value'}] + "title": [ + {"value": "title1", "lang": "en"}, + {"value": "title2", "lang": "fi"}, + ], + "description": [{"value": "value"}], } - metadata = common.Metadata('', md) + metadata = common.Metadata("", md) oai_dc_writer_with_lang(e, metadata) result = str(lxml.etree.tostring(e, pretty_print=True)) self.assertTrue('title1' in result) self.assertTrue('title2' in result) - self.assertTrue('value' in result) + self.assertTrue("value" in result) def test_get_oai_dc_metadata_dataset(self): cr = CatalogRecord.objects.get(pk=11) from metax_api.api.oaipmh.base.metax_oai_server import MetaxOAIServer + s = MetaxOAIServer() md = s._get_oai_dc_metadata(cr, cr.research_dataset) - self.assertTrue('identifier' in md) - self.assertTrue('title' in md) - self.assertTrue('lang' in md['title'][0]) + self.assertTrue("identifier" in md) + self.assertTrue("title" in md) + self.assertTrue("lang" in md["title"][0]) def test_get_oai_dc_metadata_datacatalog(self): dc = DataCatalog.objects.get(pk=1) from metax_api.api.oaipmh.base.metax_oai_server import MetaxOAIServer + s = MetaxOAIServer() md = s._get_oai_dc_metadata(dc, dc.catalog_json) - self.assertTrue('identifier' in md) - self.assertTrue('title' in md) - self.assertTrue('lang' in md['title'][0]) + self.assertTrue("identifier" in md) + self.assertTrue("title" in md) + self.assertTrue("lang" in md["title"][0]) def test_sensitive_fields_are_removed(self): """ Ensure some sensitive fields are never present in output of OAI-PMH apis """ - sensitive_field_values = ['email@mail.com', '999-123-123', '999-456-456'] + sensitive_field_values = ["email@mail.com", "999-123-123", "999-456-456"] def _check_fields(content): """ @@ -411,21 +469,30 @@ def _check_fields(content): of field name, since the field names might be different in Datacite etc other formats. 
""" for sensitive_field_value in sensitive_field_values: - self.assertEqual(sensitive_field_value not in str(content), True, - 'field %s should have been stripped' % sensitive_field_value) + self.assertEqual( + sensitive_field_value not in str(content), + True, + "field %s should have been stripped" % sensitive_field_value, + ) for cr in CatalogRecord.objects.filter(identifier=self.identifier): - cr.research_dataset['curator'][0].update({ - 'email': sensitive_field_values[0], - 'phone': sensitive_field_values[1], - 'telephone': sensitive_field_values[2], - }) + cr.research_dataset["curator"][0].update( + { + "email": sensitive_field_values[0], + "phone": sensitive_field_values[1], + "telephone": sensitive_field_values[2], + } + ) cr.force_save() - response = self.client.get('/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_dc' % self.identifier) + response = self.client.get( + "/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_dc" % self.identifier + ) self.assertEqual(response.status_code, status.HTTP_200_OK) _check_fields(response.content) - response = self.client.get('/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_datacite' % self.identifier) + response = self.client.get( + "/oai/?verb=GetRecord&identifier=%s&metadataPrefix=oai_datacite" % self.identifier + ) self.assertEqual(response.status_code, status.HTTP_200_OK) _check_fields(response.content) diff --git a/src/metax_api/tests/api/oaipmh/syke.py b/src/metax_api/tests/api/oaipmh/syke.py index 647469a4..2c0622a2 100755 --- a/src/metax_api/tests/api/oaipmh/syke.py +++ b/src/metax_api/tests/api/oaipmh/syke.py @@ -17,35 +17,35 @@ class SYKEOAIPMHReadTests(APITestCase, TestClassUtils): - _namespaces = {'o': 'http://www.openarchives.org/OAI/2.0/', - 'oai_dc': "http://www.openarchives.org/OAI/2.0/oai_dc/", - 'dc': "http://purl.org/dc/elements/1.1/", - 'dct': "http://purl.org/dc/terms/", - 'datacite': 'http://schema.datacite.org/oai/oai-1.0/'} + _namespaces = { + "o": "http://www.openarchives.org/OAI/2.0/", + "oai_dc": "http://www.openarchives.org/OAI/2.0/oai_dc/", + "dc": "http://purl.org/dc/elements/1.1/", + "dct": "http://purl.org/dc/terms/", + "datacite": "http://schema.datacite.org/oai/oai-1.0/", + } @classmethod def setUpClass(cls): """ Loaded only once for test cases inside this class. 
""" - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(SYKEOAIPMHReadTests, cls).setUpClass() def setUp(self): cr = CatalogRecord.objects.get(pk=1) cr.data_catalog.catalog_json["identifier"] = "urn:nbn:fi:att:data-catalog-harvest-syke" cr.data_catalog.force_save() - cr.research_dataset.update({ - "preferred_identifier": "urn:nbn:fi:csc-kata20170613100856741858", - "other_identifier": [ - { - "notation": "{55AB842F-9CED-4E80-A7E5-07A54F0AE4A4}" - } - ] - }) + cr.research_dataset.update( + { + "preferred_identifier": "urn:nbn:fi:csc-kata20170613100856741858", + "other_identifier": [{"notation": "{55AB842F-9CED-4E80-A7E5-07A54F0AE4A4}"}], + } + ) cr.force_save() self.identifier = cr.identifier - self.pref_identifier = cr.research_dataset['preferred_identifier'] + self.pref_identifier = cr.research_dataset["preferred_identifier"] self.dc = cr.data_catalog.catalog_json["identifier"] self._use_http_authorization() @@ -56,13 +56,19 @@ def _get_results(self, data, xpath): return root.xpath(xpath, namespaces=self._namespaces) def test_get_urn_resolver_record(self): - response = self.client.get('/oai/?verb=ListRecords&metadataPrefix=oai_dc_urnresolver&set=datasets') + response = self.client.get( + "/oai/?verb=ListRecords&metadataPrefix=oai_dc_urnresolver&set=datasets" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - identifiers = self._get_results(response.content, - '//o:record/o:metadata/oai_dc:dc/dc:identifier[text()="%s"]' % self.pref_identifier) + identifiers = self._get_results( + response.content, + '//o:record/o:metadata/oai_dc:dc/dc:identifier[text()="%s"]' % self.pref_identifier, + ) self.assertTrue(len(identifiers) == 1, response.content) - syke_url = SYKE_URL_PREFIX_TEMPLATE % '{55AB842F-9CED-4E80-A7E5-07A54F0AE4A4}' - identifiers = self._get_results(response.content, - '//o:record/o:metadata/oai_dc:dc/dc:identifier[text()="%s"]' % syke_url) + syke_url = SYKE_URL_PREFIX_TEMPLATE % "{55AB842F-9CED-4E80-A7E5-07A54F0AE4A4}" + identifiers = self._get_results( + response.content, + '//o:record/o:metadata/oai_dc:dc/dc:identifier[text()="%s"]' % syke_url, + ) self.assertTrue(len(identifiers) == 1, response.content) diff --git a/src/metax_api/tests/api/rest/base/serializers/file_serializer.py b/src/metax_api/tests/api/rest/base/serializers/file_serializer.py index 5db7e04d..ab40a016 100755 --- a/src/metax_api/tests/api/rest/base/serializers/file_serializer.py +++ b/src/metax_api/tests/api/rest/base/serializers/file_serializer.py @@ -16,26 +16,27 @@ class LightFileSerializerTests(APITestCase, TestClassUtils): - @classmethod def setUpClass(cls): - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super().setUpClass() def test_ls_field_list(self): - lfs_field_list = LightFileSerializer.ls_field_list(['identifier', 'parent_directory', 'file_storage']) - self.assertEqual('parent_directory__identifier' in lfs_field_list, True) - self.assertEqual('file_storage__file_storage_json' in lfs_field_list, True) + lfs_field_list = LightFileSerializer.ls_field_list( + ["identifier", "parent_directory", "file_storage"] + ) + self.assertEqual("parent_directory__identifier" in lfs_field_list, True) + self.assertEqual("file_storage__file_storage_json" in lfs_field_list, True) queryset = File.objects.filter(id__in=[1, 2]).values(*lfs_field_list) lfs_output = LightFileSerializer.serialize(queryset) self.assertEqual(len(lfs_output), 2) f = lfs_output[0] - 
self.assertEqual('parent_directory' in f, True)
-        self.assertEqual('identifier' in f['parent_directory'], True)
-        self.assertEqual('file_storage' in f, True)
-        self.assertEqual('identifier' in f['file_storage'], True)
+        self.assertEqual("parent_directory" in f, True)
+        self.assertEqual("identifier" in f["parent_directory"], True)
+        self.assertEqual("file_storage" in f, True)
+        self.assertEqual("identifier" in f["file_storage"], True)

     def test_serializer_outputs_are_the_same(self):
         fs_output = FileSerializer(File.objects.get(pk=1)).data
@@ -43,7 +44,7 @@ def test_serializer_outputs_are_the_same(self):
         lfs_field_list = LightFileSerializer.ls_field_list()
         lfs_output = LightFileSerializer.serialize(File.objects.values(*lfs_field_list).get(pk=1))

-        self.assertEqual(len(fs_output), len(lfs_output), 'number of keys should match')
+        self.assertEqual(len(fs_output), len(lfs_output), "number of keys should match")
         self._assert_keys_match_in_dict(fs_output, lfs_output)

     def test_serializer_error_reporting(self):
@@ -57,11 +58,11 @@ def test_serializer_error_reporting(self):

     def _assert_keys_match_in_dict(self, ser_dict, ls_dict):
         for key, value in ser_dict.items():
-            self.assertEqual(key in ls_dict, True, 'key should be present in boths outputs')
+            self.assertEqual(key in ls_dict, True, "key should be present in both outputs")
             if isinstance(ls_dict[key], datetime):
                 # in the api datetimes are converted to str by django later - probably the value is ok here
                 pass
             elif isinstance(value, dict):
                 self._assert_keys_match_in_dict(value, ls_dict[key])
             else:
-                self.assertEqual(value, ls_dict[key], 'value should be same both outputs')
+                self.assertEqual(value, ls_dict[key], "value should be the same in both outputs")
diff --git a/src/metax_api/tests/api/rest/base/serializers/serializer_utils.py b/src/metax_api/tests/api/rest/base/serializers/serializer_utils.py
index 859708cf..4f45e303 100755
--- a/src/metax_api/tests/api/rest/base/serializers/serializer_utils.py
+++ b/src/metax_api/tests/api/rest/base/serializers/serializer_utils.py
@@ -13,7 +13,7 @@
 from metax_api.models import CatalogRecord
 from metax_api.tests.utils import TestClassUtils, get_json_schema, test_data_file_path

-schema = get_json_schema('ida_dataset')
+schema = get_json_schema("ida_dataset")


 class ValidateJsonTests(APITestCase, TestClassUtils):
@@ -24,7 +24,7 @@ class ValidateJsonTests(APITestCase, TestClassUtils):

     @classmethod
     def setUpClass(cls):
-        call_command('loaddata', test_data_file_path, verbosity=0)
+        call_command("loaddata", test_data_file_path, verbosity=0)
         super().setUpClass()

     def test_validate_date_field(self):
@@ -33,27 +33,27 @@ def test_validate_date_field(self):
         have the optional "format": "date" applied, and use a format checker
         during schema validation. Ensure it works.
""" - rd = CatalogRecord.objects.values('research_dataset').get(pk=1)['research_dataset'] + rd = CatalogRecord.objects.values("research_dataset").get(pk=1)["research_dataset"] - rd['issued'] = '2018-09-29' + rd["issued"] = "2018-09-29" try: validate_json(rd, schema) except ValidationError: - self.fail('Validation raised ValidationError - should have validated ok') + self.fail("Validation raised ValidationError - should have validated ok") - rd['issued'] = 'absolutely should fail' + rd["issued"] = "absolutely should fail" with self.assertRaises(ValidationError): validate_json(rd, schema) - rd['issued'] = '2018-09-29T20:20:20+03:00' + rd["issued"] = "2018-09-29T20:20:20+03:00" with self.assertRaises(ValidationError): validate_json(rd, schema) - rd['issued'] = '2018-09-29T20:20:20' + rd["issued"] = "2018-09-29T20:20:20" with self.assertRaises(ValidationError): validate_json(rd, schema) - rd['issued'] = '2018-09-29 20:20:20' + rd["issued"] = "2018-09-29 20:20:20" with self.assertRaises(ValidationError): validate_json(rd, schema) @@ -63,37 +63,37 @@ def test_validate_date_time_field(self): have the optional "format": "date-time" applied, and use a format checker during schema validation. Ensure it works. """ - rd = CatalogRecord.objects.values('research_dataset').get(pk=1)['research_dataset'] + rd = CatalogRecord.objects.values("research_dataset").get(pk=1)["research_dataset"] - rd['modified'] = '2018-09-29T20:20:20+03:00' + rd["modified"] = "2018-09-29T20:20:20+03:00" try: validate_json(rd, schema) except ValidationError: - self.fail('Validation raised ValidationError - should have validated ok') + self.fail("Validation raised ValidationError - should have validated ok") # note: jsonschema.FormatChecker accepts below also - rd['modified'] = '2018-09-29T20:20:20.123456789+03:00' + rd["modified"] = "2018-09-29T20:20:20.123456789+03:00" try: validate_json(rd, schema) except ValidationError: - self.fail('Validation raised ValidationError - should have validated ok') + self.fail("Validation raised ValidationError - should have validated ok") - rd['modified'] = 'absolutely should fail' + rd["modified"] = "absolutely should fail" with self.assertRaises(ValidationError): validate_json(rd, schema) - rd['modified'] = '2018-09-29' + rd["modified"] = "2018-09-29" with self.assertRaises(ValidationError): validate_json(rd, schema) - rd['modified'] = '2018-09-29 20:20:20' + rd["modified"] = "2018-09-29 20:20:20" with self.assertRaises(ValidationError): validate_json(rd, schema) - rd['modified'] = '2018-09-29T20:20:20' + rd["modified"] = "2018-09-29T20:20:20" with self.assertRaises(ValidationError): validate_json(rd, schema) - rd['modified'] = '2018-09-29 20:20:20+03:00' + rd["modified"] = "2018-09-29 20:20:20+03:00" with self.assertRaises(ValidationError): validate_json(rd, schema) diff --git a/src/metax_api/tests/api/rest/base/views/apierrors/read.py b/src/metax_api/tests/api/rest/base/views/apierrors/read.py index da61ec61..a3338f29 100755 --- a/src/metax_api/tests/api/rest/base/views/apierrors/read.py +++ b/src/metax_api/tests/api/rest/base/views/apierrors/read.py @@ -36,9 +36,7 @@ def setUp(self): super(ApiErrorReadBasicTests, self).setUp() rmtree(settings.ERROR_FILES_PATH, ignore_errors=True) makedirs(settings.ERROR_FILES_PATH) - self._use_http_authorization( - username="metax" - ) + self._use_http_authorization(username="metax") def _assert_fields_presence(self, response): """ @@ -87,9 +85,7 @@ def test_get_error_details(self): self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) 
self.assertEqual("identifier" in response.data[0], True, response.data) - response = self.client.get( - "/rest/apierrors/%s" % response.data[0]["identifier"] - ) + response = self.client.get("/rest/apierrors/%s" % response.data[0]["identifier"]) self._assert_fields_presence(response) self.assertEqual( "data_catalog" in response.data["response"], True, response.data["response"] @@ -111,12 +107,8 @@ def test_delete_error_details(self): self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) response = self.client.get("/rest/apierrors") - response = self.client.delete( - "/rest/apierrors/%s" % response.data[0]["identifier"] - ) - self.assertEqual( - response.status_code, status.HTTP_204_NO_CONTENT, response.data - ) + response = self.client.delete("/rest/apierrors/%s" % response.data[0]["identifier"]) + self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) response = self.client.get("/rest/apierrors") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) @@ -129,13 +121,9 @@ def test_delete_all_error_details(self): cr_1.pop("data_catalog") # causes an error response = self.client.post("/rest/datasets", cr_1, format="json") - self.assertEqual( - response.status_code, status.HTTP_400_BAD_REQUEST, response.data - ) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) response = self.client.post("/rest/datasets", cr_1, format="json") - self.assertEqual( - response.status_code, status.HTTP_400_BAD_REQUEST, response.data - ) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) # ensure something was produced... response = self.client.get("/rest/apierrors") @@ -160,15 +148,11 @@ def test_bulk_operation_produces_error_entry(self): response = self.client.get("/rest/apierrors") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get( - "/rest/apierrors/%s" % response.data[0]["identifier"] - ) + response = self.client.get("/rest/apierrors/%s" % response.data[0]["identifier"]) self._assert_fields_presence(response) self.assertEqual("other" in response.data, True, response.data) self.assertEqual("bulk_request" in response.data["other"], True, response.data) - self.assertEqual( - "data_row_count" in response.data["other"], True, response.data - ) + self.assertEqual("data_row_count" in response.data["other"], True, response.data) def test_api_permitted_only_to_metax_user(self): # uses testuser by default diff --git a/src/metax_api/tests/api/rest/base/views/common/auth.py b/src/metax_api/tests/api/rest/base/views/common/auth.py index 45bae99a..2d477554 100755 --- a/src/metax_api/tests/api/rest/base/views/common/auth.py +++ b/src/metax_api/tests/api/rest/base/views/common/auth.py @@ -179,9 +179,7 @@ class ApiEndUserAdditionalProjects(CatalogRecordApiWriteCommon): def setUp(self): super().setUp() - self._use_http_authorization( - method="bearer", token=get_test_oidc_token(new_proxy=True) - ) + self._use_http_authorization(method="bearer", token=get_test_oidc_token(new_proxy=True)) self._mock_token_validation_succeeds() def tearDown(self): @@ -189,9 +187,7 @@ def tearDown(self): try: os.remove(settings.ADDITIONAL_USER_PROJECTS_PATH) except: - _logger.info( - "error removing file from %s" % settings.ADDITIONAL_USER_PROJECTS_PATH - ) + _logger.info("error removing file from %s" % settings.ADDITIONAL_USER_PROJECTS_PATH) @responses.activate def test_successful_read(self): @@ -203,9 +199,7 @@ def test_successful_read(self): json.dump(testdata, 
testfile, indent=4) os.chmod(settings.ADDITIONAL_USER_PROJECTS_PATH, 0o400) - response = self.client.get( - "/rest/files?project_identifier=project_x", format="json" - ) + response = self.client.get("/rest/files?project_identifier=project_x", format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) @responses.activate @@ -213,9 +207,7 @@ def test_no_file(self): """ Projects are fetched from token when local file is not available. """ - response = self.client.get( - "/rest/files?project_identifier=2001036", format="json" - ) + response = self.client.get("/rest/files?project_identifier=2001036", format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) @responses.activate @@ -228,9 +220,7 @@ def test_bad_file_keys(self): json.dump(testdata, testfile, indent=4) os.chmod(settings.ADDITIONAL_USER_PROJECTS_PATH, 0o400) - response = self.client.get( - "/rest/files?project_identifier=project_x", format="json" - ) + response = self.client.get("/rest/files?project_identifier=project_x", format="json") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) @responses.activate @@ -243,9 +233,7 @@ def test_bad_file_values(self): json.dump(testdata, testfile, indent=4) os.chmod(settings.ADDITIONAL_USER_PROJECTS_PATH, 0o400) - response = self.client.get( - "/rest/files?project_identifier=project_x", format="json" - ) + response = self.client.get("/rest/files?project_identifier=project_x", format="json") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) @responses.activate @@ -258,7 +246,5 @@ def test_bad_file_successful(self): json.dump(testdata, testfile, indent=4) os.chmod(settings.ADDITIONAL_USER_PROJECTS_PATH, 0o400) - response = self.client.get( - "/rest/files?project_identifier=2001036", format="json" - ) + response = self.client.get("/rest/files?project_identifier=2001036", format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) diff --git a/src/metax_api/tests/api/rest/base/views/common/read.py b/src/metax_api/tests/api/rest/base/views/common/read.py index 9048aa5f..6d4887d7 100755 --- a/src/metax_api/tests/api/rest/base/views/common/read.py +++ b/src/metax_api/tests/api/rest/base/views/common/read.py @@ -28,11 +28,11 @@ def test_removed_query_param(self): obj2 = CatalogRecord.objects.get(pk=2) obj2.removed = True obj2.force_save() - response = self.client.get('/rest/datasets/1') + response = self.client.get("/rest/datasets/1") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - response = self.client.get('/rest/datasets/1?removed=true') + response = self.client.get("/rest/datasets/1?removed=true") self.assertEqual(response.status_code, status.HTTP_200_OK) - response = self.client.get('/rest/datasets/metadata_version_identifiers') + response = self.client.get("/rest/datasets/metadata_version_identifiers") self.assertEqual(obj.metadata_version_identifier not in response.data, True) self.assertEqual(obj2.metadata_version_identifier not in response.data, True) @@ -42,31 +42,39 @@ def test_removed_query_param(self): obj2 = File.objects.get(pk=2) obj2.removed = True obj2.force_save() - response = self.client.get('/rest/files/1') + response = self.client.get("/rest/files/1") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - response = self.client.get('/rest/files/1?removed=true') + response = self.client.get("/rest/files/1?removed=true") self.assertEqual(response.status_code, status.HTTP_200_OK) def 
test_removed_parameter_gets_correct_amount_of_objects(self):
-        path = '/rest/datasets'
+        path = "/rest/datasets"
         objects = CatalogRecord.objects.all().values()

-        results = self.client.get('{0}?no_pagination&removed=false'.format(path)).json()
+        results = self.client.get("{0}?no_pagination&removed=false".format(path)).json()
         initial_amt = len(results)

-        results = self.client.get('{0}?no_pagination&removed=true'.format(path)).json()
-        self.assertEqual(len(results), 0, "Without removed objects remove=true should return 0 results")
+        results = self.client.get("{0}?no_pagination&removed=true".format(path)).json()
+        self.assertEqual(
+            len(results),
+            0,
+            "Without removed objects removed=true should return 0 results",
+        )

         self._use_http_authorization()
         amt_to_delete = 2
         for i in range(amt_to_delete):
-            response = self.client.delete('{0}/{1}'.format(path, objects[i]['id']))
+            response = self.client.delete("{0}/{1}".format(path, objects[i]["id"]))
             self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)

-        results = self.client.get('{0}?no_pagination&removed=false'.format(path)).json()
-        self.assertEqual(len(results), initial_amt - amt_to_delete, "Non-removed object amount is incorrect")
+        results = self.client.get("{0}?no_pagination&removed=false".format(path)).json()
+        self.assertEqual(
+            len(results),
+            initial_amt - amt_to_delete,
+            "Non-removed object amount is incorrect",
+        )

-        results = self.client.get('{0}?no_pagination&removed=true'.format(path)).json()
+        results = self.client.get("{0}?no_pagination&removed=true".format(path)).json()
         self.assertEqual(len(results), amt_to_delete, "Removed object amount is incorrect")

@@ -77,41 +85,59 @@ class ApiReadPaginationTests(CatalogRecordApiReadCommon):
     """

     def test_read_catalog_record_list_pagination_1(self):
-        response = self.client.get('/rest/datasets?limit=2&offset=0')
+        response = self.client.get("/rest/datasets?limit=2&offset=0")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['results']), 2, 'There should have been exactly two results')
-        self.assertEqual(response.data['results'][0]['id'], 1, 'Id of first result should have been 1')
+        self.assertEqual(
+            len(response.data["results"]),
+            2,
+            "There should have been exactly two results",
+        )
+        self.assertEqual(
+            response.data["results"][0]["id"],
+            1,
+            "Id of first result should have been 1",
+        )

     def test_read_catalog_record_list_pagination_2(self):
-        response = self.client.get('/rest/datasets?limit=2&offset=2')
+        response = self.client.get("/rest/datasets?limit=2&offset=2")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['results']), 2, 'There should have been exactly two results')
-        self.assertEqual(response.data['results'][0]['id'], 3, 'Id of first result should have been 3')
+        self.assertEqual(
+            len(response.data["results"]),
+            2,
+            "There should have been exactly two results",
+        )
+        self.assertEqual(
+            response.data["results"][0]["id"],
+            3,
+            "Id of first result should have been 3",
+        )

     def test_disable_pagination(self):
-        response = self.client.get('/rest/datasets?no_pagination=true')
+        response = self.client.get("/rest/datasets?no_pagination=true")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual('next' not in response.data, True)
-        self.assertEqual('results' not in response.data, True)
+        self.assertEqual("next" not in response.data, True)
+        self.assertEqual("results" not in response.data, True)

     def test_pagination_ordering(self):
         limit
= 5 - for order in ('preservation_state', '-preservation_state'): + for order in ("preservation_state", "-preservation_state"): # vary offset from 0 to 20, in increments of 5 for offset in range(0, 20, 5): - response = self.client.get(f'/rest/datasets?limit={limit}&offset={offset}&ordering={order}') + response = self.client.get( + f"/rest/datasets?limit={limit}&offset={offset}&ordering={order}" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - from_api = [cr['preservation_state'] for cr in response.data['results']] + from_api = [cr["preservation_state"] for cr in response.data["results"]] from_db = [ - r for r in CatalogRecord.objects - .filter() + r + for r in CatalogRecord.objects.filter() .order_by(order) - .values_list('preservation_state', flat=True)[offset:offset + limit] + .values_list("preservation_state", flat=True)[offset : offset + limit] ] self.assertEqual(from_api, from_db) @@ -130,33 +156,35 @@ class ApiReadHTTPHeaderTests(CatalogRecordApiReadCommon): def test_get_with_if_modified_since_header_ok(self): cr = CatalogRecord.objects.get(pk=self.pk) date_modified = cr.date_modified - date_modified_in_gmt = timezone.localtime(date_modified, timezone=tz('GMT')) + date_modified_in_gmt = timezone.localtime(date_modified, timezone=tz("GMT")) - if_modified_since_header_value = date_modified_in_gmt.strftime('%a, %d %b %Y %H:%M:%S GMT') - headers = {'HTTP_IF_MODIFIED_SINCE': if_modified_since_header_value} - response = self.client.get('/rest/datasets/%s' % self.identifier, **headers) + if_modified_since_header_value = date_modified_in_gmt.strftime("%a, %d %b %Y %H:%M:%S GMT") + headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value} + response = self.client.get("/rest/datasets/%s" % self.identifier, **headers) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) if_modified_since_header_value = (date_modified_in_gmt + timedelta(seconds=1)).strftime( - '%a, %d %b %Y %H:%M:%S GMT') - headers = {'HTTP_IF_MODIFIED_SINCE': if_modified_since_header_value} - response = self.client.get('/rest/datasets/%s' % self.identifier, **headers) + "%a, %d %b %Y %H:%M:%S GMT" + ) + headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value} + response = self.client.get("/rest/datasets/%s" % self.identifier, **headers) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) if_modified_since_header_value = (date_modified_in_gmt - timedelta(seconds=1)).strftime( - '%a, %d %b %Y %H:%M:%S GMT') - headers = {'HTTP_IF_MODIFIED_SINCE': if_modified_since_header_value} - response = self.client.get('/rest/datasets/%s' % self.identifier, **headers) + "%a, %d %b %Y %H:%M:%S GMT" + ) + headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value} + response = self.client.get("/rest/datasets/%s" % self.identifier, **headers) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_with_if_modified_since_header_syntax_error(self): cr = CatalogRecord.objects.get(pk=self.pk) date_modified = cr.date_modified - date_modified_in_gmt = timezone.localtime(date_modified, timezone=tz('GMT')) + date_modified_in_gmt = timezone.localtime(date_modified, timezone=tz("GMT")) - if_modified_since_header_value = date_modified_in_gmt.strftime('%a, %d %b %Y %H:%M:%S UTC') - headers = {'HTTP_IF_MODIFIED_SINCE': if_modified_since_header_value} - response = self.client.get('/rest/datasets/%s' % self.identifier, **headers) + if_modified_since_header_value = date_modified_in_gmt.strftime("%a, %d %b %Y %H:%M:%S UTC") + headers = {"HTTP_IF_MODIFIED_SINCE": 
if_modified_since_header_value} + response = self.client.get("/rest/datasets/%s" % self.identifier, **headers) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # @@ -168,41 +196,43 @@ def test_get_with_if_modified_since_header_syntax_error(self): def test_list_get_with_if_modified_since_header_ok(self): cr = CatalogRecord.objects.get(pk=self.pk) date_modified = cr.date_modified - date_modified_in_gmt = timezone.localtime(date_modified, timezone=tz('GMT')) + date_modified_in_gmt = timezone.localtime(date_modified, timezone=tz("GMT")) - if_modified_since_header_value = date_modified_in_gmt.strftime('%a, %d %b %Y %H:%M:%S GMT') - headers = {'HTTP_IF_MODIFIED_SINCE': if_modified_since_header_value} - response = self.client.get('/rest/datasets?limit=100', **headers) + if_modified_since_header_value = date_modified_in_gmt.strftime("%a, %d %b %Y %H:%M:%S GMT") + headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value} + response = self.client.get("/rest/datasets?limit=100", **headers) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(len(response.data.get('results')) == 6) + self.assertTrue(len(response.data.get("results")) == 6) if_modified_since_header_value = (date_modified_in_gmt + timedelta(seconds=1)).strftime( - '%a, %d %b %Y %H:%M:%S GMT') - headers = {'HTTP_IF_MODIFIED_SINCE': if_modified_since_header_value} - response = self.client.get('/rest/datasets?limit=100', **headers) + "%a, %d %b %Y %H:%M:%S GMT" + ) + headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value} + response = self.client.get("/rest/datasets?limit=100", **headers) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(len(response.data.get('results')) == 6) + self.assertTrue(len(response.data.get("results")) == 6) # The asserts below may brake if the date_modified timestamps or the amount of test data objects are altered # in the test data if_modified_since_header_value = (date_modified_in_gmt - timedelta(seconds=1)).strftime( - '%a, %d %b %Y %H:%M:%S GMT') - headers = {'HTTP_IF_MODIFIED_SINCE': if_modified_since_header_value} - response = self.client.get('/rest/datasets?limit=100', **headers) + "%a, %d %b %Y %H:%M:%S GMT" + ) + headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value} + response = self.client.get("/rest/datasets?limit=100", **headers) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(len(response.data.get('results')) > 6) - self.assertTrue(len(response.data.get('results')) == 28) + self.assertTrue(len(response.data.get("results")) > 6) + self.assertTrue(len(response.data.get("results")) == 28) # should also work with records that have been recently created, and date_modified is empty cr.date_created = date_modified cr.date_modified = None cr.force_save() - response = self.client.get('/rest/datasets?limit=100', **headers) + response = self.client.get("/rest/datasets?limit=100", **headers) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(len(response.data.get('results')) > 6) - self.assertTrue(len(response.data.get('results')) == 28) + self.assertTrue(len(response.data.get("results")) > 6) + self.assertTrue(len(response.data.get("results")) == 28) class ApiReadQueryParamTests(CatalogRecordApiReadCommon): @@ -215,28 +245,28 @@ def test_return_requested_fields_only(self): """ While the param ?fields works with write operations too, the primary use case is when GETting. 
""" - response = self.client.get('/rest/datasets?fields=identifier') + response = self.client.get("/rest/datasets?fields=identifier") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('identifier' in response.data['results'][0], True) - self.assertEqual(len(response.data['results'][0].keys()), 1) - self.assertEqual(len(response.data['results'][1].keys()), 1) + self.assertEqual("identifier" in response.data["results"][0], True) + self.assertEqual(len(response.data["results"][0].keys()), 1) + self.assertEqual(len(response.data["results"][1].keys()), 1) - response = self.client.get('/rest/datasets/1?fields=identifier') + response = self.client.get("/rest/datasets/1?fields=identifier") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('identifier' in response.data, True) + self.assertEqual("identifier" in response.data, True) self.assertEqual(len(response.data.keys()), 1) - response = self.client.get('/rest/datasets/1?fields=identifier,data_catalog') - self.assertEqual('identifier' in response.data, True) - self.assertEqual('data_catalog' in response.data, True) + response = self.client.get("/rest/datasets/1?fields=identifier,data_catalog") + self.assertEqual("identifier" in response.data, True) + self.assertEqual("data_catalog" in response.data, True) self.assertEqual(len(response.data.keys()), 2) - response = self.client.get('/rest/datasets/1?fields=not_found') + response = self.client.get("/rest/datasets/1?fields=not_found") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # Anonymous user using fields parameter and not including research_dataset should not cause crashing self.client._credentials = {} - response = self.client.get('/rest/datasets/1?fields=identifier') + response = self.client.get("/rest/datasets/1?fields=identifier") self.assertEqual(response.status_code, status.HTTP_200_OK) def test_checksum_field_for_file(self): @@ -244,20 +274,23 @@ def test_checksum_field_for_file(self): Check that checksum field works correctly """ - self._use_http_authorization('metax') - response = self.client.get('/rest/files/1?fields=checksum') + self._use_http_authorization("metax") + response = self.client.get("/rest/files/1?fields=checksum") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(response.data.get('checksum'), 'Checksum JSON should be returned') - self.assertTrue(response.data['checksum'].get('algorithm')) - self.assertTrue(response.data['checksum'].get('checked')) - self.assertTrue(response.data['checksum'].get('value')) + self.assertTrue(response.data.get("checksum"), "Checksum JSON should be returned") + self.assertTrue(response.data["checksum"].get("algorithm")) + self.assertTrue(response.data["checksum"].get("checked")) + self.assertTrue(response.data["checksum"].get("value")) - response = self.client.get('/rest/files/1?fields=checksum:value') + response = self.client.get("/rest/files/1?fields=checksum:value") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(response.data.get('checksum'), 'Checksum JSON should be returned') - self.assertTrue(response.data['checksum'].get('value')) - self.assertFalse(response.data['checksum'].get('algorithm')) + self.assertTrue(response.data.get("checksum"), "Checksum JSON should be returned") + self.assertTrue(response.data["checksum"].get("value")) + self.assertFalse(response.data["checksum"].get("algorithm")) - response = self.client.get('/rest/files/1?fields=checksum:badvalue') + response = 
self.client.get("/rest/files/1?fields=checksum:badvalue") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue('is not part of' in response.data['detail'][0], 'Should complain about field not found') + self.assertTrue( + "is not part of" in response.data["detail"][0], + "Should complain about field not found", + ) diff --git a/src/metax_api/tests/api/rest/base/views/common/write.py b/src/metax_api/tests/api/rest/base/views/common/write.py index be5fc179..7a630879 100755 --- a/src/metax_api/tests/api/rest/base/views/common/write.py +++ b/src/metax_api/tests/api/rest/base/views/common/write.py @@ -27,28 +27,30 @@ class ApiWriteCommon(APITestCase, TestClassUtils): - def setUp(self): - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) self.test_new_data = self._get_new_test_data() self._use_http_authorization() def _get_new_test_data(self): - record_from_test_data = self._get_object_from_test_data('catalogrecord', requested_index=0) - record_from_test_data.update({ - "data_catalog": 1, - }) - record_from_test_data['research_dataset'].update({ - "preferred_identifier": None, - }) - record_from_test_data.pop('id', None) - record_from_test_data.pop('identifier', None) - record_from_test_data.pop('contract', None) + record_from_test_data = self._get_object_from_test_data("catalogrecord", requested_index=0) + record_from_test_data.update( + { + "data_catalog": 1, + } + ) + record_from_test_data["research_dataset"].update( + { + "preferred_identifier": None, + } + ) + record_from_test_data.pop("id", None) + record_from_test_data.pop("identifier", None) + record_from_test_data.pop("contract", None) return record_from_test_data class ApiWriteCommonFieldsTests(ApiWriteCommon): - def test_certain_create_fields_are_read_only_after_create(self): """ The following fields should be read-only after initial creation of a resource: @@ -56,61 +58,64 @@ def test_certain_create_fields_are_read_only_after_create(self): - user_created - service_created """ - response = self.client.post('/rest/datasets', self.test_new_data, format="json") + response = self.client.post("/rest/datasets", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) # some of the fields could be empty in test data. that is fine tho, the point is that # they should not change later. 
- orig_date_created = response.data.get('date_created', None) - orig_user_created = response.data.get('user_created', None) - orig_service_created = response.data.get('service_created', None) + orig_date_created = response.data.get("date_created", None) + orig_user_created = response.data.get("user_created", None) + orig_service_created = response.data.get("service_created", None) altered = response.data - altered['date_created'] = altered['date_created'].replace('2017', '2010') - altered['user_created'] = 'changed' - altered['service_created'] = 'changed' + altered["date_created"] = altered["date_created"].replace("2017", "2010") + altered["user_created"] = "changed" + altered["service_created"] = "changed" - response = self.client.put('/rest/datasets/%d' % altered['id'], altered, format="json") + response = self.client.put("/rest/datasets/%d" % altered["id"], altered, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get('/rest/datasets/%d' % altered['id'], format="json") + response = self.client.get("/rest/datasets/%d" % altered["id"], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(orig_date_created, response.data.get('date_created', None)) - self.assertEqual(orig_user_created, response.data.get('user_created', None)) - self.assertEqual(orig_service_created, response.data.get('service_created', None)) + self.assertEqual(orig_date_created, response.data.get("date_created", None)) + self.assertEqual(orig_user_created, response.data.get("user_created", None)) + self.assertEqual(orig_service_created, response.data.get("service_created", None)) def test_deletion_sets_removed_true_and_sets_value_for_date_removed(self): - response = self.client.post('/rest/datasets', self.test_new_data, format="json") + response = self.client.post("/rest/datasets", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['id'] - response = self.client.delete('/rest/datasets/%d' % cr_id) + cr_id = response.data["id"] + response = self.client.delete("/rest/datasets/%d" % cr_id) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) # Verify date_removed got set - response = self.client.get('/rest/datasets/%d?removed' % cr_id) + response = self.client.get("/rest/datasets/%d?removed" % cr_id) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertTrue(response.data['removed'] is True) - self.assertTrue(response.data.get('date_removed', '').startswith('2')) + self.assertTrue(response.data["removed"] is True) + self.assertTrue(response.data.get("date_removed", "").startswith("2")) def test_updating_sets_removed_false_and_empties_date_removed(self): - response = self.client.post('/rest/datasets', self.test_new_data, format="json") + response = self.client.post("/rest/datasets", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['id'] - response = self.client.delete('/rest/datasets/%d' % cr_id) + cr_id = response.data["id"] + response = self.client.delete("/rest/datasets/%d" % cr_id) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) - rd = self.client.get('/rest/datasets/%d?removed' % cr_id).data - rd_date_rem = rd['date_removed'] - sleep(1) # ensure that next request happens with different timestamp - response = 
self.client.put('/rest/datasets/%d?removed' % cr_id, rd, format="json") + rd = self.client.get("/rest/datasets/%d?removed" % cr_id).data + rd_date_rem = rd["date_removed"] + sleep(1) # ensure that next request happens with different timestamp + response = self.client.put("/rest/datasets/%d?removed" % cr_id, rd, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get('/rest/datasets/%d' % cr_id) + response = self.client.get("/rest/datasets/%d" % cr_id) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertTrue(response.data['removed'] is False) - self.assertTrue(response.data.get('date_removed') is None) - self.assertTrue(response.data.get('date_modified') != rd_date_rem, 'date_modified should be updated') + self.assertTrue(response.data["removed"] is False) + self.assertTrue(response.data.get("date_removed") is None) + self.assertTrue( + response.data.get("date_modified") != rd_date_rem, + "date_modified should be updated", + ) class ApiWriteHTTPHeaderTests(CatalogRecordApiWriteCommon): @@ -120,47 +125,53 @@ class ApiWriteHTTPHeaderTests(CatalogRecordApiWriteCommon): # def test_update_with_if_unmodified_since_header_ok(self): - cr = self.client.get('/rest/datasets/1').data - cr['preservation_description'] = 'damn this is good coffee' + cr = self.client.get("/rest/datasets/1").data + cr["preservation_description"] = "damn this is good coffee" cr_obj = CatalogRecord.objects.get(pk=1) - headers = {'HTTP_IF_UNMODIFIED_SINCE': cr_obj.date_modified.strftime('%a, %d %b %Y %H:%M:%S GMT')} + headers = { + "HTTP_IF_UNMODIFIED_SINCE": cr_obj.date_modified.strftime("%a, %d %b %Y %H:%M:%S GMT") + } - response = self.client.put('/rest/datasets/1', cr, format="json", **headers) + response = self.client.put("/rest/datasets/1", cr, format="json", **headers) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_update_with_if_unmodified_since_header_precondition_failed_error(self): - cr = self.client.get('/rest/datasets/1').data - cr['preservation_description'] = 'the owls are not what they seem' + cr = self.client.get("/rest/datasets/1").data + cr["preservation_description"] = "the owls are not what they seem" - headers = {'HTTP_IF_UNMODIFIED_SINCE': 'Wed, 23 Sep 2009 22:15:29 GMT'} + headers = {"HTTP_IF_UNMODIFIED_SINCE": "Wed, 23 Sep 2009 22:15:29 GMT"} - response = self.client.put('/rest/datasets/1', cr, format="json", **headers) - self.assertEqual(response.status_code, 412, 'http status should be 412 = precondition failed') + response = self.client.put("/rest/datasets/1", cr, format="json", **headers) + self.assertEqual( + response.status_code, 412, "http status should be 412 = precondition failed" + ) def test_update_with_if_unmodified_since_header_syntax_error(self): - cr = self.client.get('/rest/datasets/1').data - cr['preservation_description'] = 'the owls are not what they seem' + cr = self.client.get("/rest/datasets/1").data + cr["preservation_description"] = "the owls are not what they seem" cr_obj = CatalogRecord.objects.get(pk=1) - headers = {'HTTP_IF_UNMODIFIED_SINCE': cr_obj.date_modified.strftime('%a, %d %b %Y %H:%M:%S UTC')} + headers = { + "HTTP_IF_UNMODIFIED_SINCE": cr_obj.date_modified.strftime("%a, %d %b %Y %H:%M:%S UTC") + } - response = self.client.put('/rest/datasets/1', cr, format="json", **headers) - self.assertEqual(response.status_code, 400, 'http status should be 400') + response = self.client.put("/rest/datasets/1", cr, format="json", **headers) + 
self.assertEqual(response.status_code, 400, "http status should be 400") # # header if-unmodified-since tests, list # def test_update_list_with_if_unmodified_since_header_ok(self): - data_1 = self.client.get('/rest/datasets/1', format="json").data - data_2 = self.client.get('/rest/datasets/2', format="json").data + data_1 = self.client.get("/rest/datasets/1", format="json").data + data_2 = self.client.get("/rest/datasets/2", format="json").data - data_1['preservation_description'] = 'damn this is good coffee' - data_2['preservation_description'] = 'damn this is good coffee also' + data_1["preservation_description"] = "damn this is good coffee" + data_2["preservation_description"] = "damn this is good coffee also" - headers = {'HTTP_IF_UNMODIFIED_SINCE': 'value is not checked'} - response = self.client.put('/rest/datasets', [data_1, data_2], format="json", **headers) + headers = {"HTTP_IF_UNMODIFIED_SINCE": "value is not checked"} + response = self.client.put("/rest/datasets", [data_1, data_2], format="json", **headers) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) @@ -168,36 +179,46 @@ def test_update_list_with_if_unmodified_since_header_error_1(self): """ One resource being updated was updated in the meantime, resulting in an error """ - data_1 = self.client.get('/rest/datasets/1', format="json").data - data_2 = self.client.get('/rest/datasets/2', format="json").data + data_1 = self.client.get("/rest/datasets/1", format="json").data + data_2 = self.client.get("/rest/datasets/2", format="json").data - data_1['preservation_description'] = 'damn this is good coffee' + data_1["preservation_description"] = "damn this is good coffee" # should result in error for this record - data_2['date_modified'] = '2002-01-01T10:10:10Z' - - headers = {'HTTP_IF_UNMODIFIED_SINCE': 'value is not checked'} - response = self.client.put('/rest/datasets', [data_1, data_2], format="json", **headers) - self.assertEqual(len(response.data['failed']) == 1, True, 'there should be only one failed update') - self.assertEqual('modified' in response.data['failed'][0]['errors']['detail'][0], True, - 'error should indicate resource has been modified') + data_2["date_modified"] = "2002-01-01T10:10:10Z" + + headers = {"HTTP_IF_UNMODIFIED_SINCE": "value is not checked"} + response = self.client.put("/rest/datasets", [data_1, data_2], format="json", **headers) + self.assertEqual( + len(response.data["failed"]) == 1, + True, + "there should be only one failed update", + ) + self.assertEqual( + "modified" in response.data["failed"][0]["errors"]["detail"][0], + True, + "error should indicate resource has been modified", + ) def test_update_list_with_if_unmodified_since_header_error_2(self): """ Field date_modified is missing, while if-modified-since header is set, resulting in an error. 
""" - data_1 = self.client.get('/rest/datasets/1', format="json").data - data_2 = self.client.get('/rest/datasets/2', format="json").data + data_1 = self.client.get("/rest/datasets/1", format="json").data + data_2 = self.client.get("/rest/datasets/2", format="json").data - data_1['preservation_description'] = 'damn this is good coffee' + data_1["preservation_description"] = "damn this is good coffee" # should result in error for this record - data_2.pop('date_modified') + data_2.pop("date_modified") - headers = {'HTTP_IF_UNMODIFIED_SINCE': 'value is not checked'} - response = self.client.patch('/rest/datasets', [data_1, data_2], format="json", **headers) - self.assertEqual('required' in response.data['failed'][0]['errors']['detail'][0], True, - 'error should be about field date_modified is required') + headers = {"HTTP_IF_UNMODIFIED_SINCE": "value is not checked"} + response = self.client.patch("/rest/datasets", [data_1, data_2], format="json", **headers) + self.assertEqual( + "required" in response.data["failed"][0]["errors"]["detail"][0], + True, + "error should be about field date_modified is required", + ) def test_update_list_with_if_unmodified_since_header_error_3(self): """ @@ -205,17 +226,20 @@ def test_update_list_with_if_unmodified_since_header_error_3(self): is an accepted value. The end result should be that the resource has been modified, since the server version has a timestamp set in date_modified. """ - data_1 = self.client.get('/rest/datasets/1', format="json").data - data_2 = self.client.get('/rest/datasets/2', format="json").data + data_1 = self.client.get("/rest/datasets/1", format="json").data + data_2 = self.client.get("/rest/datasets/2", format="json").data - data_1['preservation_description'] = 'damn this is good coffee' - data_2['preservation_description'] = 'damn this is good coffee also' - data_2['date_modified'] = None + data_1["preservation_description"] = "damn this is good coffee" + data_2["preservation_description"] = "damn this is good coffee also" + data_2["date_modified"] = None - headers = {'HTTP_IF_UNMODIFIED_SINCE': 'value is not checked'} - response = self.client.put('/rest/datasets', [data_1, data_2], format="json", **headers) - self.assertEqual('modified' in response.data['failed'][0]['errors']['detail'][0], True, - 'error should indicate resource has been modified') + headers = {"HTTP_IF_UNMODIFIED_SINCE": "value is not checked"} + response = self.client.put("/rest/datasets", [data_1, data_2], format="json", **headers) + self.assertEqual( + "modified" in response.data["failed"][0]["errors"]["detail"][0], + True, + "error should indicate resource has been modified", + ) class ApiWriteAtomicBulkOperations(CatalogRecordApiWriteCommon): @@ -226,47 +250,55 @@ class ApiWriteAtomicBulkOperations(CatalogRecordApiWriteCommon): """ def test_atomic_create(self): - cr = self.client.get('/rest/datasets/1', format="json").data - cr.pop('id') - cr.pop('identifier') - cr['research_dataset'].pop('metadata_version_identifier') - cr['research_dataset'].pop('preferred_identifier') + cr = self.client.get("/rest/datasets/1", format="json").data + cr.pop("id") + cr.pop("identifier") + cr["research_dataset"].pop("metadata_version_identifier") + cr["research_dataset"].pop("preferred_identifier") cr2 = deepcopy(cr) cr3 = deepcopy(cr) - cr3.pop('data_catalog') # causes error + cr3.pop("data_catalog") # causes error record_count_before = CatalogRecord.objects.all().count() - response = self.client.post('/rest/datasets?atomic=true', [cr, cr2, cr3], format="json") + response = 
self.client.post("/rest/datasets?atomic=true", [cr, cr2, cr3], format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content) - self.assertEqual(len(response.data['success']) == 0, True, response.data) - self.assertEqual(len(response.data['failed']) == 1, True, response.data) - self.assertEqual('detail' in response.data, True, response.data) - self.assertEqual('atomic' in response.data['detail'][0], True, response.data) - self.assertEqual(record_count_before, CatalogRecord.objects.all().count(), 'shouldnt create new records') + self.assertEqual(len(response.data["success"]) == 0, True, response.data) + self.assertEqual(len(response.data["failed"]) == 1, True, response.data) + self.assertEqual("detail" in response.data, True, response.data) + self.assertEqual("atomic" in response.data["detail"][0], True, response.data) + self.assertEqual( + record_count_before, + CatalogRecord.objects.all().count(), + "shouldnt create new records", + ) def test_atomic_update(self): - cr = self.client.get('/rest/datasets/1', format="json").data - cr2 = self.client.get('/rest/datasets/2', format="json").data - cr3 = self.client.get('/rest/datasets/3', format="json").data - cr['research_dataset']['title']['en'] = 'updated' - cr2['research_dataset']['title']['en'] = 'updated' - cr3.pop('data_catalog') # causes error + cr = self.client.get("/rest/datasets/1", format="json").data + cr2 = self.client.get("/rest/datasets/2", format="json").data + cr3 = self.client.get("/rest/datasets/3", format="json").data + cr["research_dataset"]["title"]["en"] = "updated" + cr2["research_dataset"]["title"]["en"] = "updated" + cr3.pop("data_catalog") # causes error record_count_before = CatalogRecord.objects.all().count() - response = self.client.put('/rest/datasets?atomic=true', [cr, cr2, cr3], format="json") + response = self.client.put("/rest/datasets?atomic=true", [cr, cr2, cr3], format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual(len(response.data['success']) == 0, True) - self.assertEqual(len(response.data['failed']) == 1, True) - self.assertEqual('atomic' in response.data['detail'][0], True) - self.assertEqual(record_count_before, CatalogRecord.objects.all().count(), 'shouldnt create new versions') + self.assertEqual(len(response.data["success"]) == 0, True) + self.assertEqual(len(response.data["failed"]) == 1, True) + self.assertEqual("atomic" in response.data["detail"][0], True) + self.assertEqual( + record_count_before, + CatalogRecord.objects.all().count(), + "shouldnt create new versions", + ) - cr = self.client.get('/rest/datasets/1', format="json").data - cr2 = self.client.get('/rest/datasets/2', format="json").data - self.assertEqual(cr['research_dataset']['title']['en'] == 'updated', False) - self.assertEqual(cr2['research_dataset']['title']['en'] == 'updated', False) + cr = self.client.get("/rest/datasets/1", format="json").data + cr2 = self.client.get("/rest/datasets/2", format="json").data + self.assertEqual(cr["research_dataset"]["title"]["en"] == "updated", False) + self.assertEqual(cr2["research_dataset"]["title"]["en"] == "updated", False) class ApiWriteQueryParamTests(ApiWriteCommon): @@ -280,11 +312,15 @@ def test_dryrun(self): Ensure query parameter ?dryrun=true returns same result as they normally would, but changes made during the request do not get saved in the db. 
""" - response = self.client.post('/rest/datasets?dryrun=true', self.test_new_data, format="json") + response = self.client.post("/rest/datasets?dryrun=true", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('id' in response.data, True) - found = CatalogRecord.objects.filter(pk=response.data['id']).exists() - self.assertEqual(found, False, 'record should not get truly created when using parameter dryrun') + self.assertEqual("id" in response.data, True) + found = CatalogRecord.objects.filter(pk=response.data["id"]).exists() + self.assertEqual( + found, + False, + "record should not get truly created when using parameter dryrun", + ) class ApiWriteCommonOperations(ApiWriteCommon): @@ -293,10 +329,10 @@ class ApiWriteCommonOperations(ApiWriteCommon): """ def test_create_file_with_empty_body_fails(self): - response = self.client.post('/rest/datasets') + response = self.client.post("/rest/datasets") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue('Request body is required' in response.data['detail'][0]) + self.assertTrue("Request body is required" in response.data["detail"][0]) - response = self.client.post('/rest/files') + response = self.client.post("/rest/files") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue('Request body is required' in response.data['detail'][0]) + self.assertTrue("Request body is required" in response.data["detail"][0]) diff --git a/src/metax_api/tests/api/rest/base/views/contracts/contracts.py b/src/metax_api/tests/api/rest/base/views/contracts/contracts.py index 90babef0..b79f55e0 100755 --- a/src/metax_api/tests/api/rest/base/views/contracts/contracts.py +++ b/src/metax_api/tests/api/rest/base/views/contracts/contracts.py @@ -19,29 +19,29 @@ def setUpClass(cls): """ Loaded only once for test cases inside this class. 
""" - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(ContractApiReadTestV1, cls).setUpClass() def setUp(self): - contract_from_test_data = self._get_object_from_test_data('contract', requested_index=0) - self.pk = contract_from_test_data['id'] - self.identifier = contract_from_test_data['contract_json']['identifier'] + contract_from_test_data = self._get_object_from_test_data("contract", requested_index=0) + self.pk = contract_from_test_data["id"] + self.identifier = contract_from_test_data["contract_json"]["identifier"] self._use_http_authorization() def test_read_contract_list(self): - response = self.client.get('/rest/datasets') + response = self.client.get("/rest/datasets") self.assertEqual(response.status_code, status.HTTP_200_OK) def test_read_contract_details_by_pk(self): - response = self.client.get('/rest/contracts/%s' % self.pk) + response = self.client.get("/rest/contracts/%s" % self.pk) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_read_contract_details_by_identifier(self): - response = self.client.get('/rest/contracts/%s' % self.identifier) + response = self.client.get("/rest/contracts/%s" % self.identifier) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_read_contract_details_not_found(self): - response = self.client.get('/rest/contracts/shouldnotexist') + response = self.client.get("/rest/contracts/shouldnotexist") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) @@ -50,10 +50,10 @@ def setUp(self): """ Reloaded for every test case """ - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) self._use_http_authorization() - contract_from_test_data = self._get_object_from_test_data('contract') - self.pk = contract_from_test_data['id'] + contract_from_test_data = self._get_object_from_test_data("contract") + self.pk = contract_from_test_data["id"] """ New data that is sent to the server for POST, PUT, PATCH requests. 
Modified
@@ -64,32 +64,38 @@ def setUp(self):
         self._use_http_authorization()

     def test_create_contract_with_existing_identifier(self):
-        self.test_new_data['pk'] = self.pk
-        response = self.client.post('/rest/contracts/', self.test_new_data, format="json")
+        self.test_new_data["pk"] = self.pk
+        response = self.client.post("/rest/contracts/", self.test_new_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

-        response = self.client.post('/rest/contracts/', self.test_new_data, format="json")
+        response = self.client.post("/rest/contracts/", self.test_new_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
-        self.assertTrue('already exists' in response.data['contract_json'][0],
-                        'Error regarding dublicated identifier')
+        self.assertTrue(
+            "already exists" in response.data["contract_json"][0],
+            "Error regarding duplicated identifier",
+        )

     def test_update_contract(self):
-        self.test_new_data['pk'] = self.pk
-        response = self.client.put('/rest/contracts/%s' % self.pk, self.test_new_data, format="json")
+        self.test_new_data["pk"] = self.pk
+        response = self.client.put(
+            "/rest/contracts/%s" % self.pk, self.test_new_data, format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

     def test_update_contract_not_found(self):
-        response = self.client.put('/rest/contracts/doesnotexist', self.test_new_data, format="json")
+        response = self.client.put(
+            "/rest/contracts/doesnotexist", self.test_new_data, format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

     def test_add_catalog_record_to_contract(self):
-        new_catalog_record = self.client.get('/rest/datasets/1', format="json").data
-        new_catalog_record.pop('id')
-        new_catalog_record.pop('identifier')
-        new_catalog_record['research_dataset'].pop('preferred_identifier')
-        new_catalog_record['contract'] = self.pk
+        new_catalog_record = self.client.get("/rest/datasets/1", format="json").data
+        new_catalog_record.pop("id")
+        new_catalog_record.pop("identifier")
+        new_catalog_record["research_dataset"].pop("preferred_identifier")
+        new_catalog_record["contract"] = self.pk

-        response = self.client.post('/rest/datasets', new_catalog_record, format="json")
+        response = self.client.post("/rest/datasets", new_catalog_record, format="json")
         created_catalog_record = response.data

         try:
@@ -97,21 +103,26 @@ def test_add_catalog_record_to_contract(self):
         except Exception:
             print(response.data)
             raise
-        self.assertEqual('research_dataset' in created_catalog_record.keys(), True)
-        self.assertEqual(created_catalog_record['contract']['id'], self.pk)
+        self.assertEqual("research_dataset" in created_catalog_record.keys(), True)
+        self.assertEqual(created_catalog_record["contract"]["id"], self.pk)

         contract = Contract.objects.get(pk=self.pk)
         try:
-            contract.records.get(pk=response.data['id'])
+            contract.records.get(pk=response.data["id"])
         except CatalogRecord.DoesNotExist:
-            raise Exception('The added CatalogRecord should appear in the relation contract.records')
+            raise Exception(
+                "The added CatalogRecord should appear in the relation contract.records"
+            )

-        response = self.client.get('/rest/contracts/%d/datasets' % self.pk)
-        self.assertIn(created_catalog_record['id'], [cr['id'] for cr in response.data],
-                      'The added CatalogRecord should appear in the results of /contracts/id/datasets')
+        response = self.client.get("/rest/contracts/%d/datasets" % self.pk)
+        self.assertIn(
+            created_catalog_record["id"],
+
[cr["id"] for cr in response.data], + "The added CatalogRecord should appear in the results of /contracts/id/datasets", + ) def test_delete_contract(self): - url = '/rest/contracts/%s' % self.pk + url = "/rest/contracts/%s" % self.pk response = self.client.delete(url) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) response = self.client.get(url) @@ -125,43 +136,65 @@ def test_delete_contract(self): pass if deleted_contract: - raise Exception('Deleted Contract should not be retrievable from the default objects table') + raise Exception( + "Deleted Contract should not be retrievable from the default objects table" + ) try: deleted_contract = Contract.objects_unfiltered.get(pk=self.pk) except Contract.DoesNotExist: - raise Exception('Deleted contract should not be deleted from the db') - - self.assertEqual(deleted_contract.removed, True, 'Deleted contract should be marked removed in the db') - self.assertEqual(deleted_contract.date_modified, deleted_contract.date_removed, - 'date_modified should be updated') + raise Exception("Deleted contract should not be deleted from the db") + + self.assertEqual( + deleted_contract.removed, + True, + "Deleted contract should be marked removed in the db", + ) + self.assertEqual( + deleted_contract.date_modified, + deleted_contract.date_removed, + "date_modified should be updated", + ) def test_delete_contract_catalog_records_are_marked_removed(self): # add two new records to contract new_catalog_record = self._get_new_catalog_record_test_data() - new_catalog_record['contract'] = self.pk - self.client.post('/rest/datasets', new_catalog_record, format="json") - self.client.post('/rest/datasets', new_catalog_record, format="json") + new_catalog_record["contract"] = self.pk + self.client.post("/rest/datasets", new_catalog_record, format="json") + self.client.post("/rest/datasets", new_catalog_record, format="json") - self.client.delete('/rest/contracts/%s' % self.pk) + self.client.delete("/rest/contracts/%s" % self.pk) contract = Contract.objects_unfiltered.get(pk=self.pk) - related_crs = contract.records(manager='objects_unfiltered').all() - response_get_1 = self.client.get('/rest/datasets/%d' % related_crs[0].id) - self.assertEqual(response_get_1.status_code, status.HTTP_404_NOT_FOUND, - 'CatalogRecords of deleted contracts should not be retrievable through the api') - response_get_2 = self.client.get('/rest/datasets/%d' % related_crs[1].id) - self.assertEqual(response_get_2.status_code, status.HTTP_404_NOT_FOUND, - 'CatalogRecords of deleted contracts should not be retrievable through the api') + related_crs = contract.records(manager="objects_unfiltered").all() + response_get_1 = self.client.get("/rest/datasets/%d" % related_crs[0].id) + self.assertEqual( + response_get_1.status_code, + status.HTTP_404_NOT_FOUND, + "CatalogRecords of deleted contracts should not be retrievable through the api", + ) + response_get_2 = self.client.get("/rest/datasets/%d" % related_crs[1].id) + self.assertEqual( + response_get_2.status_code, + status.HTTP_404_NOT_FOUND, + "CatalogRecords of deleted contracts should not be retrievable through the api", + ) for cr in related_crs: - self.assertEqual(cr.removed, True, 'Related CatalogRecord objects should be marked as removed') + self.assertEqual( + cr.removed, + True, + "Related CatalogRecord objects should be marked as removed", + ) def test_deleted_catalog_record_is_not_listed_in_contract_datasets_api(self): deleted_id = 1 - self.client.delete('/rest/datasets/%d' % deleted_id) - response = 
self.client.get('/rest/contracts/%d/datasets' % self.pk) - self.assertNotIn(deleted_id, [cr['id'] for cr in response.data], - 'The deleted CatalogRecord should not appear in the results of /contracts/id/datasets') + self.client.delete("/rest/datasets/%d" % deleted_id) + response = self.client.get("/rest/contracts/%d/datasets" % self.pk) + self.assertNotIn( + deleted_id, + [cr["id"] for cr in response.data], + "The deleted CatalogRecord should not appear in the results of /contracts/id/datasets", + ) def _get_new_test_data(self): return { @@ -172,22 +205,19 @@ def _get_new_test_data(self): "created": "2014-01-17T08:19:58Z", "modified": "2014-01-17T08:19:58Z", "description": "Description of unknown length", - "contact": [{ - "name": "Contact Name", - "phone": "+358501231234", - "email": "contact.email@csc.fi" - }], + "contact": [ + { + "name": "Contact Name", + "phone": "+358501231234", + "email": "contact.email@csc.fi", + } + ], "organization": { "organization_identifier": "1234567abc", - "name": "Mysterious organization" + "name": "Mysterious organization", }, - "related_service": [{ - "identifier": "local:service:id", - "name": "Name of Service" - }], - "validity": { - "start_date": "2014-01-17" - } + "related_service": [{"identifier": "local:service:id", "name": "Name of Service"}], + "validity": {"start_date": "2014-01-17"}, } } @@ -200,57 +230,56 @@ def _get_second_new_test_data(self): "created": "2014-01-17T08:19:58Z", "modified": "2014-01-17T08:19:58Z", "description": "Description of unknown length", - "contact": [{ - "name": "Contact Name", - "phone": "+358501231234", - "email": "contact.email@csc.fi" - }], + "contact": [ + { + "name": "Contact Name", + "phone": "+358501231234", + "email": "contact.email@csc.fi", + } + ], "organization": { "organization_identifier": "1234567abc", - "name": "Mysterious organization" + "name": "Mysterious organization", }, - "related_service": [{ - "identifier": "local:service:id", - "name": "Name of Service" - }], - "validity": { - "start_date": "2014-01-17" - } + "related_service": [{"identifier": "local:service:id", "name": "Name of Service"}], + "validity": {"start_date": "2014-01-17"}, } } def _get_new_catalog_record_test_data(self): - catalog_record_from_test_data = self._get_object_from_test_data('catalogrecord', requested_index=0) + catalog_record_from_test_data = self._get_object_from_test_data( + "catalogrecord", requested_index=0 + ) return { "identifier": "http://urn.fi/urn:nbn:fi:iiidentifier", - "data_catalog": self._get_object_from_test_data('datacatalog', requested_index=0), + "data_catalog": self._get_object_from_test_data("datacatalog", requested_index=0), "research_dataset": { "modified": "2014-01-17T08:19:58Z", - "version_notes": [ - "This version contains changes to x and y." + "version_notes": ["This version contains changes to x and y."], + "title": {"en": "Wonderful Title"}, + "description": [ + { + "en": "A descriptive description describing the contents of this dataset. Must be descriptive." + } ], - "title": { - "en": "Wonderful Title" - }, - "description": [{ - "en": "A descriptive description describing the contents of this dataset. Must be descriptive." 
- }], - "creator": [{ - "@type": "Person", - "name": "Teppo Testaaja", - "member_of": { + "creator": [ + { + "@type": "Person", + "name": "Teppo Testaaja", + "member_of": { + "@type": "Organization", + "name": {"fi": "Mysterious Organization"}, + }, + } + ], + "curator": [ + { "@type": "Organization", - "name": {"fi": "Mysterious Organization"} + "name": {"en": "Curator org", "fi": "Organisaatio"}, } - }], - "curator": [{ - "@type": "Organization", - "name": {"en": "Curator org", "fi": "Organisaatio"} - }], - "language": [{ - "identifier": "http://lexvo.org/id/iso639-3/aar" - }], + ], + "language": [{"identifier": "http://lexvo.org/id/iso639-3/aar"}], "total_files_byte_size": 1024, - "files": catalog_record_from_test_data['research_dataset']['files'] - } + "files": catalog_record_from_test_data["research_dataset"]["files"], + }, } diff --git a/src/metax_api/tests/api/rest/base/views/datacatalogs/read.py b/src/metax_api/tests/api/rest/base/views/datacatalogs/read.py index 7d44b705..c93e5987 100755 --- a/src/metax_api/tests/api/rest/base/views/datacatalogs/read.py +++ b/src/metax_api/tests/api/rest/base/views/datacatalogs/read.py @@ -18,22 +18,24 @@ def setUpClass(cls): """ Loaded only once for test cases inside this class. """ - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(DataCatalogApiReadBasicTests, cls).setUpClass() def setUp(self): - data_catalog_from_test_data = self._get_object_from_test_data('datacatalog', requested_index=0) + data_catalog_from_test_data = self._get_object_from_test_data( + "datacatalog", requested_index=0 + ) self._use_http_authorization() - self.pk = data_catalog_from_test_data['id'] - self.identifier = data_catalog_from_test_data['catalog_json']['identifier'] + self.pk = data_catalog_from_test_data["id"] + self.identifier = data_catalog_from_test_data["catalog_json"]["identifier"] def test_basic_get(self): - response = self.client.get('/rest/datacatalogs/%s' % self.identifier) + response = self.client.get("/rest/datacatalogs/%s" % self.identifier) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_allowed_read_methods(self): self.client._credentials = {} - for req in ['/rest/datacatalogs', '/rest/datacatalogs/1']: + for req in ["/rest/datacatalogs", "/rest/datacatalogs/1"]: response = self.client.get(req) self.assertEqual(response.status_code, status.HTTP_200_OK) response = self.client.head(req) diff --git a/src/metax_api/tests/api/rest/base/views/datacatalogs/write.py b/src/metax_api/tests/api/rest/base/views/datacatalogs/write.py index ae73ee98..0edacec7 100755 --- a/src/metax_api/tests/api/rest/base/views/datacatalogs/write.py +++ b/src/metax_api/tests/api/rest/base/views/datacatalogs/write.py @@ -21,55 +21,65 @@ def setUpClass(cls): """ Loaded only once for test cases inside this class. 
""" - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(DataCatalogApiWriteCommon, cls).setUpClass() def setUp(self): - self.new_test_data = self._get_object_from_test_data('datacatalog') - self.new_test_data.pop('id') - self.new_test_data['catalog_json']['identifier'] = 'new-data-catalog' + self.new_test_data = self._get_object_from_test_data("datacatalog") + self.new_test_data.pop("id") + self.new_test_data["catalog_json"]["identifier"] = "new-data-catalog" self._use_http_authorization() class DataCatalogApiWriteBasicTests(DataCatalogApiWriteCommon): - def test_identifier_is_auto_generated(self): - response = self.client.post('/rest/datacatalogs', self.new_test_data, format="json") + response = self.client.post("/rest/datacatalogs", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertNotEqual(response.data['catalog_json'].get('identifier', None), None, 'identifier should be created') + self.assertNotEqual( + response.data["catalog_json"].get("identifier", None), + None, + "identifier should be created", + ) def test_research_dataset_schema_missing_ok(self): - self.new_test_data['catalog_json'].pop('research_dataset_schema', None) - response = self.client.post('/rest/datacatalogs', self.new_test_data, format="json") + self.new_test_data["catalog_json"].pop("research_dataset_schema", None) + response = self.client.post("/rest/datacatalogs", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_research_dataset_schema_not_found_error(self): - self.new_test_data['catalog_json']['research_dataset_schema'] = 'notfound' - response = self.client.post('/rest/datacatalogs', self.new_test_data, format="json") + self.new_test_data["catalog_json"]["research_dataset_schema"] = "notfound" + response = self.client.post("/rest/datacatalogs", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) def test_disallow_versioning_in_harvested_catalogs(self): - self.new_test_data['catalog_json']['dataset_versioning'] = True - self.new_test_data['catalog_json']['harvested'] = True - response = self.client.post('/rest/datacatalogs', self.new_test_data, format="json") + self.new_test_data["catalog_json"]["dataset_versioning"] = True + self.new_test_data["catalog_json"]["harvested"] = True + response = self.client.post("/rest/datacatalogs", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('versioning' in response.data['detail'][0], True, response.data) + self.assertEqual("versioning" in response.data["detail"][0], True, response.data) def test_create_identifier_already_exists(self): - response = self.client.post('/rest/datacatalogs', self.new_test_data, format="json") + response = self.client.post("/rest/datacatalogs", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) - response = self.client.post('/rest/datacatalogs', self.new_test_data, format="json") + response = self.client.post("/rest/datacatalogs", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('already exists' in response.data['catalog_json']['identifier'][0], - True, response.data) + self.assertEqual( + "already exists" in response.data["catalog_json"]["identifier"][0], + True, + response.data, + ) def 
test_delete(self):
- response = self.client.delete('/rest/datacatalogs/1')
+ response = self.client.delete("/rest/datacatalogs/1")
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
dc_deleted = DataCatalog.objects_unfiltered.get(pk=1)
self.assertEqual(dc_deleted.removed, True)
- self.assertEqual(dc_deleted.date_modified, dc_deleted.date_removed, 'date_modified should be updated')
+ self.assertEqual(
+ dc_deleted.date_modified,
+ dc_deleted.date_removed,
+ "date_modified should be updated",
+ )
def test_publisher_name_is_required(self):
"""
@@ -78,12 +88,13 @@ def test_publisher_name_is_required(self):
"""
catalog = deepcopy(self.new_test_data)
- catalog['catalog_json']['publisher'].pop('identifier', None)
- catalog['catalog_json']['publisher'].pop('name', None)
+ catalog["catalog_json"]["publisher"].pop("identifier", None)
+ catalog["catalog_json"]["publisher"].pop("name", None)
- response = self.client.post('/rest/datacatalogs', catalog, format="json")
+ response = self.client.post("/rest/datacatalogs", catalog, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
- self.assertTrue('name' in response.data['catalog_json'][0], response.data)
+ self.assertTrue("name" in response.data["catalog_json"][0], response.data)
+
class DataCatalogApiWriteReferenceDataTests(DataCatalogApiWriteCommon):
"""
@@ -92,15 +103,15 @@ class DataCatalogApiWriteReferenceDataTests(DataCatalogApiWriteCommon):
"""
def test_create_data_catalog_with_invalid_reference_data(self):
- dc = self.new_test_data['catalog_json']
- dc['field_of_science'][0]['identifier'] = 'nonexisting'
- dc['language'][0]['identifier'] = 'nonexisting'
- dc['access_rights']['access_type'][0]['identifier'] = 'nonexisting'
- dc['access_rights']['license'][0]['identifier'] = 'nonexisting'
- response = self.client.post('/rest/datacatalogs', self.new_test_data, format="json")
+ dc = self.new_test_data["catalog_json"]
+ dc["field_of_science"][0]["identifier"] = "nonexisting"
+ dc["language"][0]["identifier"] = "nonexisting"
+ dc["access_rights"]["access_type"][0]["identifier"] = "nonexisting"
+ dc["access_rights"]["license"][0]["identifier"] = "nonexisting"
+ response = self.client.post("/rest/datacatalogs", self.new_test_data, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
- self.assertEqual('catalog_json' in response.data.keys(), True)
- self.assertEqual(len(response.data['catalog_json']), 4)
+ self.assertEqual("catalog_json" in response.data.keys(), True)
+ self.assertEqual(len(response.data["catalog_json"]), 4)
def test_create_data_catalog_populate_fields_from_reference_data(self):
"""
@@ -111,74 +122,93 @@ def test_create_data_catalog_populate_fields_from_reference_data(self):
3) Check that labels have also been copied to data catalog to their appropriate fields
"""
cache = RedisClient()
- refdata = cache.get('reference_data')['reference_data']
- orgdata = cache.get('reference_data')['organization_data']
+ refdata = cache.get("reference_data")["reference_data"]
+ orgdata = cache.get("reference_data")["organization_data"]
refs = {}
data_types = [
- 'access_type',
- 'field_of_science',
- 'language',
- 'license',
+ "access_type",
+ "field_of_science",
+ "language",
+ "license",
]
# the values in these selected entries will be used throughout the rest of the test case
for dtype in data_types:
entry = refdata[dtype][0]
refs[dtype] = {
- 'code': entry['code'],
- 'uri': entry['uri'],
- 'label': entry.get('label', None),
+ "code": entry["code"],
+ "uri":
entry["uri"], + "label": entry.get("label", None), } - refs['organization'] = { - 'uri': orgdata['organization'][1]['uri'], - 'code': orgdata['organization'][1]['code'], - 'label': orgdata['organization'][1]['label'], + refs["organization"] = { + "uri": orgdata["organization"][1]["uri"], + "code": orgdata["organization"][1]["code"], + "label": orgdata["organization"][1]["label"], } # replace the relations with objects that have only the identifier set with code as value, # to easily check that label was populated (= that it appeared in the dataset after create) # without knowing its original value from the generated test data - dc = self.new_test_data['catalog_json'] - dc['field_of_science'][0] = {'identifier': refs['field_of_science']['code']} - dc['language'][0] = {'identifier': refs['language']['code']} - dc['access_rights']['access_type'][0] = {'identifier': refs['access_type']['code']} - dc['access_rights']['license'][0] = {'identifier': refs['license']['code']} + dc = self.new_test_data["catalog_json"] + dc["field_of_science"][0] = {"identifier": refs["field_of_science"]["code"]} + dc["language"][0] = {"identifier": refs["language"]["code"]} + dc["access_rights"]["access_type"][0] = {"identifier": refs["access_type"]["code"]} + dc["access_rights"]["license"][0] = {"identifier": refs["license"]["code"]} # these have other required fields, so only update the identifier with code - dc['publisher']['identifier'] = refs['organization']['code'] - dc['access_rights']['has_rights_related_agent'][0]['identifier'] = refs['organization']['code'] + dc["publisher"]["identifier"] = refs["organization"]["code"] + dc["access_rights"]["has_rights_related_agent"][0]["identifier"] = refs["organization"][ + "code" + ] # ensure that name is not required if reference data is used, i.e. 
identifier is given - dc['publisher'].pop('name', None) + dc["publisher"].pop("name", None) - response = self.client.post('/rest/datacatalogs', self.new_test_data, format="json") + response = self.client.post("/rest/datacatalogs", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('catalog_json' in response.data.keys(), True) + self.assertEqual("catalog_json" in response.data.keys(), True) - new_dc = response.data['catalog_json'] + new_dc = response.data["catalog_json"] self._assert_uri_copied_to_identifier(refs, new_dc) self._assert_label_copied_to_pref_label(refs, new_dc) self._assert_label_copied_to_title(refs, new_dc) self._assert_label_copied_to_name(refs, new_dc) def _assert_uri_copied_to_identifier(self, refs, new_dc): - self.assertEqual(refs['field_of_science']['uri'], new_dc['field_of_science'][0]['identifier']) - self.assertEqual(refs['language']['uri'], new_dc['language'][0]['identifier']) - self.assertEqual(refs['access_type']['uri'], new_dc['access_rights']['access_type'][0]['identifier']) - self.assertEqual(refs['license']['uri'], new_dc['access_rights']['license'][0]['identifier']) - self.assertEqual(refs['organization']['uri'], new_dc['publisher']['identifier']) - self.assertEqual(refs['organization']['uri'], - new_dc['access_rights']['has_rights_related_agent'][0]['identifier']) + self.assertEqual( + refs["field_of_science"]["uri"], new_dc["field_of_science"][0]["identifier"] + ) + self.assertEqual(refs["language"]["uri"], new_dc["language"][0]["identifier"]) + self.assertEqual( + refs["access_type"]["uri"], + new_dc["access_rights"]["access_type"][0]["identifier"], + ) + self.assertEqual( + refs["license"]["uri"], new_dc["access_rights"]["license"][0]["identifier"] + ) + self.assertEqual(refs["organization"]["uri"], new_dc["publisher"]["identifier"]) + self.assertEqual( + refs["organization"]["uri"], + new_dc["access_rights"]["has_rights_related_agent"][0]["identifier"], + ) def _assert_label_copied_to_pref_label(self, refs, new_dc): - self.assertEqual(refs['field_of_science']['label'], new_dc['field_of_science'][0].get('pref_label', None)) - self.assertEqual(refs['access_type']['label'], - new_dc['access_rights']['access_type'][0].get('pref_label', None)) + self.assertEqual( + refs["field_of_science"]["label"], + new_dc["field_of_science"][0].get("pref_label", None), + ) + self.assertEqual( + refs["access_type"]["label"], + new_dc["access_rights"]["access_type"][0].get("pref_label", None), + ) def _assert_label_copied_to_title(self, refs, new_dc): - self.assertEqual(refs['license']['label'], new_dc['access_rights']['license'][0].get('title', None)) + self.assertEqual( + refs["license"]["label"], + new_dc["access_rights"]["license"][0].get("title", None), + ) def _assert_label_copied_to_name(self, refs, new_dc): - self.assertEqual(refs['organization']['label'], new_dc['publisher']['name']) + self.assertEqual(refs["organization"]["label"], new_dc["publisher"]["name"]) diff --git a/src/metax_api/tests/api/rest/base/views/datasets/read.py b/src/metax_api/tests/api/rest/base/views/datasets/read.py index 18825c13..75034792 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/read.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/read.py @@ -5,8 +5,8 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT -from copy import deepcopy import urllib.parse +from copy import deepcopy from datetime import timedelta import responses @@ -27,28 +27,32 @@ def 
setUpClass(cls): """ Loaded only once for test cases inside this class. """ - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(CatalogRecordApiReadCommon, cls).setUpClass() def setUp(self): - self.cr_from_test_data = self._get_object_from_test_data('catalogrecord', requested_index=0) - self.pk = self.cr_from_test_data['id'] - self.metadata_version_identifier = self.cr_from_test_data['research_dataset']['metadata_version_identifier'] - self.preferred_identifier = self.cr_from_test_data['research_dataset']['preferred_identifier'] - self.identifier = self.cr_from_test_data['identifier'] + self.cr_from_test_data = self._get_object_from_test_data("catalogrecord", requested_index=0) + self.pk = self.cr_from_test_data["id"] + self.metadata_version_identifier = self.cr_from_test_data["research_dataset"][ + "metadata_version_identifier" + ] + self.preferred_identifier = self.cr_from_test_data["research_dataset"][ + "preferred_identifier" + ] + self.identifier = self.cr_from_test_data["identifier"] self._use_http_authorization() def create_legacy_dataset(self): cr = deepcopy(self.cr_from_test_data) - cr['data_catalog'] = settings.LEGACY_CATALOGS[0] - cr.pop('identifier') - cr['research_dataset']['preferred_identifier'] = 'ldhkrfdwam' - cr['research_dataset'].pop('files') - cr['research_dataset'].pop('total_files_byte_size') - response = self.client.post('/rest/v2/datasets', cr, format="json") + cr["data_catalog"] = settings.LEGACY_CATALOGS[0] + cr.pop("identifier") + cr["research_dataset"]["preferred_identifier"] = "ldhkrfdwam" + cr["research_dataset"].pop("files") + cr["research_dataset"].pop("total_files_byte_size") + response = self.client.post("/rest/v2/datasets", cr, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - return response.data['id'] + return response.data["id"] class CatalogRecordApiReadBasicTests(CatalogRecordApiReadCommon): @@ -58,7 +62,7 @@ class CatalogRecordApiReadBasicTests(CatalogRecordApiReadCommon): """ def test_read_catalog_record_list(self): - response = self.client.get('/rest/datasets') + response = self.client.get("/rest/datasets") self.assertEqual(response.status_code, status.HTTP_200_OK) def test_read_catalog_record_list_by_ids(self): @@ -67,99 +71,108 @@ def test_read_catalog_record_list_by_ids(self): cr_id_lists = [ [1, 4, 6], [1, 4, 6, 777], - ['cr955e904-e3dd-4d7e-99f1-3fed446f96d7', - 'cr955e904-e3dd-4d7e-99f1-3fed446f96d6', - 'cr955e904-e3dd-4d7e-99f1-3fed446f96d5'], - ['cr955e904-e3dd-4d7e-99f1-3fed446f96d7', - 'cr955e904-e3dd-4d7e-99f1-3fed446f96d6', - 'cr955e904-e3dd-4d7e-99f1-3fed446f96d5', - 'something'] + [ + "cr955e904-e3dd-4d7e-99f1-3fed446f96d7", + "cr955e904-e3dd-4d7e-99f1-3fed446f96d6", + "cr955e904-e3dd-4d7e-99f1-3fed446f96d5", + ], + [ + "cr955e904-e3dd-4d7e-99f1-3fed446f96d7", + "cr955e904-e3dd-4d7e-99f1-3fed446f96d6", + "cr955e904-e3dd-4d7e-99f1-3fed446f96d5", + "something", + ], ] for id_list in cr_id_lists: - response = self.client.post('/rest/datasets/list', id_list, format="json") + response = self.client.post("/rest/datasets/list", id_list, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 3) + self.assertEqual(len(response.data["results"]), 3) # check that fields parameter works - response = self.client.post('/rest/datasets/list?fields=id', id_list, format="json") + response = self.client.post("/rest/datasets/list?fields=id", id_list, format="json") 
self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 3) - self.assertEqual(len(response.data['results'][0].keys()), 1) - self.assertEqual(list(response.data['results'][0].keys()), ['id']) + self.assertEqual(len(response.data["results"]), 3) + self.assertEqual(len(response.data["results"][0].keys()), 1) + self.assertEqual(list(response.data["results"][0].keys()), ["id"]) # Failing/empty tests - cr_bad_lists = [ - ['something'], - [999, 777] - ] + cr_bad_lists = [["something"], [999, 777]] for bad_list in cr_bad_lists: - response = self.client.post('/rest/datasets/list', bad_list, format="json") + response = self.client.post("/rest/datasets/list", bad_list, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['results'], []) + self.assertEqual(response.data["results"], []) - response = self.client.post('/rest/datasets/list', [], format="json") + response = self.client.post("/rest/datasets/list", [], format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue('Received empty list of identifiers' in response.data['detail']) + self.assertTrue("Received empty list of identifiers" in response.data["detail"]) def test_read_catalog_record_details_by_pk(self): - response = self.client.get('/rest/datasets/%s' % self.pk) + response = self.client.get("/rest/datasets/%s" % self.pk) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['identifier'], self.identifier) - self.assertEqual('identifier' in response.data['data_catalog'], True) + self.assertEqual(response.data["identifier"], self.identifier) + self.assertEqual("identifier" in response.data["data_catalog"], True) def test_read_catalog_record_details_by_identifier(self): - response = self.client.get('/rest/datasets/%s' % self.identifier) + response = self.client.get("/rest/datasets/%s" % self.identifier) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['identifier'], - self.identifier) + self.assertEqual(response.data["identifier"], self.identifier) def test_get_by_preferred_identifier(self): cr = CatalogRecord.objects.get(pk=1) - cr.research_dataset['preferred_identifier'] = '%s-/uhoh/special.chars?all&around' % cr.preferred_identifier + cr.research_dataset["preferred_identifier"] = ( + "%s-/uhoh/special.chars?all&around" % cr.preferred_identifier + ) cr.force_save() - response = self.client.get('/rest/datasets?preferred_identifier=%s' % - urllib.parse.quote(cr.preferred_identifier)) + response = self.client.get( + "/rest/datasets?preferred_identifier=%s" % urllib.parse.quote(cr.preferred_identifier) + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['research_dataset']['preferred_identifier'], cr.preferred_identifier) + self.assertEqual( + response.data["research_dataset"]["preferred_identifier"], + cr.preferred_identifier, + ) def test_get_removed_by_preferred_identifier(self): self._use_http_authorization() - response = self.client.delete('/rest/datasets/%s' % self.identifier) + response = self.client.delete("/rest/datasets/%s" % self.identifier) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) - response = self.client.get('/rest/datasets?preferred_identifier=%s&removed=true' % - urllib.parse.quote(self.preferred_identifier)) + response = self.client.get( + "/rest/datasets?preferred_identifier=%s&removed=true" + % 
urllib.parse.quote(self.preferred_identifier) + ) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_by_preferred_identifier_search_prefers_oldest_data_catalog(self): - ''' + """ Search by preferred_identifier should prefer hits from oldest created catalogs which are assumed to be att/fairdata catalogs. - ''' + """ # get a cr that has alternate records - cr = self._get_object_from_test_data('catalogrecord', requested_index=9) - pid = cr['research_dataset']['preferred_identifier'] + cr = self._get_object_from_test_data("catalogrecord", requested_index=9) + pid = cr["research_dataset"]["preferred_identifier"] # verify there are more than one record with same pid! count = CatalogRecord.objects.filter(research_dataset__preferred_identifier=pid).count() - self.assertEqual(count > 1, True, 'makes no sense to test with a pid that exists only once') + self.assertEqual(count > 1, True, "makes no sense to test with a pid that exists only once") # the retrieved record should be the one that is in catalog 1 - response = self.client.get('/rest/datasets?preferred_identifier=%s' % - urllib.parse.quote(pid)) - self.assertEqual('alternate_record_set' in response.data, True) - self.assertEqual(response.data['data_catalog']['id'], cr['data_catalog']) + response = self.client.get( + "/rest/datasets?preferred_identifier=%s" % urllib.parse.quote(pid) + ) + self.assertEqual("alternate_record_set" in response.data, True) + self.assertEqual(response.data["data_catalog"]["id"], cr["data_catalog"]) def test_read_catalog_record_details_not_found(self): - response = self.client.get('/rest/datasets/shouldnotexist') + response = self.client.get("/rest/datasets/shouldnotexist") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_read_catalog_record_metadata_version_identifiers(self): - response = self.client.get('/rest/datasets/metadata_version_identifiers') + response = self.client.get("/rest/datasets/metadata_version_identifiers") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(isinstance(response.data, list)) self.assertTrue(len(response.data) > 0) @@ -168,7 +181,7 @@ def test_get_unique_preferred_identifiers(self): """ Get all unique preferred_identifiers, no matter if they are the latest dataset version or not. """ - response = self.client.get('/rest/datasets/unique_preferred_identifiers') + response = self.client.get("/rest/datasets/unique_preferred_identifiers") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(isinstance(response.data, list)) self.assertTrue(len(response.data) > 0) @@ -178,14 +191,14 @@ def test_get_unique_preferred_identifiers(self): self._create_new_ds() self._create_new_ds() - response = self.client.get('/rest/datasets/unique_preferred_identifiers') - self.assertEqual(len(response.data) - ids_len, 2, 'should be two new PIDs') + response = self.client.get("/rest/datasets/unique_preferred_identifiers") + self.assertEqual(len(response.data) - ids_len, 2, "should be two new PIDs") def test_get_latest_unique_preferred_identifiers(self): """ Get all unique preferred_identifiers, but only from the latest dataset versions. 
""" - response = self.client.get('/rest/datasets/unique_preferred_identifiers?latest') + response = self.client.get("/rest/datasets/unique_preferred_identifiers?latest") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(isinstance(response.data, list)) self.assertTrue(len(response.data) > 0) @@ -195,70 +208,84 @@ def test_get_latest_unique_preferred_identifiers(self): # files change cr = CatalogRecord.objects.get(pk=1) - new_file_id = cr.files.all().order_by('-id').first().id + 1 - file_from_testdata = self._get_object_from_test_data('file', requested_index=new_file_id) + new_file_id = cr.files.all().order_by("-id").first().id + 1 + file_from_testdata = self._get_object_from_test_data("file", requested_index=new_file_id) # warning, this is actual file metadata, would not pass schema validation if sent through api - cr.research_dataset['files'] = [file_from_testdata] + cr.research_dataset["files"] = [file_from_testdata] cr.save() - response = self.client.get('/rest/datasets/unique_preferred_identifiers?latest') - self.assertEqual(ids_len, len(response.data), 'count should stay the same') + response = self.client.get("/rest/datasets/unique_preferred_identifiers?latest") + self.assertEqual(ids_len, len(response.data), "count should stay the same") # create new self._create_new_ds() self._create_new_ds() - response = self.client.get('/rest/datasets/unique_preferred_identifiers?latest') - self.assertEqual(len(response.data) - ids_len, 2, 'should be two new PIDs') + response = self.client.get("/rest/datasets/unique_preferred_identifiers?latest") + self.assertEqual(len(response.data) - ids_len, 2, "should be two new PIDs") def test_expand_relations(self): cr = CatalogRecord.objects.get(pk=1) cr.contract_id = 1 cr.force_save() - response = self.client.get('/rest/datasets/1?expand_relation=data_catalog,contract') + response = self.client.get("/rest/datasets/1?expand_relation=data_catalog,contract") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('catalog_json' in response.data['data_catalog'], True, response.data['data_catalog']) - self.assertEqual('contract_json' in response.data['contract'], True, response.data['contract']) + self.assertEqual( + "catalog_json" in response.data["data_catalog"], + True, + response.data["data_catalog"], + ) + self.assertEqual( + "contract_json" in response.data["contract"], + True, + response.data["contract"], + ) def test_strip_sensitive_fields(self): """ Strip fields not intended for general public """ + def _check_fields(obj): - for sensitive_field in ['email', 'telephone', 'phone']: - self.assertEqual(sensitive_field not in obj['research_dataset']['curator'][0], True, - 'field %s should have been stripped' % sensitive_field) + for sensitive_field in ["email", "telephone", "phone"]: + self.assertEqual( + sensitive_field not in obj["research_dataset"]["curator"][0], + True, + "field %s should have been stripped" % sensitive_field, + ) for cr in CatalogRecord.objects.filter(pk__in=(1, 2, 3)): - cr.research_dataset['curator'][0].update({ - 'email': 'email@mail.com', - 'phone': '123124', - 'telephone': '123124', - }) + cr.research_dataset["curator"][0].update( + { + "email": "email@mail.com", + "phone": "123124", + "telephone": "123124", + } + ) cr.force_save() self.client._credentials = {} - response = self.client.get('/rest/datasets/1') + response = self.client.get("/rest/datasets/1") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) _check_fields(response.data) - response 
= self.client.get('/rest/datasets') + response = self.client.get("/rest/datasets") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - for obj in response.data['results']: + for obj in response.data["results"]: _check_fields(obj) - response = self.client.get('/rest/datasets?no_pagination') + response = self.client.get("/rest/datasets?no_pagination") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) for obj in response.data: _check_fields(obj) def _create_new_ds(self): - new_cr = self.client.get('/rest/datasets/2').data - new_cr.pop('id') - new_cr['research_dataset'].pop('preferred_identifier') - new_cr.pop('identifier') + new_cr = self.client.get("/rest/datasets/2").data + new_cr.pop("id") + new_cr["research_dataset"].pop("preferred_identifier") + new_cr.pop("identifier") self._use_http_authorization() - response = self.client.post('/rest/datasets', new_cr, format='json') + response = self.client.post("/rest/datasets", new_cr, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) @@ -269,119 +296,145 @@ class CatalogRecordApiReadBasicAuthorizationTests(CatalogRecordApiReadCommon): # THE OK TESTS - def test_returns_all_file_dir_info_for_open_catalog_record_if_no_authorization(self): + def test_returns_all_file_dir_info_for_open_catalog_record_if_no_authorization( + self, + ): open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() # Verify all file and dir details info is returned for open cr /rest/datasets/ without # authorization - self._assert_ok(open_cr_json, 'no') + self._assert_ok(open_cr_json, "no") - def test_returns_all_file_dir_info_for_login_catalog_record_if_no_authorization(self): - login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(use_login_access_type=True) + def test_returns_all_file_dir_info_for_login_catalog_record_if_no_authorization( + self, + ): + login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details( + use_login_access_type=True + ) # Verify all file and dir details info is returned for login cr /rest/datasets/ without authorization - self._assert_ok(login_cr_json, 'no') + self._assert_ok(login_cr_json, "no") @responses.activate - def test_returns_all_file_dir_info_for_open_catalog_record_if_owner_authorization(self): + def test_returns_all_file_dir_info_for_open_catalog_record_if_owner_authorization( + self, + ): self.create_end_user_data_catalogs() open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(True) # Verify all file and dir details info is returned for open owner-owned cr /rest/datasets/ with # owner authorization - self._assert_ok(open_cr_json, 'owner') + self._assert_ok(open_cr_json, "owner") @responses.activate - def test_returns_all_file_dir_info_for_login_catalog_record_if_owner_authorization(self): + def test_returns_all_file_dir_info_for_login_catalog_record_if_owner_authorization( + self, + ): self.create_end_user_data_catalogs() - login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(set_owner=True, - use_login_access_type=True) + login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details( + set_owner=True, use_login_access_type=True + ) # Verify all file and dir details info is returned for login owner-owned cr /rest/datasets/ with # owner authorization - self._assert_ok(login_cr_json, 'owner') + self._assert_ok(login_cr_json, "owner") - def 
test_returns_all_file_dir_info_for_restricted_catalog_record_if_service_authorization(self): + def test_returns_all_file_dir_info_for_restricted_catalog_record_if_service_authorization( + self, + ): restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() # Verify all file and dir details info is returned for restricted cr /rest/datasets/ with # service authorization - self._assert_ok(restricted_cr_json, 'service') + self._assert_ok(restricted_cr_json, "service") @responses.activate - def test_returns_all_file_dir_info_for_restricted_catalog_record_if_owner_authorization(self): + def test_returns_all_file_dir_info_for_restricted_catalog_record_if_owner_authorization( + self, + ): self.create_end_user_data_catalogs() - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details(True) + restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details( + True + ) # Verify all file and dir details info is returned for restricted owner-owned cr /rest/datasets/ with # owner authorization - self._assert_ok(restricted_cr_json, 'owner') + self._assert_ok(restricted_cr_json, "owner") - def test_returns_all_file_dir_info_for_embargoed_catalog_record_if_available_reached_and_no_authorization(self): - available_embargoed_cr_json = self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) + def test_returns_all_file_dir_info_for_embargoed_catalog_record_if_available_reached_and_no_authorization( + self, + ): + available_embargoed_cr_json = ( + self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) + ) # Verify all file and dir details info is returned for embargoed cr /rest/datasets/ when # embargo date has been reached without authorization - self._assert_ok(available_embargoed_cr_json, 'no') + self._assert_ok(available_embargoed_cr_json, "no") # THE FORBIDDEN TESTS - def test_returns_limited_file_dir_info_for_restricted_catalog_record_if_no_authorization(self): + def test_returns_limited_file_dir_info_for_restricted_catalog_record_if_no_authorization( + self, + ): restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() # Verify limited file and dir info for restricted cr /rest/datasets/ without authorization - self._assert_limited_or_no_file_dir_info(restricted_cr_json, 'no') + self._assert_limited_or_no_file_dir_info(restricted_cr_json, "no") - def test_no_file_dir_info_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization(self): - not_available_embargoed_cr_json = self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details( - False) + def test_no_file_dir_info_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization( + self, + ): + not_available_embargoed_cr_json = ( + self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(False) + ) # Verify no file and dir info for embargoed cr /rest/datasets/ when embargo date has not # been reached without authorization - self._assert_limited_or_no_file_dir_info(not_available_embargoed_cr_json, 'no') + self._assert_limited_or_no_file_dir_info(not_available_embargoed_cr_json, "no") def _assert_limited_or_no_file_dir_info(self, cr_json, credentials_type): self._set_http_authorization(credentials_type) - file_amt = len(cr_json['research_dataset']['files']) - dir_amt = len(cr_json['research_dataset']['directories']) - pk = cr_json['id'] + file_amt = len(cr_json["research_dataset"]["files"]) + dir_amt = 
len(cr_json["research_dataset"]["directories"]) + pk = cr_json["id"] - response = self.client.get('/rest/datasets/{0}?file_details'.format(pk)) + response = self.client.get("/rest/datasets/{0}?file_details".format(pk)) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['files']), file_amt) - self.assertEqual(len(response.data['research_dataset']['directories']), dir_amt) + self.assertEqual(len(response.data["research_dataset"]["files"]), file_amt) + self.assertEqual(len(response.data["research_dataset"]["directories"]), dir_amt) - for f in response.data['research_dataset']['files']: - self.assertTrue('details' in f) + for f in response.data["research_dataset"]["files"]: + self.assertTrue("details" in f) # The below assert is a bit arbitrary - self.assertFalse('identifier' in f) - for d in response.data['research_dataset']['directories']: - self.assertTrue('details' in d) + self.assertFalse("identifier" in f) + for d in response.data["research_dataset"]["directories"]: + self.assertTrue("details" in d) # The below assert is a bit arbitrary - self.assertFalse('identifier' in d) + self.assertFalse("identifier" in d) def _assert_ok(self, cr_json, credentials_type): self._set_http_authorization(credentials_type) - file_amt = len(cr_json['research_dataset']['files']) - dir_amt = len(cr_json['research_dataset']['directories']) - pk = cr_json['id'] + file_amt = len(cr_json["research_dataset"]["files"]) + dir_amt = len(cr_json["research_dataset"]["directories"]) + pk = cr_json["id"] - response = self.client.get('/rest/datasets/{0}?file_details'.format(pk)) + response = self.client.get("/rest/datasets/{0}?file_details".format(pk)) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['files']), file_amt) - self.assertEqual(len(response.data['research_dataset']['directories']), dir_amt) + self.assertEqual(len(response.data["research_dataset"]["files"]), file_amt) + self.assertEqual(len(response.data["research_dataset"]["directories"]), dir_amt) - for f in response.data['research_dataset']['files']: - self.assertTrue('details' in f) + for f in response.data["research_dataset"]["files"]: + self.assertTrue("details" in f) # The below assert is a bit arbitrary - self.assertTrue('identifier' in f) - for d in response.data['research_dataset']['directories']: - self.assertTrue('details' in d) + self.assertTrue("identifier" in f) + for d in response.data["research_dataset"]["directories"]: + self.assertTrue("details" in d) # The below assert is a bit arbitrary - self.assertTrue('identifier' in d) + self.assertTrue("identifier" in d) class CatalogRecordApiReadPreservationStateTests(CatalogRecordApiReadCommon): @@ -390,134 +443,159 @@ class CatalogRecordApiReadPreservationStateTests(CatalogRecordApiReadCommon): """ def test_read_catalog_record_search_by_preservation_state(self): - ''' + """ Various simple filtering requests - ''' - for queryparam in ('preservation_state', 'state'): - response = self.client.get('/rest/datasets?{}=0'.format(queryparam)) + """ + for queryparam in ("preservation_state", "state"): + response = self.client.get("/rest/datasets?{}=0".format(queryparam)) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']) > 2, True, - 'There should have been multiple results for state=0 request') + self.assertEqual( + len(response.data["results"]) > 2, + True, + "There should have been multiple results for state=0 
request", + ) - response = self.client.get('/rest/datasets?{}=10'.format(queryparam)) + response = self.client.get("/rest/datasets?{}=10".format(queryparam)) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 2) + self.assertEqual(len(response.data["results"]), 2) - response = self.client.get('/rest/datasets?{}=40'.format(queryparam)) + response = self.client.get("/rest/datasets?{}=40".format(queryparam)) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 1) + self.assertEqual(len(response.data["results"]), 1) def test_read_catalog_record_search_by_preservation_state_666(self): - for queryparam in ('preservation_state', 'state'): - response = self.client.get('/rest/datasets?{}=666'.format(queryparam)) + for queryparam in ("preservation_state", "state"): + response = self.client.get("/rest/datasets?{}=666".format(queryparam)) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 0, 'should return empty list') + self.assertEqual(len(response.data["results"]), 0, "should return empty list") def test_read_catalog_record_search_by_preservation_state_many(self): - for queryparam in ('preservation_state', 'state'): - response = self.client.get('/rest/datasets?{}=10,40'.format(queryparam)) + for queryparam in ("preservation_state", "state"): + response = self.client.get("/rest/datasets?{}=10,40".format(queryparam)) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 3) - self.assertEqual(response.data['results'][0]['preservation_state'], 10) - self.assertEqual(response.data['results'][1]['preservation_state'], 10) - self.assertEqual(response.data['results'][2]['preservation_state'], 40) + self.assertEqual(len(response.data["results"]), 3) + self.assertEqual(response.data["results"][0]["preservation_state"], 10) + self.assertEqual(response.data["results"][1]["preservation_state"], 10) + self.assertEqual(response.data["results"][2]["preservation_state"], 40) def test_read_catalog_record_search_by_preservation_state_invalid_value(self): - response = self.client.get('/rest/datasets?state=1,a') + response = self.client.get("/rest/datasets?state=1,a") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('is not an integer' in response.data['state'][0], True, - 'Error should say letter a is not an integer') + self.assertEqual( + "is not an integer" in response.data["state"][0], + True, + "Error should say letter a is not an integer", + ) - response = self.client.get('/rest/datasets?preservation_state=1,a') + response = self.client.get("/rest/datasets?preservation_state=1,a") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('is not an integer' in response.data['preservation_state'][0], True, - 'Error should say letter a is not an integer') + self.assertEqual( + "is not an integer" in response.data["preservation_state"][0], + True, + "Error should say letter a is not an integer", + ) -class CatalogRecordApiReadActorFilter(CatalogRecordApiReadCommon): +class CatalogRecordApiReadActorFilter(CatalogRecordApiReadCommon): def test_agents_and_actors(self): # set test conditions cr = CatalogRecord.objects.get(pk=11) - cr.research_dataset['curator'] = [] - cr.research_dataset['curator'].append({ - '@type': 'Person', - 'name': 'Tarmo Termiitti', - 'member_of': { - 'identifier': 'org_identifier', - 'name': { - 'en': 
'Unique Organization'
- }
+ {
+ "@type": "Person",
+ "name": "Tarmo Termiitti",
+ "member_of": {
+ "identifier": "org_identifier",
+ "name": {"en": "Unique Organization"},
+ },
}
- })
- cr.research_dataset['curator'].append({ '@type': 'Person', 'name': 'Keijo Kottarainen' })
- cr.research_dataset['curator'].append({ '@type': 'Person', 'name': 'Janus Järvinen' })
- cr.research_dataset['curator'].append({ '@type': 'Person', 'name': 'Laina Sakkonen' })
- cr.research_dataset['curator'].append({
- '@type': 'Person',
- 'name': 'Kaisa Kuraattori',
- 'member_of': {
- 'identifier': 'org_identifier',
- 'name': {
- 'en': 'Happy Organization'
- }
+ )
+ cr.research_dataset["curator"].append({"@type": "Person", "name": "Keijo Kottarainen"})
+ cr.research_dataset["curator"].append({"@type": "Person", "name": "Janus Järvinen"})
+ cr.research_dataset["curator"].append({"@type": "Person", "name": "Laina Sakkonen"})
+ cr.research_dataset["curator"].append(
+ {
+ "@type": "Person",
+ "name": "Kaisa Kuraattori",
+ "member_of": {
+ "identifier": "org_identifier",
+ "name": {"en": "Happy Organization"},
+ },
}
- })
- cr.research_dataset['creator'] = []
- cr.research_dataset['creator'].append({ '@type': 'Organization', 'name': { 'en': 'Unique Organization'} })
- cr.research_dataset['creator'].append({ '@type': 'Organization', 'name': { 'en': 'Happy Organization'} })
- cr.research_dataset['creator'].append({ '@type': 'Organization', 'name': { 'en': 'Sad Organization'} })
- cr.research_dataset['creator'].append({ '@type': 'Organization', 'name': { 'en': 'Brilliant Organization'} })
- cr.research_dataset['creator'].append({ '@type': 'Organization', 'name': {'en': 'Wonderful Organization'} })
- cr.research_dataset['publisher']['name'] = {}
- cr.research_dataset['publisher']['name'] = { 'fi': 'Originaali Organisaatio' }
+ )
+ cr.research_dataset["creator"] = []
+ cr.research_dataset["creator"].append(
+ {"@type": "Organization", "name": {"en": "Unique Organization"}}
+ )
+ cr.research_dataset["creator"].append(
+ {"@type": "Organization", "name": {"en": "Happy Organization"}}
+ )
+ cr.research_dataset["creator"].append(
+ {"@type": "Organization", "name": {"en": "Sad Organization"}}
+ )
+ cr.research_dataset["creator"].append(
+ {"@type": "Organization", "name": {"en": "Brilliant Organization"}}
+ )
+ cr.research_dataset["creator"].append(
+ {"@type": "Organization", "name": {"en": "Wonderful Organization"}}
+ )
+ cr.research_dataset["publisher"]["name"] = {}
+ cr.research_dataset["publisher"]["name"] = {"fi": "Originaali Organisaatio"}
cr.force_save()
- response = self.client.get('/rest/datasets?creator_organization=happy')
- self.assertEqual(len(response.data['results']), 1, response.data)
+ response = self.client.get("/rest/datasets?creator_organization=happy")
+ self.assertEqual(len(response.data["results"]), 1, response.data)
- response = self.client.get('/rest/datasets?creator_organization=Brilliant Organization')
- self.assertEqual(len(response.data['results']), 1, response.data)
+ response = self.client.get("/rest/datasets?creator_organization=Brilliant Organization")
+ self.assertEqual(len(response.data["results"]), 1, response.data)
- response = self.client.get('/rest/datasets?curator_person=termiitti')
- self.assertEqual(len(response.data['results']), 1, response.data)
+ response = self.client.get("/rest/datasets?curator_person=termiitti")
+ self.assertEqual(len(response.data["results"]), 1, response.data)
- response =
self.client.get('/rest/datasets?curator_person=Laina Sakkonen')
- self.assertEqual(len(response.data['results']), 1, response.data)
+ response = self.client.get("/rest/datasets?curator_person=Laina Sakkonen")
+ self.assertEqual(len(response.data["results"]), 1, response.data)
- response = self.client.get('/rest/datasets?curator_organization=uniqu')
- self.assertEqual(len(response.data['results']), 1, response.data)
+ response = self.client.get("/rest/datasets?curator_organization=uniqu")
+ self.assertEqual(len(response.data["results"]), 1, response.data)
- response = self.client.get('/rest/datasets?curator_organization=Happy Organization')
- self.assertEqual(len(response.data['results']), 1, response.data)
+ response = self.client.get("/rest/datasets?curator_organization=Happy Organization")
+ self.assertEqual(len(response.data["results"]), 1, response.data)
- response = self.client.get('/rest/datasets?publisher_organization=originaali Organisaatio')
- self.assertEqual(len(response.data['results']), 1, response.data)
+ response = self.client.get("/rest/datasets?publisher_organization=originaali Organisaatio")
+ self.assertEqual(len(response.data["results"]), 1, response.data)
- query = 'curator_person=notfound&creator_organization=sad organ&condition_separator=AND'
- response = self.client.get('/rest/datasets?%s' % query)
- self.assertEqual(len(response.data['results']), 0, response.data)
+ query = "curator_person=notfound&creator_organization=sad organ&condition_separator=AND"
+ response = self.client.get("/rest/datasets?%s" % query)
+ self.assertEqual(len(response.data["results"]), 0, response.data)
- query = 'curator_person=notfound&creator_organization=sad organ&condition_separator=OR'
- response = self.client.get('/rest/datasets?%s' % query)
- self.assertEqual(len(response.data['results']), 1, response.data)
+ query = "curator_person=notfound&creator_organization=sad organ&condition_separator=OR"
+ response = self.client.get("/rest/datasets?%s" % query)
+ self.assertEqual(len(response.data["results"]), 1, response.data)
# test filter with pas filter
"""
Both organization and pas filters internally use Q-filters, which are supposed to be AND'ed together.
"""
metax_user = settings.API_METAX_USER
- self._use_http_authorization(username=metax_user['username'], password=metax_user['password'])
+ self._use_http_authorization(
+ username=metax_user["username"], password=metax_user["password"]
+ )
- response = self.client.get('/rest/datasets?pas_filter=janus&creator_organization=sad organization')
- self.assertEqual(len(response.data['results']), 1)
+ response = self.client.get(
+ "/rest/datasets?pas_filter=janus&creator_organization=sad organization"
+ )
+ self.assertEqual(len(response.data["results"]), 1)
- for queryparam in ('preservation_state', 'state'):
- response = self.client.get('/rest/datasets?{}=10&pas_filter=kaisa&'
- 'creator_organization=notfound'.format(queryparam))
- self.assertEqual(len(response.data['results']), 0)
+ for queryparam in ("preservation_state", "state"):
+ response = self.client.get(
+ "/rest/datasets?{}=10&pas_filter=kaisa&"
+ "creator_organization=notfound".format(queryparam)
+ )
+ self.assertEqual(len(response.data["results"]), 0)
-class CatalogRecordApiReadPASFilter(CatalogRecordApiReadCommon):
+class CatalogRecordApiReadPASFilter(CatalogRecordApiReadCommon):
def test_pas_filter(self):
"""
Test query param pas_filter which should search from various fields using the same search term.
@@ -527,54 +605,64 @@ def test_pas_filter(self):
        cr = CatalogRecord.objects.get(pk=1)
        cr.preservation_state = 10
        cr.contract_id = 1
-        cr.research_dataset['title']['en'] = 'Catch me if you can'
-        cr.research_dataset['title']['fi'] = 'Ota kiinni jos saat'
-        cr.research_dataset['curator'] = []
-        cr.research_dataset['curator'].append({ 'name': 'Seppo Hovi' })
-        cr.research_dataset['curator'].append({ 'name': 'Esa Nieminen' })
-        cr.research_dataset['curator'].append({ 'name': 'Aku Ankka' })
-        cr.research_dataset['curator'].append({ 'name': 'Jaska Jokunen' })
+        cr.research_dataset["title"]["en"] = "Catch me if you can"
+        cr.research_dataset["title"]["fi"] = "Ota kiinni jos saat"
+        cr.research_dataset["curator"] = []
+        cr.research_dataset["curator"].append({"name": "Seppo Hovi"})
+        cr.research_dataset["curator"].append({"name": "Esa Nieminen"})
+        cr.research_dataset["curator"].append({"name": "Aku Ankka"})
+        cr.research_dataset["curator"].append({"name": "Jaska Jokunen"})
        cr.force_save()

        contract = Contract.objects.get(pk=1)
-        contract.contract_json['title'] = 'An Important Agreement'
+        contract.contract_json["title"] = "An Important Agreement"
        contract.save()

        metax_user = settings.API_METAX_USER
-        self._use_http_authorization(username=metax_user['username'], password=metax_user['password'])
+        self._use_http_authorization(
+            username=metax_user["username"], password=metax_user["password"]
+        )

        # begin testing

-        for queryparam in ('preservation_state', 'state'):
-            response = self.client.get('/rest/datasets?{}=10&pas_filter=if you'.format(queryparam))
+        for queryparam in ("preservation_state", "state"):
+            response = self.client.get("/rest/datasets?{}=10&pas_filter=if you".format(queryparam))
            self.assertEqual(response.status_code, status.HTTP_200_OK)
-            self.assertEqual(len(response.data['results']), 1)
+            self.assertEqual(len(response.data["results"]), 1)

-            response = self.client.get('/rest/datasets?{}=10&pas_filter=kiinni jos'.format(queryparam))
-            self.assertEqual(len(response.data['results']), 1)
+            response = self.client.get(
+                "/rest/datasets?{}=10&pas_filter=kiinni jos".format(queryparam)
+            )
+            self.assertEqual(len(response.data["results"]), 1)

-            response = self.client.get('/rest/datasets?{}=10&pas_filter=niemine'.format(queryparam))
-            self.assertEqual(len(response.data['results']), 1)
+            response = self.client.get("/rest/datasets?{}=10&pas_filter=niemine".format(queryparam))
+            self.assertEqual(len(response.data["results"]), 1)

            # more than 3 curators, requires typing exact case-sensitive name...
see comments in related code - response = self.client.get('/rest/datasets?{}=10&pas_filter=jokunen'.format(queryparam)) - self.assertEqual(len(response.data['results']), 0) - response = self.client.get('/rest/datasets?{}=10&pas_filter=Jaska Jokunen'.format(queryparam)) - self.assertEqual(len(response.data['results']), 1) + response = self.client.get("/rest/datasets?{}=10&pas_filter=jokunen".format(queryparam)) + self.assertEqual(len(response.data["results"]), 0) + response = self.client.get( + "/rest/datasets?{}=10&pas_filter=Jaska Jokunen".format(queryparam) + ) + self.assertEqual(len(response.data["results"]), 1) # contract_id 1 has several other associated test datasets - response = self.client.get('/rest/datasets?{}=10&pas_filter=agreement'.format(queryparam)) - self.assertEqual(len(response.data['results']), 3) + response = self.client.get( + "/rest/datasets?{}=10&pas_filter=agreement".format(queryparam) + ) + self.assertEqual(len(response.data["results"]), 3) - response = self.client.get('/rest/datasets?{}=10&pas_filter=does not exist'.format(queryparam)) - self.assertEqual(len(response.data['results']), 0) + response = self.client.get( + "/rest/datasets?{}=10&pas_filter=does not exist".format(queryparam) + ) + self.assertEqual(len(response.data["results"]), 0) def test_pas_filter_is_restricted(self): """ Query param is permitted to users metax and tpas. """ - for queryparam in ('preservation_state', 'state'): - response = self.client.get('/rest/datasets?{}=10&pas_filter=hmmm'.format(queryparam)) + for queryparam in ("preservation_state", "state"): + response = self.client.get("/rest/datasets?{}=10&pas_filter=hmmm".format(queryparam)) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) @@ -585,151 +673,183 @@ class CatalogRecordApiReadQueryParamsTests(CatalogRecordApiReadCommon): """ def test_read_catalog_record_search_by_curator_1(self): - response = self.client.get('/rest/datasets?curator=id:of:curator:rahikainen') + response = self.client.get("/rest/datasets?curator=id:of:curator:rahikainen") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 10) - self.assertEqual(response.data['results'][0]['research_dataset']['curator'][0]['name'], 'Rahikainen', - 'Curator name is not matching') - self.assertEqual(response.data['results'][4]['research_dataset']['curator'][0]['name'], 'Rahikainen', - 'Curator name is not matching') + self.assertEqual(len(response.data["results"]), 10) + self.assertEqual( + response.data["results"][0]["research_dataset"]["curator"][0]["name"], + "Rahikainen", + "Curator name is not matching", + ) + self.assertEqual( + response.data["results"][4]["research_dataset"]["curator"][0]["name"], + "Rahikainen", + "Curator name is not matching", + ) def test_read_catalog_record_search_by_curator_2(self): - response = self.client.get('/rest/datasets?curator=id:of:curator:jarski') + response = self.client.get("/rest/datasets?curator=id:of:curator:jarski") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data), 4) - self.assertEqual(response.data['results'][0]['research_dataset']['curator'][0]['name'], 'Jarski', - 'Curator name is not matching') - self.assertEqual(response.data['results'][3]['research_dataset']['curator'][0]['name'], 'Jarski', - 'Curator name is not matching') + self.assertEqual( + response.data["results"][0]["research_dataset"]["curator"][0]["name"], + "Jarski", + "Curator name is not matching", + ) + self.assertEqual( + 
response.data["results"][3]["research_dataset"]["curator"][0]["name"], + "Jarski", + "Curator name is not matching", + ) def test_read_catalog_record_search_by_curator_not_found_1(self): - response = self.client.get('/rest/datasets?curator=Not Found') + response = self.client.get("/rest/datasets?curator=Not Found") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 0) + self.assertEqual(len(response.data["results"]), 0) def test_read_catalog_record_search_by_curator_not_found_case_sensitivity(self): - response = self.client.get('/rest/datasets?curator=id:of:curator:Rahikainen') + response = self.client.get("/rest/datasets?curator=id:of:curator:Rahikainen") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 0) + self.assertEqual(len(response.data["results"]), 0) def test_read_catalog_record_search_by_curator_and_state_1(self): - for queryparam in ('preservation_state', 'state'): - response = self.client.get('/rest/datasets?curator=id:of:curator:rahikainen&{}=10'.format(queryparam)) + for queryparam in ("preservation_state", "state"): + response = self.client.get( + "/rest/datasets?curator=id:of:curator:rahikainen&{}=10".format(queryparam) + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 2) - self.assertEqual(response.data['results'][0]['id'], 2) - self.assertEqual(response.data['results'][0]['preservation_state'], 10) - self.assertEqual(response.data['results'][0]['research_dataset']['curator'][0]['name'], 'Rahikainen', - 'Curator name is not matching') + self.assertEqual(len(response.data["results"]), 2) + self.assertEqual(response.data["results"][0]["id"], 2) + self.assertEqual(response.data["results"][0]["preservation_state"], 10) + self.assertEqual( + response.data["results"][0]["research_dataset"]["curator"][0]["name"], + "Rahikainen", + "Curator name is not matching", + ) def test_read_catalog_record_search_by_curator_and_state_2(self): - for queryparam in ('preservation_state', 'state'): - response = self.client.get('/rest/datasets?curator=id:of:curator:rahikainen&{}=40'.format(queryparam)) + for queryparam in ("preservation_state", "state"): + response = self.client.get( + "/rest/datasets?curator=id:of:curator:rahikainen&{}=40".format(queryparam) + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 1) - self.assertEqual(response.data['results'][0]['id'], 4) - self.assertEqual(response.data['results'][0]['preservation_state'], 40) - self.assertEqual(response.data['results'][0]['research_dataset']['curator'][0]['name'], 'Rahikainen', - 'Curator name is not matching') + self.assertEqual(len(response.data["results"]), 1) + self.assertEqual(response.data["results"][0]["id"], 4) + self.assertEqual(response.data["results"][0]["preservation_state"], 40) + self.assertEqual( + response.data["results"][0]["research_dataset"]["curator"][0]["name"], + "Rahikainen", + "Curator name is not matching", + ) def test_read_catalog_record_search_by_curator_and_state_not_found(self): - for queryparam in ('preservation_state', 'state'): - response = self.client.get('/rest/datasets?curator=id:of:curator:rahikainen&{}=55'.format(queryparam)) + for queryparam in ("preservation_state", "state"): + response = self.client.get( + "/rest/datasets?curator=id:of:curator:rahikainen&{}=55".format(queryparam) + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - 
self.assertEqual(len(response.data['results']), 0) + self.assertEqual(len(response.data["results"]), 0) def test_read_catalog_record_search_by_owner_id(self): cr = CatalogRecord.objects.get(pk=1) - cr.editor = { 'owner_id': '123' } + cr.editor = {"owner_id": "123"} cr.save() - response = self.client.get('/rest/datasets?owner_id=123') + response = self.client.get("/rest/datasets?owner_id=123") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 1) - self.assertEqual(response.data['results'][0]['editor']['owner_id'], '123') + self.assertEqual(len(response.data["results"]), 1) + self.assertEqual(response.data["results"][0]["editor"]["owner_id"], "123") def test_read_catalog_record_search_by_creator_id(self): cr = CatalogRecord.objects.get(pk=1) - cr.user_created = '123' + cr.user_created = "123" cr.force_save() - response = self.client.get('/rest/datasets?user_created=123') + response = self.client.get("/rest/datasets?user_created=123") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 1) - self.assertEqual(response.data['results'][0]['user_created'], '123') + self.assertEqual(len(response.data["results"]), 1) + self.assertEqual(response.data["results"][0]["user_created"], "123") def test_read_catalog_record_search_by_editor(self): - response = self.client.get('/rest/datasets?editor=mspaint') + response = self.client.get("/rest/datasets?editor=mspaint") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['count'], 0) + self.assertEqual(response.data["count"], 0) - response = self.client.get('/rest/datasets?editor=qvain') + response = self.client.get("/rest/datasets?editor=qvain") self.assertEqual(response.status_code, status.HTTP_200_OK) - qvain_records_count = response.data['count'] + qvain_records_count = response.data["count"] self.assertEqual(qvain_records_count > 0, True) - response = self.client.get('/rest/datasets') - self.assertNotEqual(response.data['count'], qvain_records_count, 'looks like filtering had no effect') + response = self.client.get("/rest/datasets") + self.assertNotEqual( + response.data["count"], + qvain_records_count, + "looks like filtering had no effect", + ) def test_read_catalog_record_search_by_metadata_provider_user(self): - response = self.client.get('/rest/datasets?metadata_provider_user=123') + response = self.client.get("/rest/datasets?metadata_provider_user=123") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['count'], 0) + self.assertEqual(response.data["count"], 0) cr = CatalogRecord.objects.get(pk=1) - cr.metadata_provider_user = '123' + cr.metadata_provider_user = "123" cr.force_save() - response = self.client.get('/rest/datasets?metadata_provider_user=123') + response = self.client.get("/rest/datasets?metadata_provider_user=123") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['count'], 1) + self.assertEqual(response.data["count"], 1) def test_read_catalog_record_search_by_metadata_owner_org(self): - owner_org = 'org_id' + owner_org = "org_id" for cr in CatalogRecord.objects.filter(pk__in=[1, 2, 3]): cr.metadata_owner_org = owner_org cr.force_save() - owner_org_2 = 'org_id_2' + owner_org_2 = "org_id_2" for cr in CatalogRecord.objects.filter(pk__in=[4, 5, 6]): cr.metadata_owner_org = owner_org_2 cr.force_save() - response = self.client.get('/rest/datasets?metadata_owner_org=%s' % owner_org) + response = 
self.client.get("/rest/datasets?metadata_owner_org=%s" % owner_org) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 3) + self.assertEqual(len(response.data["results"]), 3) - response = self.client.get('/rest/datasets?metadata_owner_org=%s,%s' % (owner_org, owner_org_2)) + response = self.client.get( + "/rest/datasets?metadata_owner_org=%s,%s" % (owner_org, owner_org_2) + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 6) + self.assertEqual(len(response.data["results"]), 6) def test_filter_by_contract_org_identifier(self): """ Test filtering by contract_org_identifier, which matches using iregex """ metax_user = settings.API_METAX_USER - self._use_http_authorization(username=metax_user['username'], password=metax_user['password']) + self._use_http_authorization( + username=metax_user["username"], password=metax_user["password"] + ) - response = self.client.get('/rest/datasets?contract_org_identifier=2345') + response = self.client.get("/rest/datasets?contract_org_identifier=2345") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 10) + self.assertEqual(len(response.data["results"]), 10) - response = self.client.get('/rest/datasets?contract_org_identifier=1234567-1') - self.assertEqual(len(response.data['results']), 10) + response = self.client.get("/rest/datasets?contract_org_identifier=1234567-1") + self.assertEqual(len(response.data["results"]), 10) - response = self.client.get('/rest/datasets?contract_org_identifier=1234567-123') - self.assertEqual(len(response.data['results']), 0) + response = self.client.get("/rest/datasets?contract_org_identifier=1234567-123") + self.assertEqual(len(response.data["results"]), 0) def test_filter_by_contract_org_identifier_is_restricted(self): - response = self.client.get('/rest/datasets?contract_org_identifier=1234') + response = self.client.get("/rest/datasets?contract_org_identifier=1234") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_read_catalog_record_search_by_data_catalog_id(self): from metax_api.models.data_catalog import DataCatalog # Create a new data catalog - dc = self._get_object_from_test_data('datacatalog', requested_index=0) - dc_id = 'original_dc_identifier' - dc['catalog_json']['identifier'] = dc_id - self.client.post('/rest/datacatalogs', dc, format="json") + dc = self._get_object_from_test_data("datacatalog", requested_index=0) + dc_id = "original_dc_identifier" + dc["catalog_json"]["identifier"] = dc_id + self.client.post("/rest/datacatalogs", dc, format="json") # Set the new data catalog for a catalog record and store the catalog record cr = CatalogRecord.objects.get(pk=1) @@ -737,51 +857,51 @@ def test_read_catalog_record_search_by_data_catalog_id(self): cr.force_save() # Verify - response = self.client.get('/rest/datasets?data_catalog={0}'.format(dc_id)) + response = self.client.get("/rest/datasets?data_catalog={0}".format(dc_id)) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 1) - self.assertEqual(response.data['results'][0]['data_catalog']['identifier'], dc_id) + self.assertEqual(len(response.data["results"]), 1) + self.assertEqual(response.data["results"][0]["data_catalog"]["identifier"], dc_id) def test_filter_by_deprecated(self): cr = CatalogRecord.objects.get(pk=1) cr.deprecated = True cr.force_save() - response = 
self.client.get('/rest/datasets?deprecated=true') + response = self.client.get("/rest/datasets?deprecated=true") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 1, response.data['results']) - self.assertTrue(response.data['results'][0]['deprecated'], response.data) + self.assertEqual(len(response.data["results"]), 1, response.data["results"]) + self.assertTrue(response.data["results"][0]["deprecated"], response.data) - response = self.client.get('/rest/datasets?deprecated=false') + response = self.client.get("/rest/datasets?deprecated=false") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['results'][0]['id'], 2, response.data) - self.assertFalse(response.data['results'][0]['deprecated'], response.data) + self.assertEqual(response.data["results"][0]["id"], 2, response.data) + self.assertFalse(response.data["results"][0]["deprecated"], response.data) - response = self.client.get('/rest/datasets?deprecated=badbool') + response = self.client.get("/rest/datasets?deprecated=badbool") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_filter_by_api_version(self): # update one dataset to v2 so that we get non-zero values for that as well - response = self.client.put('/rest/v2/datasets', [self.cr_from_test_data], format='json') + response = self.client.put("/rest/v2/datasets", [self.cr_from_test_data], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) # both models return correct count because v2 model is just a proxy - v1_count = CatalogRecord.objects.filter(api_meta__contains={'version': 1}).count() + v1_count = CatalogRecord.objects.filter(api_meta__contains={"version": 1}).count() - v2_count = CatalogRecord.objects.filter(api_meta__contains={'version': 2}).count() + v2_count = CatalogRecord.objects.filter(api_meta__contains={"version": 2}).count() - response = self.client.get('/rest/datasets?api_version=1') + response = self.client.get("/rest/datasets?api_version=1") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(v1_count, response.data['count'], response.data) + self.assertEqual(v1_count, response.data["count"], response.data) - response = self.client.get('/rest/datasets?api_version=2') + response = self.client.get("/rest/datasets?api_version=2") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(v2_count, response.data['count'], response.data) + self.assertEqual(v2_count, response.data["count"], response.data) - response = self.client.get('/rest/datasets?api_version=not_int') + response = self.client.get("/rest/datasets?api_version=not_int") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue('not an integer' in response.data['api_version'][0], response.data) + self.assertTrue("not an integer" in response.data["api_version"][0], response.data) def test_filter_by_legacy(self): self.create_legacy_data_catalogs() @@ -791,15 +911,16 @@ def test_filter_by_legacy(self): non_legacy_count = CatalogRecord.objects.exclude( data_catalog__catalog_json__identifier__in=settings.LEGACY_CATALOGS ).count() - response = self.client.get('/rest/datasets') + response = self.client.get("/rest/datasets") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(non_legacy_count, response.data['count'], response.data) + self.assertEqual(non_legacy_count, response.data["count"], response.data) # legacy datasets can be included with a 
parameter
        count_all = CatalogRecord.objects.count()
-        response = self.client.get('/rest/datasets?include_legacy')
+        response = self.client.get("/rest/datasets?include_legacy")
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(count_all, response.data['count'], response.data)
+        self.assertEqual(count_all, response.data["count"], response.data)
+

class CatalogRecordApiReadXMLTransformationTests(CatalogRecordApiReadCommon):
@@ -809,52 +930,61 @@ class CatalogRecordApiReadXMLTransformationTests(CatalogRecordApiReadCommon):

    def _create_dataset_with_doi(self):
        # Create ida data catalog
-        dc = self._get_object_from_test_data('datacatalog', requested_index=0)
+        dc = self._get_object_from_test_data("datacatalog", requested_index=0)
        dc_id = settings.IDA_DATA_CATALOG_IDENTIFIER
-        dc['catalog_json']['identifier'] = dc_id
-        self.client.post('/rest/datacatalogs', dc, format="json")
+        dc["catalog_json"]["identifier"] = dc_id
+        self.client.post("/rest/datacatalogs", dc, format="json")

        # Create new cr by requesting a doi identifier
-        cr_json = self.client.get('/rest/datasets/1').data
-        cr_json.pop('preservation_identifier', None)
-        cr_json.pop('identifier')
-        cr_json['research_dataset'].pop('preferred_identifier', None)
-        cr_json['research_dataset']['publisher'] = {'@type': 'Organization', 'name': {'und': 'Testaaja'}}
-        cr_json['research_dataset']['issued'] = '2010-01-01'
-        cr_json['data_catalog'] = dc_id
-        response = self.client.post('/rest/datasets?pid_type=doi', cr_json, format="json")
+        cr_json = self.client.get("/rest/datasets/1").data
+        cr_json.pop("preservation_identifier", None)
+        cr_json.pop("identifier")
+        cr_json["research_dataset"].pop("preferred_identifier", None)
+        cr_json["research_dataset"]["publisher"] = {
+            "@type": "Organization",
+            "name": {"und": "Testaaja"},
+        }
+        cr_json["research_dataset"]["issued"] = "2010-01-01"
+        cr_json["data_catalog"] = dc_id
+        response = self.client.post("/rest/datasets?pid_type=doi", cr_json, format="json")
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
        return response.data

    def test_read_dataset_xml_format_metax(self):
-        response = self.client.get('/rest/datasets/1?dataset_format=metax')
-        self._check_dataset_xml_format_response(response, '%s' % doi[len('doi:'):] in response.data, True, response.data)
+        self.assertEqual(
+            '<identifier identifierType="DOI">%s</identifier>' % doi[len("doi:") :] in response.data,
+            True,
+            response.data,
+        )

    def test_read_dataset_format_datacite_odd_lang_abbrevation(self):
        cr = CatalogRecord.objects.get(pk=1)
-        cr.research_dataset['publisher'] = {'@type': 'Organization', 'name': {'zk': 'Testiorganisaatio'}}
+        cr.research_dataset["publisher"] = {
+            "@type": "Organization",
+            "name": {"zk": "Testiorganisaatio"},
+        }
        cr.force_save()
-        response = self.client.get('/rest/datasets/1?dataset_format=fairdata_datacite')
+        response = self.client.get("/rest/datasets/1?dataset_format=fairdata_datacite")
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

    def test_read_dataset_format_dummy_datacite_doi(self):
@@ -863,28 +993,44 @@ def test_read_dataset_format_dummy_datacite_doi(self):
        and identifier value prefixed with 10.0/. If a real DOI is available in the dataset,
        then dummy should NOT be returned.
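        For example (illustrative, mirroring the assertions below): GET
        /rest/datasets/12?dataset_format=datacite&dummy_doi=true should return an
        identifier element whose value is prefixed with 10.0/.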
""" - pid = self.client.get('/rest/datasets/12').data['research_dataset']['preferred_identifier'] - self.assertEqual(pid.startswith('doi:'), False, pid) + pid = self.client.get("/rest/datasets/12").data["research_dataset"]["preferred_identifier"] + self.assertEqual(pid.startswith("doi:"), False, pid) - for dataset_format in ['datacite', 'fairdata_datacite']: - response = self.client.get('/rest/datasets/12?dataset_format=%s&dummy_doi=true' % dataset_format) + for dataset_format in ["datacite", "fairdata_datacite"]: + response = self.client.get( + "/rest/datasets/12?dataset_format=%s&dummy_doi=true" % dataset_format + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('' in response.data, True, response.data) - self.assertEqual('10.0/%s' % pid in response.data, True, response.data) + self.assertEqual( + '' in response.data, + True, + response.data, + ) + self.assertEqual("10.0/%s" % pid in response.data, True, response.data) # ensure if a real doi exists, then dummy should never be returned cr = self._create_dataset_with_doi() - response = self.client.get('/rest/datasets/%d?dataset_format=datacite&dummy_doi=true' % cr['id']) + response = self.client.get( + "/rest/datasets/%d?dataset_format=datacite&dummy_doi=true" % cr["id"] + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual('' in response.data, True, response.data) - self.assertEqual(cr['preservation_identifier'][len('doi:'):] in response.data, True, response.data) - self.assertEqual('10.0/%s' % pid in response.data, False, response.data) + self.assertEqual( + cr["preservation_identifier"][len("doi:") :] in response.data, + True, + response.data, + ) + self.assertEqual("10.0/%s" % pid in response.data, False, response.data) def _check_dataset_xml_format_response(self, response, element_name): self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('content-type' in response._headers, True, response._headers) - self.assertEqual('application/xml' in response._headers['content-type'][1], True, response._headers) - self.assertEqual(' 6) @@ -937,35 +1085,64 @@ class CatalogRecordApiReadPopulateFileInfoTests(CatalogRecordApiReadCommon): def test_file_details_populated(self): # without the flag nothing should happen - response = self.client.get('/rest/datasets/1') - self.assertEqual(all('details' not in f for f in response.data['research_dataset']['files']), True) + response = self.client.get("/rest/datasets/1") + self.assertEqual( + all("details" not in f for f in response.data["research_dataset"]["files"]), + True, + ) - response = self.client.get('/rest/datasets/1?file_details') + response = self.client.get("/rest/datasets/1?file_details") self.assertEqual(response.status_code, status.HTTP_200_OK) # check all fiels have the extra key 'details', and all details have the key 'identifier'. # presumably the details were then filled in. - self.assertEqual(all('details' in f for f in response.data['research_dataset']['files']), True) - self.assertEqual(all('identifier' in f['details'] for f in response.data['research_dataset']['files']), True) + self.assertEqual( + all("details" in f for f in response.data["research_dataset"]["files"]), + True, + ) + self.assertEqual( + all("identifier" in f["details"] for f in response.data["research_dataset"]["files"]), + True, + ) def test_directory_details_populated(self): # id 11 is one of the example datasets with full details. they should have a couple # of directories attached. 
CatalogRecord.objects.get(pk=11).calculate_directory_byte_sizes_and_file_counts() - response = self.client.get('/rest/datasets/11?file_details') + response = self.client.get("/rest/datasets/11?file_details") self.assertEqual(response.status_code, status.HTTP_200_OK) # check all dirs have the extra key 'details', and all details have the key 'identifier'. # presumably the details were then filled in. - self.assertEqual(all('details' in f for f in response.data['research_dataset']['directories']), True) - self.assertEqual(all('identifier' in f['details'] for f in response.data['research_dataset']['directories']), - True) + self.assertEqual( + all("details" in f for f in response.data["research_dataset"]["directories"]), + True, + ) + self.assertEqual( + all( + "identifier" in f["details"] + for f in response.data["research_dataset"]["directories"] + ), + True, + ) # additionally check that file counts and total byte sizes are as expected - self.assertEqual(response.data['research_dataset']['directories'][0]['details']['byte_size'], 21000) - self.assertEqual(response.data['research_dataset']['directories'][1]['details']['byte_size'], 21000) - self.assertEqual(response.data['research_dataset']['directories'][0]['details']['file_count'], 20) - self.assertEqual(response.data['research_dataset']['directories'][1]['details']['file_count'], 20) + self.assertEqual( + response.data["research_dataset"]["directories"][0]["details"]["byte_size"], + 21000, + ) + self.assertEqual( + response.data["research_dataset"]["directories"][1]["details"]["byte_size"], + 21000, + ) + self.assertEqual( + response.data["research_dataset"]["directories"][0]["details"]["file_count"], + 20, + ) + self.assertEqual( + response.data["research_dataset"]["directories"][1]["details"]["file_count"], + 20, + ) def test_file_details_for_deprecated_datasets(self): """ @@ -978,12 +1155,13 @@ def test_file_details_for_deprecated_datasets(self): cr = CatalogRecord.objects.get(pk=11) file_identifiers = File.objects.filter( - project_identifier=cr.files.all()[0].project_identifier).values_list('identifier', flat=True) + project_identifier=cr.files.all()[0].project_identifier + ).values_list("identifier", flat=True) - response = self.client.delete('/rest/files', data=file_identifiers, format='json') + response = self.client.delete("/rest/files", data=file_identifiers, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) - response = self.client.get('/rest/datasets/11?file_details', format='json') + response = self.client.get("/rest/datasets/11?file_details", format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) @@ -1000,21 +1178,21 @@ def test_returns_all_details_for_open_catalog_record_if_no_authorization(self): # Verify all file and dir details info is returned for open cr /rest/datasets/?file_details without # authorization - self._assert_ok(open_cr_json, 'no') + self._assert_ok(open_cr_json, "no") def test_returns_all_details_for_login_catalog_record_if_no_authorization(self): open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() # Verify all file and dir details info is returned for open cr /rest/datasets/?file_details without # authorization - self._assert_ok(open_cr_json, 'no') + self._assert_ok(open_cr_json, "no") def test_returns_all_details_for_open_catalog_record_if_service_authorization(self): open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() # Verify all file and dir details info is returned for open cr 
/rest/datasets/?file_details with # service authorization - self._assert_ok(open_cr_json, 'service') + self._assert_ok(open_cr_json, "service") @responses.activate def test_returns_all_details_for_open_catalog_record_if_owner_authorization(self): @@ -1023,88 +1201,104 @@ def test_returns_all_details_for_open_catalog_record_if_owner_authorization(self # Verify all file and dir details info is returned for open owner-owned cr /rest/datasets/?file_details with # owner authorization - self._assert_ok(open_cr_json, 'owner') + self._assert_ok(open_cr_json, "owner") - def test_returns_all_details_for_restricted_catalog_record_if_service_authorization(self): + def test_returns_all_details_for_restricted_catalog_record_if_service_authorization( + self, + ): restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() # Verify all file and dir details info is returned for restricted cr /rest/datasets/?file_details with # service authorization - self._assert_ok(restricted_cr_json, 'service') + self._assert_ok(restricted_cr_json, "service") @responses.activate - def test_returns_all_details_for_restricted_catalog_record_if_owner_authorization(self): + def test_returns_all_details_for_restricted_catalog_record_if_owner_authorization( + self, + ): self.create_end_user_data_catalogs() - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details(True) + restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details( + True + ) # Verify all file and dir details info is returned for restricted owner-owned cr # /rest/datasets/?file_details with owner authorization - self._assert_ok(restricted_cr_json, 'owner') + self._assert_ok(restricted_cr_json, "owner") - def test_returns_all_details_for_embargoed_catalog_record_if_available_reached_and_no_authorization(self): - available_embargoed_cr_json = self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) + def test_returns_all_details_for_embargoed_catalog_record_if_available_reached_and_no_authorization( + self, + ): + available_embargoed_cr_json = ( + self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) + ) # Verify all file and dir details info is returned for embargoed cr /rest/datasets/?file_details when # embargo date has been reached without authorization - self._assert_ok(available_embargoed_cr_json, 'no') + self._assert_ok(available_embargoed_cr_json, "no") # THE FORBIDDEN TESTS - def test_returns_limited_info_for_restricted_catalog_record_if_no_authorization(self): + def test_returns_limited_info_for_restricted_catalog_record_if_no_authorization( + self, + ): restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() # Verify limited file and dir info for restricted cr /rest/datasets/?file_details without authorization - self._assert_limited_or_no_file_dir_info(restricted_cr_json, 'no') + self._assert_limited_or_no_file_dir_info(restricted_cr_json, "no") - def test_returns_limited_info_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization(self): - not_available_embargoed_cr_json = self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(False) + def test_returns_limited_info_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization( + self, + ): + not_available_embargoed_cr_json = ( + self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(False) + ) # Verify limited file and dir info for embargoed cr 
/rest/datasets/?file_details when embargo date has not # been reached without authorization - self._assert_limited_or_no_file_dir_info(not_available_embargoed_cr_json, 'no') + self._assert_limited_or_no_file_dir_info(not_available_embargoed_cr_json, "no") def _assert_limited_or_no_file_dir_info(self, cr_json, credentials_type): self._set_http_authorization(credentials_type) - file_amt = len(cr_json['research_dataset']['files']) - dir_amt = len(cr_json['research_dataset']['directories']) - pk = cr_json['id'] + file_amt = len(cr_json["research_dataset"]["files"]) + dir_amt = len(cr_json["research_dataset"]["directories"]) + pk = cr_json["id"] - response = self.client.get('/rest/datasets/{0}?file_details'.format(pk)) + response = self.client.get("/rest/datasets/{0}?file_details".format(pk)) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['files']), file_amt) - self.assertEqual(len(response.data['research_dataset']['directories']), dir_amt) + self.assertEqual(len(response.data["research_dataset"]["files"]), file_amt) + self.assertEqual(len(response.data["research_dataset"]["directories"]), dir_amt) - for f in response.data['research_dataset']['files']: - self.assertTrue('details' in f) + for f in response.data["research_dataset"]["files"]: + self.assertTrue("details" in f) # The below assert is a bit arbitrary - self.assertTrue(len(f['details'].keys()) < 5) - for d in response.data['research_dataset']['directories']: - self.assertTrue('details' in d) + self.assertTrue(len(f["details"].keys()) < 5) + for d in response.data["research_dataset"]["directories"]: + self.assertTrue("details" in d) # The below assert is a bit arbitrary - self.assertTrue(len(d['details'].keys()) < 5) + self.assertTrue(len(d["details"].keys()) < 5) def _assert_ok(self, cr_json, credentials_type): self._set_http_authorization(credentials_type) - file_amt = len(cr_json['research_dataset']['files']) - dir_amt = len(cr_json['research_dataset']['directories']) - pk = cr_json['id'] + file_amt = len(cr_json["research_dataset"]["files"]) + dir_amt = len(cr_json["research_dataset"]["directories"]) + pk = cr_json["id"] - response = self.client.get('/rest/datasets/{0}?file_details'.format(pk)) + response = self.client.get("/rest/datasets/{0}?file_details".format(pk)) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['files']), file_amt) - self.assertEqual(len(response.data['research_dataset']['directories']), dir_amt) + self.assertEqual(len(response.data["research_dataset"]["files"]), file_amt) + self.assertEqual(len(response.data["research_dataset"]["directories"]), dir_amt) - for f in response.data['research_dataset']['files']: - self.assertTrue('details' in f) + for f in response.data["research_dataset"]["files"]: + self.assertTrue("details" in f) # The below assert is a bit arbitrary - self.assertTrue(len(f['details'].keys()) > 5) - for d in response.data['research_dataset']['directories']: - self.assertTrue('details' in d) + self.assertTrue(len(f["details"].keys()) > 5) + for d in response.data["research_dataset"]["directories"]: + self.assertTrue("details" in d) # The below assert is a bit arbitrary - self.assertTrue(len(d['details'].keys()) > 5) + self.assertTrue(len(d["details"].keys()) > 5) class CatalogRecordApiReadFiles(CatalogRecordApiReadCommon): @@ -1115,7 +1309,7 @@ class CatalogRecordApiReadFiles(CatalogRecordApiReadCommon): def test_get_files(self): file_count = 
CatalogRecord.objects.get(pk=1).files.count() - response = self.client.get('/rest/datasets/1/files') + response = self.client.get("/rest/datasets/1/files") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data), file_count) @@ -1123,19 +1317,19 @@ def test_get_files_specified_fields_only(self): """ Test use of query parameter ?file_fields=x,y,z """ - response = self.client.get('/rest/datasets/1/files?file_fields=identifier,file_path') + response = self.client.get("/rest/datasets/1/files?file_fields=identifier,file_path") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data[0].keys()), 2) - self.assertEqual('identifier' in response.data[0], True) - self.assertEqual('file_path' in response.data[0], True) + self.assertEqual("identifier" in response.data[0], True) + self.assertEqual("file_path" in response.data[0], True) def test_removed_query_param(self): """ Test use of query parameter removed_files=bool in /datasets/pid/files, which should return only deleted files. """ - response = self.client.get('/rest/datasets/1/files') - file_ids_before = set([ f['id'] for f in response.data ]) + response = self.client.get("/rest/datasets/1/files") + file_ids_before = set([f["id"] for f in response.data]) obj = File.objects.get(pk=1) obj.removed = True obj.force_save() @@ -1143,14 +1337,14 @@ def test_removed_query_param(self): obj2.removed = True obj2.force_save() - response = self.client.get('/rest/datasets/1/files') + response = self.client.get("/rest/datasets/1/files") self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) self.assertEqual(len(response.data), 0) - response = self.client.get('/rest/datasets/1/files?removed_files=true') + response = self.client.get("/rest/datasets/1/files?removed_files=true") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual(len(response.data), len(file_ids_before)) - self.assertEqual(file_ids_before, set([ f['id'] for f in response.data ])) + self.assertEqual(file_ids_before, set([f["id"] for f in response.data])) class CatalogRecordApiReadFilesAuthorization(CatalogRecordApiReadCommon): @@ -1164,13 +1358,13 @@ def test_returns_ok_for_open_catalog_record_if_no_authorization(self): open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() # Verify open dataset /rest/datasets//files returns all the files even without authorization - self._assert_ok(open_cr_json, 'no') + self._assert_ok(open_cr_json, "no") def test_returns_ok_for_open_catalog_record_if_service_authorization(self): open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() # Verify open dataset /rest/datasets//files returns all the files with service authorization - self._assert_ok(open_cr_json, 'service') + self._assert_ok(open_cr_json, "service") @responses.activate def test_returns_ok_for_open_catalog_record_if_owner_authorization(self): @@ -1178,29 +1372,35 @@ def test_returns_ok_for_open_catalog_record_if_owner_authorization(self): open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(True) # Verify open owner-owned dataset /rest/datasets//files returns all the files with owner authorization - self._assert_ok(open_cr_json, 'owner') + self._assert_ok(open_cr_json, "owner") def test_returns_ok_for_restricted_catalog_record_if_service_authorization(self): restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() # Verify restricted dataset 
/rest/datasets//files returns all the files with service authorization - self._assert_ok(restricted_cr_json, 'service') + self._assert_ok(restricted_cr_json, "service") @responses.activate def test_returns_ok_for_restricted_catalog_record_if_owner_authorization(self): self.create_end_user_data_catalogs() - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details(True) + restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details( + True + ) # Verify restricted owner-owned dataset /rest/datasets//files returns all the files with # owner authorization - self._assert_ok(restricted_cr_json, 'owner') + self._assert_ok(restricted_cr_json, "owner") - def test_returns_ok_for_embargoed_catalog_record_if_available_reached_and_no_authorization(self): - available_embargoed_cr_json = self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) + def test_returns_ok_for_embargoed_catalog_record_if_available_reached_and_no_authorization( + self, + ): + available_embargoed_cr_json = ( + self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) + ) # Verify restricted dataset /rest/datasets//files returns ok when embargo date has been reached without # authorization - self._assert_ok(available_embargoed_cr_json, 'no') + self._assert_ok(available_embargoed_cr_json, "no") # THE FORBIDDEN TESTS @@ -1208,25 +1408,31 @@ def test_returns_forbidden_for_restricted_catalog_record_if_no_authorization(sel restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() # Verify restricted dataset /rest/datasets//files returns forbidden without authorization - self._assert_forbidden(restricted_cr_json, 'no') + self._assert_forbidden(restricted_cr_json, "no") - def test_returns_forbidden_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization(self): - not_available_embargoed_cr_json = self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(False) + def test_returns_forbidden_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization( + self, + ): + not_available_embargoed_cr_json = ( + self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(False) + ) # Verify restricted dataset /rest/datasets//files returns forbidden when embargo date has not been reached - self._assert_forbidden(not_available_embargoed_cr_json, 'no') + self._assert_forbidden(not_available_embargoed_cr_json, "no") def _assert_forbidden(self, cr_json, credentials_type): - pk = cr_json['id'] + pk = cr_json["id"] self._set_http_authorization(credentials_type) - response = self.client.get('/rest/datasets/{0}/files'.format(pk)) + response = self.client.get("/rest/datasets/{0}/files".format(pk)) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def _assert_ok(self, cr_json, credentials_type): - pk = cr_json['id'] + pk = cr_json["id"] self._set_http_authorization(credentials_type) - rd = cr_json['research_dataset'] - file_amt = len(rd['files']) + sum(int(d['details']['file_count']) for d in rd['directories']) - response = self.client.get('/rest/datasets/{0}/files'.format(pk)) + rd = cr_json["research_dataset"] + file_amt = len(rd["files"]) + sum( + int(d["details"]["file_count"]) for d in rd["directories"] + ) + response = self.client.get("/rest/datasets/{0}/files".format(pk)) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data), file_amt) diff --git a/src/metax_api/tests/api/rest/base/views/datasets/write.py 
b/src/metax_api/tests/api/rest/base/views/datasets/write.py index 8c67daaf..8a68881e 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/write.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/write.py @@ -16,7 +16,14 @@ from rest_framework import status from rest_framework.test import APITestCase -from metax_api.models import AlternateRecordSet, CatalogRecord, Contract, DataCatalog, Directory, File +from metax_api.models import ( + AlternateRecordSet, + CatalogRecord, + Contract, + DataCatalog, + Directory, + File, +) from metax_api.models.catalog_record import ACCESS_TYPES from metax_api.services import ReferenceDataMixin as RDM from metax_api.services.redis_cache_service import RedisClient @@ -30,6 +37,7 @@ EXT_CATALOG = django_settings.EXT_DATA_CATALOG_IDENTIFIER DFT_CATALOG = django_settings.DFT_DATA_CATALOG_IDENTIFIER + class CatalogRecordApiWriteCommon(APITestCase, TestClassUtils): """ Common class for write tests, inherited by other write test classes @@ -39,19 +47,24 @@ def setUp(self): """ Reloaded for every test case """ - call_command('loaddata', test_data_file_path, verbosity=0) - catalog_record_from_test_data = self._get_object_from_test_data('catalogrecord') - self.preferred_identifier = catalog_record_from_test_data['research_dataset']['preferred_identifier'] - self.identifier = catalog_record_from_test_data['identifier'] - self.pk = catalog_record_from_test_data['id'] + call_command("loaddata", test_data_file_path, verbosity=0) + catalog_record_from_test_data = self._get_object_from_test_data("catalogrecord") + self.preferred_identifier = catalog_record_from_test_data["research_dataset"][ + "preferred_identifier" + ] + self.identifier = catalog_record_from_test_data["identifier"] + self.pk = catalog_record_from_test_data["id"] """ New data that is sent to the server for POST, PUT, PATCH requests. 
Modified slightly as appropriate for different purposes
        """
        self.cr_test_data = self._get_new_test_cr_data()
-        self.cr_test_data['research_dataset']['publisher'] = {'@type': 'Organization', 'name': {'und': 'Testaaja'}}
-        self.cr_test_data['research_dataset']['issued'] = '2010-01-01'
+        self.cr_test_data["research_dataset"]["publisher"] = {
+            "@type": "Organization",
+            "name": {"und": "Testaaja"},
+        }
+        self.cr_test_data["research_dataset"]["issued"] = "2010-01-01"

        self.cr_att_test_data = self._get_new_test_cr_data(cr_index=14, dc_index=5)
        self.cr_test_data_new_identifier = self._get_new_test_cr_data_with_updated_identifier()
@@ -61,11 +74,13 @@ def setUp(self):
        self._use_http_authorization()

    def update_record(self, record):
-        return self.client.put('/rest/datasets/%d' % record['id'], record, format="json")
+        return self.client.put("/rest/datasets/%d" % record["id"], record, format="json")

    def get_next_version(self, record):
-        self.assertEqual('next_dataset_version' in record, True)
-        response = self.client.get('/rest/datasets/%d' % record['next_dataset_version']['id'], format="json")
+        self.assertEqual("next_dataset_version" in record, True)
+        response = self.client.get(
+            "/rest/datasets/%d" % record["next_dataset_version"]["id"], format="json"
+        )
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
        return response.data

@@ -78,45 +93,62 @@ def get_next_version(self, record):
    #

    def _get_new_test_cr_data(self, cr_index=0, dc_index=0, c_index=0):
-        dc = self._get_object_from_test_data('datacatalog', requested_index=dc_index)
-        catalog_record_from_test_data = self._get_object_from_test_data('catalogrecord', requested_index=cr_index)
-
-        if dc['catalog_json']['research_dataset_schema'] == 'ida' and \
-                'remote_resources' in catalog_record_from_test_data['research_dataset']:
-            self.fail("Cannot generate the requested test catalog record since the requested data catalog indicates the ida "
-                      "schema and the requested catalog record has remote resources, which is not allowed")
-
-        if dc['catalog_json']['research_dataset_schema'] == 'att' and \
-                ('files' in catalog_record_from_test_data['research_dataset'] or
-                 'directories' in catalog_record_from_test_data['research_dataset']):
-            self.fail("Cannot generate the requested test catalog record since the requested data catalog indicates the att "
                      "schema and the requested catalog record has files or directories, which is not allowed")
-
-        catalog_record_from_test_data.update({
-            "contract": self._get_object_from_test_data('contract', requested_index=c_index),
-            "data_catalog": dc
-        })
-        catalog_record_from_test_data['research_dataset'].update({
-            "creator": [{
-                "@type": "Person",
-                "name": "Teppo Testaaja",
-                "member_of": {
-                    "@type": "Organization",
-                    "name": {"fi": "Mysterious Organization"}
-                }
-            }],
-            "curator": [{
-                "@type": "Person",
-                "name": "Default Owner",
-                "member_of": {
-                    "@type": "Organization",
-                    "name": {"fi": "Mysterious Organization"}
-                }
-            }]
-        })
-        catalog_record_from_test_data['research_dataset'].pop('preferred_identifier', None)
-        catalog_record_from_test_data['research_dataset'].pop('metadata_version_identifier', None)
-        catalog_record_from_test_data.pop('identifier', None)
+        dc = self._get_object_from_test_data("datacatalog", requested_index=dc_index)
+        catalog_record_from_test_data = self._get_object_from_test_data(
+            "catalogrecord", requested_index=cr_index
+        )
+
+        if (
+            dc["catalog_json"]["research_dataset_schema"] == "ida"
+            and "remote_resources" in catalog_record_from_test_data["research_dataset"]
+        ):
+            self.fail(
+                "Cannot generate the requested test catalog record since the requested data catalog indicates the ida "
+                "schema and the requested catalog record has remote resources, which is not allowed"
+            )
+
+        if dc["catalog_json"]["research_dataset_schema"] == "att" and (
+            "files" in catalog_record_from_test_data["research_dataset"]
+            or "directories" in catalog_record_from_test_data["research_dataset"]
+        ):
+            self.fail(
+                "Cannot generate the requested test catalog record since the requested data catalog indicates the att "
+                "schema and the requested catalog record has files or directories, which is not allowed"
+            )
+
+        catalog_record_from_test_data.update(
+            {
+                "contract": self._get_object_from_test_data("contract", requested_index=c_index),
+                "data_catalog": dc,
+            }
+        )
+        catalog_record_from_test_data["research_dataset"].update(
+            {
+                "creator": [
+                    {
+                        "@type": "Person",
+                        "name": "Teppo Testaaja",
+                        "member_of": {
+                            "@type": "Organization",
+                            "name": {"fi": "Mysterious Organization"},
+                        },
+                    }
+                ],
+                "curator": [
+                    {
+                        "@type": "Person",
+                        "name": "Default Owner",
+                        "member_of": {
+                            "@type": "Organization",
+                            "name": {"fi": "Mysterious Organization"},
+                        },
+                    }
+                ],
+            }
+        )
+        catalog_record_from_test_data["research_dataset"].pop("preferred_identifier", None)
+        catalog_record_from_test_data["research_dataset"].pop("metadata_version_identifier", None)
+        catalog_record_from_test_data.pop("identifier", None)
        return catalog_record_from_test_data

    def _get_new_test_cr_data_with_updated_identifier(self):
@@ -130,28 +162,43 @@ def _get_new_full_test_ida_cr_data(self):
        """ Returns one of the fuller generated test datasets """
-        catalog_record_from_test_data = self._get_object_from_test_data('catalogrecord', requested_index=11)
-        data_catalog_from_test_data = self._get_object_from_test_data('datacatalog', requested_index=0)
-        return self._get_new_full_test_cr_data(catalog_record_from_test_data, data_catalog_from_test_data)
+        catalog_record_from_test_data = self._get_object_from_test_data(
+            "catalogrecord", requested_index=11
+        )
+        data_catalog_from_test_data = self._get_object_from_test_data(
+            "datacatalog", requested_index=0
+        )
+        return self._get_new_full_test_cr_data(
+            catalog_record_from_test_data, data_catalog_from_test_data
+        )

    def _get_new_full_test_att_cr_data(self):
        """ Returns one of the fuller generated test datasets """
-        catalog_record_from_test_data = self._get_object_from_test_data('catalogrecord', requested_index=23)
-        data_catalog_from_test_data = self._get_object_from_test_data('datacatalog', requested_index=5)
-        return self._get_new_full_test_cr_data(catalog_record_from_test_data, data_catalog_from_test_data)
+        catalog_record_from_test_data = self._get_object_from_test_data(
+            "catalogrecord", requested_index=23
+        )
+        data_catalog_from_test_data = self._get_object_from_test_data(
+            "datacatalog", requested_index=5
+        )
+        return self._get_new_full_test_cr_data(
+            catalog_record_from_test_data, data_catalog_from_test_data
+        )

    def _get_new_full_test_cr_data(self, cr_from_test_data, dc_from_test_data):
-        cr_from_test_data.update({
-            "contract": self._get_object_from_test_data('contract', requested_index=0),
-            "data_catalog": dc_from_test_data
-        })
-        cr_from_test_data['research_dataset'].pop('metadata_version_identifier')
-        cr_from_test_data['research_dataset'].pop('preferred_identifier')
-        cr_from_test_data.pop('identifier')
+        cr_from_test_data.update(
+            {
+                "contract": self._get_object_from_test_data("contract", requested_index=0),
+                "data_catalog": dc_from_test_data,
+            }
+        )
+        cr_from_test_data["research_dataset"].pop("metadata_version_identifier")
+        cr_from_test_data["research_dataset"].pop("preferred_identifier")
+        cr_from_test_data.pop("identifier")
        return cr_from_test_data

+
class CatalogRecordApiWriteCreateTests(CatalogRecordApiWriteCommon):
    #
    #
@@ -162,49 +209,58 @@ class CatalogRecordApiWriteCreateTests(CatalogRecordApiWriteCommon):
    #
    def test_issued_date_is_generated(self):
-        ''' Issued date is generated for all but harvested catalogs if it doesn't exist '''
+        """ Issued date is generated for all but harvested catalogs if it doesn't exist """
        dc = DataCatalog.objects.get(pk=2)
-        dc.catalog_json['identifier'] = IDA_CATALOG # Test with IDA catalog
+        dc.catalog_json["identifier"] = IDA_CATALOG  # Test with IDA catalog
        dc.force_save()
-        self.cr_test_data['data_catalog'] = dc.catalog_json
-        self.cr_test_data['research_dataset'].pop('issued', None)
+        self.cr_test_data["data_catalog"] = dc.catalog_json
+        self.cr_test_data["research_dataset"].pop("issued", None)

-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertTrue('issued' in response.data['research_dataset'], response.data)
+        self.assertTrue("issued" in response.data["research_dataset"], response.data)

    def test_create_catalog_record(self):
-        self.cr_test_data['research_dataset']['preferred_identifier'] = 'this_should_be_overwritten'
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["research_dataset"]["preferred_identifier"] = "this_should_be_overwritten"
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual('research_dataset' in response.data.keys(), True)
-        self.assertEqual('metadata_version_identifier' in response.data['research_dataset'], True,
-                         'metadata_version_identifier should have been generated')
-        self.assertEqual('preferred_identifier' in response.data['research_dataset'], True,
-                         'preferred_identifier should have been generated')
+        self.assertEqual("research_dataset" in response.data.keys(), True)
+        self.assertEqual(
+            "metadata_version_identifier" in response.data["research_dataset"],
+            True,
+            "metadata_version_identifier should have been generated",
+        )
+        self.assertEqual(
+            "preferred_identifier" in response.data["research_dataset"],
+            True,
+            "preferred_identifier should have been generated",
+        )
        self.assertNotEqual(
-            self.cr_test_data['research_dataset']['preferred_identifier'],
-            response.data['research_dataset']['preferred_identifier'],
-            'in fairdata catalogs, user is not allowed to set preferred_identifier'
+            self.cr_test_data["research_dataset"]["preferred_identifier"],
+            response.data["research_dataset"]["preferred_identifier"],
+            "in fairdata catalogs, user is not allowed to set preferred_identifier",
        )
        self.assertNotEqual(
-            response.data['research_dataset']['preferred_identifier'],
-            response.data['research_dataset']['metadata_version_identifier'],
-            'preferred_identifier and metadata_version_identifier should be generated separately'
+            response.data["research_dataset"]["preferred_identifier"],
+            response.data["research_dataset"]["metadata_version_identifier"],
+            "preferred_identifier and metadata_version_identifier should be generated 
separately", + ) + cr = CatalogRecord.objects.get(pk=response.data["id"]) + self.assertEqual( + cr.date_created >= get_tz_aware_now_without_micros() - timedelta(seconds=5), + True, + "Timestamp should have been updated during object creation", ) - cr = CatalogRecord.objects.get(pk=response.data['id']) - self.assertEqual(cr.date_created >= get_tz_aware_now_without_micros() - timedelta(seconds=5), True, - 'Timestamp should have been updated during object creation') def test_create_catalog_record_as_harvester(self): - self.cr_test_data['research_dataset']['preferred_identifier'] = 'this_should_be_saved' - self.cr_test_data['data_catalog'] = 3 - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["preferred_identifier"] = "this_should_be_saved" + self.cr_test_data["data_catalog"] = 3 + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual( - self.cr_test_data['research_dataset']['preferred_identifier'], - response.data['research_dataset']['preferred_identifier'], - 'in harvested catalogs, user (the harvester) is allowed to set preferred_identifier' + self.cr_test_data["research_dataset"]["preferred_identifier"], + response.data["research_dataset"]["preferred_identifier"], + "in harvested catalogs, user (the harvester) is allowed to set preferred_identifier", ) def test_preferred_identifier_is_checked_also_from_deleted_records(self): @@ -216,81 +272,134 @@ def test_preferred_identifier_is_checked_also_from_deleted_records(self): # dc 3 happens to be harvested catalog, which allows setting pref id cr = CatalogRecord.objects.filter(data_catalog_id=3).first() - response = self.client.delete('/rest/datasets/%d' % cr.id) + response = self.client.delete("/rest/datasets/%d" % cr.id) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) - self.cr_test_data['research_dataset']['preferred_identifier'] = cr.preferred_identifier - self.cr_test_data['data_catalog'] = 3 - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["preferred_identifier"] = cr.preferred_identifier + self.cr_test_data["data_catalog"] = 3 + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('already exists' in response.data['research_dataset'][0], True, response.data) + self.assertEqual( + "already exists" in response.data["research_dataset"][0], + True, + response.data, + ) def test_create_catalog_contract_string_identifier(self): - contract_identifier = Contract.objects.first().contract_json['identifier'] - self.cr_test_data['contract'] = contract_identifier - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + contract_identifier = Contract.objects.first().contract_json["identifier"] + self.cr_test_data["contract"] = contract_identifier + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data['contract']['identifier'], contract_identifier, response.data) + self.assertEqual( + response.data["contract"]["identifier"], contract_identifier, response.data + ) def test_create_catalog_error_contract_string_identifier_not_found(self): - self.cr_test_data['contract'] = 'doesnotexist' - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + 
self.cr_test_data["contract"] = "doesnotexist" + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") # self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, 'Should have raised 404 not found') self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('contract' in response.data, True, 'Error should have been about contract not found') + self.assertEqual( + "contract" in response.data, + True, + "Error should have been about contract not found", + ) def test_create_catalog_record_json_validation_error_1(self): """ Ensure the json path of the error is returned along with other details """ - self.cr_test_data['research_dataset']["title"] = 1234456 - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["title"] = 1234456 + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual(len(response.data), 2, 'there should be two errors (error_identifier is one of them)') - self.assertEqual('research_dataset' in response.data.keys(), True, - 'The error should concern the field research_dataset') - self.assertEqual('1234456 is not of type' in response.data['research_dataset'][0], True, response.data) - self.assertEqual('Json path: [\'title\']' in response.data['research_dataset'][0], True, response.data) + self.assertEqual( + len(response.data), + 2, + "there should be two errors (error_identifier is one of them)", + ) + self.assertEqual( + "research_dataset" in response.data.keys(), + True, + "The error should concern the field research_dataset", + ) + self.assertEqual( + "1234456 is not of type" in response.data["research_dataset"][0], + True, + response.data, + ) + self.assertEqual( + "Json path: ['title']" in response.data["research_dataset"][0], + True, + response.data, + ) def test_create_catalog_record_json_validation_error_2(self): """ Ensure the json path of the error is returned along with other details also in objects that are deeply nested """ - self.cr_test_data['research_dataset']['provenance'] = [{ - 'title': {'en': 'provenance title'}, - 'was_associated_with': [ - {'@type': 'Person', 'xname': 'seppo'} - ] - }] - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["provenance"] = [ + { + "title": {"en": "provenance title"}, + "was_associated_with": [{"@type": "Person", "xname": "seppo"}], + } + ] + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual(len(response.data), 2, 'there should be two errors (error_identifier is one of them)') - self.assertEqual('research_dataset' in response.data.keys(), True, - 'The error should concern the field research_dataset') - self.assertEqual('is not valid' in response.data['research_dataset'][0], True, response.data) - self.assertEqual('was_associated_with' in response.data['research_dataset'][0], True, response.data) + self.assertEqual( + len(response.data), + 2, + "there should be two errors (error_identifier is one of them)", + ) + self.assertEqual( + "research_dataset" in response.data.keys(), + True, + "The error should concern the field research_dataset", + ) + self.assertEqual( + "is not valid" in response.data["research_dataset"][0], True, response.data + ) + self.assertEqual( + "was_associated_with" in 
response.data["research_dataset"][0], + True, + response.data, + ) def test_create_catalog_record_allowed_projects_ok(self): - response = self.client.post('/rest/datasets?allowed_projects=project_x', self.cr_test_data, format="json") + response = self.client.post( + "/rest/datasets?allowed_projects=project_x", + self.cr_test_data, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def test_create_catalog_record_allowed_projects_fail(self): # dataset file not in allowed projects - response = self.client.post('/rest/datasets?allowed_projects=no,permission', self.cr_test_data, format="json") + response = self.client.post( + "/rest/datasets?allowed_projects=no,permission", + self.cr_test_data, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) # ensure list is properly handled (separated by comma, end result should be list) - response = self.client.post('/rest/datasets?allowed_projects=no_good_project_x,another', - self.cr_test_data, format="json") + response = self.client.post( + "/rest/datasets?allowed_projects=no_good_project_x,another", + self.cr_test_data, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) # handle empty value - response = self.client.post('/rest/datasets?allowed_projects=', self.cr_test_data, format="json") + response = self.client.post( + "/rest/datasets?allowed_projects=", self.cr_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) # Other trickery - response = self.client.post('/rest/datasets?allowed_projects=,', self.cr_test_data, format="json") + response = self.client.post( + "/rest/datasets?allowed_projects=,", self.cr_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) # @@ -298,19 +407,25 @@ def test_create_catalog_record_allowed_projects_fail(self): # def test_create_catalog_record_list(self): - response = self.client.post('/rest/datasets', - [self.cr_test_data, self.cr_test_data_new_identifier], format="json") + response = self.client.post( + "/rest/datasets", + [self.cr_test_data, self.cr_test_data_new_identifier], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual('object' in response.data['success'][0].keys(), True) - self.assertEqual(len(response.data['success']), 2) - self.assertEqual(len(response.data['failed']), 0) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual("object" in response.data["success"][0].keys(), True) + self.assertEqual(len(response.data["success"]), 2) + self.assertEqual(len(response.data["failed"]), 0) def test_create_catalog_record_list_error_one_fails(self): - self.cr_test_data['research_dataset']["title"] = 1234456 - response = self.client.post('/rest/datasets', - [self.cr_test_data, self.cr_test_data_new_identifier], format="json") + self.cr_test_data["research_dataset"]["title"] = 1234456 + response = self.client.post( + "/rest/datasets", + [self.cr_test_data, self.cr_test_data_new_identifier], + format="json", + ) """ List response looks like @@ -329,42 +444,49 @@ def test_create_catalog_record_list_error_one_fails(self): } """ self.assertEqual(response.status_code, status.HTTP_201_CREATED) - 
self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual('object' in response.data['failed'][0].keys(), True) - self.assertEqual('research_dataset' in response.data['failed'][0]['errors'], True, response.data) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual("object" in response.data["failed"][0].keys(), True) + self.assertEqual( + "research_dataset" in response.data["failed"][0]["errors"], + True, + response.data, + ) self.assertEqual( - '1234456 is not of type' in response.data['failed'][0]['errors']['research_dataset'][0], + "1234456 is not of type" in response.data["failed"][0]["errors"]["research_dataset"][0], True, - response.data + response.data, ) self.assertEqual( - 'Json path: [\'title\']' in response.data['failed'][0]['errors']['research_dataset'][0], + "Json path: ['title']" in response.data["failed"][0]["errors"]["research_dataset"][0], True, - response.data + response.data, ) def test_create_catalog_record_list_error_all_fail(self): # data catalog is a required field, should fail - self.cr_test_data['data_catalog'] = None - self.cr_test_data_new_identifier['data_catalog'] = None + self.cr_test_data["data_catalog"] = None + self.cr_test_data_new_identifier["data_catalog"] = None - response = self.client.post('/rest/datasets', - [self.cr_test_data, self.cr_test_data_new_identifier], format="json") + response = self.client.post( + "/rest/datasets", + [self.cr_test_data, self.cr_test_data_new_identifier], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual('object' in response.data['failed'][0].keys(), True) - self.assertEqual(len(response.data['success']), 0) - self.assertEqual(len(response.data['failed']), 2) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual("object" in response.data["failed"][0].keys(), True) + self.assertEqual(len(response.data["success"]), 0) + self.assertEqual(len(response.data["failed"]), 2) def test_create_catalog_record_editor_field_is_optional(self): - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) new = response.data - new['research_dataset']['title']['en'] = 'updated title' - new.pop('editor') - response = self.client.put('/rest/datasets/%d' % new['id'], new, format="json") + new["research_dataset"]["title"]["en"] = "updated title" + new.pop("editor") + response = self.client.put("/rest/datasets/%d" % new["id"], new, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_parameter_migration_override_preferred_identifier_when_creating(self): @@ -373,11 +495,13 @@ def test_parameter_migration_override_preferred_identifier_when_creating(self): permitted. Using the optional query parameter ?migration_override=bool a custom preferred_identifier can be passed. 
""" - custom_pid = 'custom-pid-value' - self.cr_test_data['research_dataset']['preferred_identifier'] = custom_pid - response = self.client.post('/rest/datasets?migration_override', self.cr_test_data, format="json") + custom_pid = "custom-pid-value" + self.cr_test_data["research_dataset"]["preferred_identifier"] = custom_pid + response = self.client.post( + "/rest/datasets?migration_override", self.cr_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data['research_dataset']['preferred_identifier'], custom_pid) + self.assertEqual(response.data["research_dataset"]["preferred_identifier"], custom_pid) def test_parameter_migration_override_no_preferred_identifier_when_creating(self): """ @@ -385,47 +509,61 @@ def test_parameter_migration_override_no_preferred_identifier_when_creating(self permitted. Using the optional query parameter ?migration_override=bool a custom preferred_identifier can be passed. """ - self.cr_test_data['research_dataset']['preferred_identifier'] = '' - response = self.client.post('/rest/datasets?migration_override', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["preferred_identifier"] = "" + response = self.client.post( + "/rest/datasets?migration_override", self.cr_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertTrue(len(response.data['research_dataset']['preferred_identifier']) > 0) + self.assertTrue(len(response.data["research_dataset"]["preferred_identifier"]) > 0) - self.cr_test_data['research_dataset'].pop('preferred_identifier', None) - response = self.client.post('/rest/datasets?migration_override', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"].pop("preferred_identifier", None) + response = self.client.post( + "/rest/datasets?migration_override", self.cr_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertTrue(len(response.data['research_dataset']['preferred_identifier']) > 0) + self.assertTrue(len(response.data["research_dataset"]["preferred_identifier"]) > 0) def test_create_catalog_record_using_pid_type(self): # Test with pid_type = urn - self.cr_test_data['research_dataset']['preferred_identifier'] = '' - response = self.client.post('/rest/datasets?pid_type=urn', self.cr_test_data, format="json") - self.assertTrue(response.data['research_dataset']['preferred_identifier'].startswith('urn:')) + self.cr_test_data["research_dataset"]["preferred_identifier"] = "" + response = self.client.post("/rest/datasets?pid_type=urn", self.cr_test_data, format="json") + self.assertTrue( + response.data["research_dataset"]["preferred_identifier"].startswith("urn:") + ) # Test with pid_type = doi AND not ida catalog - self.cr_test_data['research_dataset']['preferred_identifier'] = '' - response = self.client.post('/rest/datasets?pid_type=doi', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["preferred_identifier"] = "" + response = self.client.post("/rest/datasets?pid_type=doi", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) # Create ida data catalog - dc = self._get_object_from_test_data('datacatalog', requested_index=0) + dc = self._get_object_from_test_data("datacatalog", requested_index=0) dc_id = IDA_CATALOG - dc['catalog_json']['identifier'] = dc_id - self.client.post('/rest/datacatalogs', dc, 
format="json") + dc["catalog_json"]["identifier"] = dc_id + self.client.post("/rest/datacatalogs", dc, format="json") # Test with pid_type = doi AND ida catalog - self.cr_test_data['research_dataset']['preferred_identifier'] = '' - self.cr_test_data['data_catalog'] = IDA_CATALOG - response = self.client.post('/rest/datasets?pid_type=doi', self.cr_test_data, format="json") - self.assertTrue(response.data['research_dataset']['preferred_identifier'].startswith('doi:10.')) + self.cr_test_data["research_dataset"]["preferred_identifier"] = "" + self.cr_test_data["data_catalog"] = IDA_CATALOG + response = self.client.post("/rest/datasets?pid_type=doi", self.cr_test_data, format="json") + self.assertTrue( + response.data["research_dataset"]["preferred_identifier"].startswith("doi:10.") + ) # Test with pid_type = not_known - self.cr_test_data['research_dataset']['preferred_identifier'] = '' - response = self.client.post('/rest/datasets?pid_type=not_known', self.cr_test_data, format="json") - self.assertTrue(response.data['research_dataset']['preferred_identifier'].startswith('urn:')) + self.cr_test_data["research_dataset"]["preferred_identifier"] = "" + response = self.client.post( + "/rest/datasets?pid_type=not_known", self.cr_test_data, format="json" + ) + self.assertTrue( + response.data["research_dataset"]["preferred_identifier"].startswith("urn:") + ) # Test without pid_type - self.cr_test_data['research_dataset']['preferred_identifier'] = '' - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") - self.assertTrue(response.data['research_dataset']['preferred_identifier'].startswith('urn:')) + self.cr_test_data["research_dataset"]["preferred_identifier"] = "" + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") + self.assertTrue( + response.data["research_dataset"]["preferred_identifier"].startswith("urn:") + ) class CatalogRecordApiWriteIdentifierUniqueness(CatalogRecordApiWriteCommon): @@ -444,65 +582,91 @@ class CatalogRecordApiWriteIdentifierUniqueness(CatalogRecordApiWriteCommon): # create operations # - def test_create_catalog_record_error_preferred_identifier_cant_be_metadata_version_identifier(self): + def test_create_catalog_record_error_preferred_identifier_cant_be_metadata_version_identifier( + self, + ): """ preferred_identifier can never be the same as a metadata_version_identifier in another cr, in any catalog. """ - existing_metadata_version_identifier = CatalogRecord.objects.get(pk=1).metadata_version_identifier - self.cr_test_data['research_dataset']['preferred_identifier'] = existing_metadata_version_identifier + existing_metadata_version_identifier = CatalogRecord.objects.get( + pk=1 + ).metadata_version_identifier + self.cr_test_data["research_dataset"][ + "preferred_identifier" + ] = existing_metadata_version_identifier # setting preferred_identifier is only allowed in harvested catalogs. 
- self.cr_test_data['data_catalog'] = 3 + self.cr_test_data["data_catalog"] = 3 - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('research_dataset' in response.data.keys(), True, - 'The error should be about an error in research_dataset') + self.assertEqual( + "research_dataset" in response.data.keys(), + True, + "The error should be about an error in research_dataset", + ) # the error message should clearly state that the value of preferred_identifier appears in the # field metadata_version_identifier in another record, therefore two asserts - self.assertEqual('preferred_identifier' in response.data['research_dataset'][0], True, - 'The error should be about metadata_version_identifier existing with this identifier') - self.assertEqual('metadata_version_identifier' in response.data['research_dataset'][0], True, - 'The error should be about metadata_version_identifier existing with this identifier') + self.assertEqual( + "preferred_identifier" in response.data["research_dataset"][0], + True, + "The error should be about metadata_version_identifier existing with this identifier", + ) + self.assertEqual( + "metadata_version_identifier" in response.data["research_dataset"][0], + True, + "The error should be about metadata_version_identifier existing with this identifier", + ) - def test_create_catalog_record_error_preferred_identifier_exists_in_same_catalog(self): + def test_create_catalog_record_error_preferred_identifier_exists_in_same_catalog( + self, + ): """ preferred_identifier already existing in the same data catalog is an error """ - self.cr_test_data['research_dataset']['preferred_identifier'] = 'pid_by_harvester' - self.cr_test_data['data_catalog'] = 3 - cr_1 = self.client.post('/rest/datasets', self.cr_test_data, format="json").data + self.cr_test_data["research_dataset"]["preferred_identifier"] = "pid_by_harvester" + self.cr_test_data["data_catalog"] = 3 + cr_1 = self.client.post("/rest/datasets", self.cr_test_data, format="json").data - self.cr_test_data['research_dataset']['preferred_identifier'] = \ - cr_1['research_dataset']['preferred_identifier'] + self.cr_test_data["research_dataset"]["preferred_identifier"] = cr_1["research_dataset"][ + "preferred_identifier" + ] - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('research_dataset' in response.data.keys(), True, - 'The error should be about an error in research_dataset') - self.assertEqual('preferred_identifier' in response.data['research_dataset'][0], True, - 'The error should be about preferred_identifier already existing') + self.assertEqual( + "research_dataset" in response.data.keys(), + True, + "The error should be about an error in research_dataset", + ) + self.assertEqual( + "preferred_identifier" in response.data["research_dataset"][0], + True, + "The error should be about preferred_identifier already existing", + ) def test_create_catalog_record_preferred_identifier_exists_in_another_catalog(self): """ preferred_identifier existing in another data catalog is not an error. 
""" unique_identifier = self._set_preferred_identifier_to_record(pk=1, catalog_id=1) - self.cr_test_data['research_dataset']['preferred_identifier'] = unique_identifier + self.cr_test_data["research_dataset"]["preferred_identifier"] = unique_identifier # different catalog, should be OK (not ATT catalog, so preferred_identifier being saved # can exist in other catalogs) - self.cr_test_data['data_catalog'] = 3 + self.cr_test_data["data_catalog"] = 3 - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) # # update operations # - def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_1(self): + def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_1( + self, + ): """ preferred_identifier existing in another data catalog is not an error. @@ -515,13 +679,15 @@ def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_1( cr.data_catalog_id = 3 cr.save() - data = self.client.get('/rest/datasets/3').data - data['research_dataset']['preferred_identifier'] = unique_identifier + data = self.client.get("/rest/datasets/3").data + data["research_dataset"]["preferred_identifier"] = unique_identifier - response = self.client.patch('/rest/datasets/3', data, format="json") + response = self.client.patch("/rest/datasets/3", data, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_2(self): + def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_2( + self, + ): """ preferred_identifier existing in another data catalog is not an error. 
@@ -534,14 +700,16 @@ def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_2( """ unique_identifier = self._set_preferred_identifier_to_record(pk=1, catalog_id=1) - data = self.client.get('/rest/datasets/3').data - data['research_dataset']['preferred_identifier'] = unique_identifier - data['data_catalog'] = 3 + data = self.client.get("/rest/datasets/3").data + data["research_dataset"]["preferred_identifier"] = unique_identifier + data["data_catalog"] = 3 - response = self.client.patch('/rest/datasets/3', data, format="json") + response = self.client.patch("/rest/datasets/3", data, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, data) - def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_3(self): + def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_3( + self, + ): """ preferred_identifier already existing in the same data catalog is an error, in other catalogs than ATT: Harvester or other catalogs cant contain same @@ -558,28 +726,36 @@ def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_3( # setup the record in db which will cause conflict unique_identifier = self._set_preferred_identifier_to_record(pk=3, catalog_id=3) - data = {'research_dataset': self.cr_test_data['research_dataset']} - data['research_dataset']['preferred_identifier'] = unique_identifier - data['data_catalog'] = 3 + data = {"research_dataset": self.cr_test_data["research_dataset"]} + data["research_dataset"]["preferred_identifier"] = unique_identifier + data["data_catalog"] = 3 - response = self.client.patch('/rest/datasets/2', data, format="json") + response = self.client.patch("/rest/datasets/2", data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('preferred_identifier' in response.data['research_dataset'][0], True, - 'The error should be about preferred_identifier already existing') + self.assertEqual( + "preferred_identifier" in response.data["research_dataset"][0], + True, + "The error should be about preferred_identifier already existing", + ) def test_remote_doi_dataset_is_validated_against_datacite_format(self): # Remote input DOI ids need to take datasets for datacite validation - cr = {'research_dataset': self.cr_test_data['research_dataset']} - cr['research_dataset']['preferred_identifier'] = 'doi:10.5061/dryad.10188854' - cr['data_catalog'] = 3 - cr['metadata_provider_org'] = 'metax' - cr['metadata_provider_user'] = 'metax' - cr['research_dataset'].pop('publisher', None) - - response = self.client.post('/rest/datasets', cr, format="json") + cr = {"research_dataset": self.cr_test_data["research_dataset"]} + cr["research_dataset"]["preferred_identifier"] = "doi:10.5061/dryad.10188854" + cr["data_catalog"] = 3 + cr["metadata_provider_org"] = "metax" + cr["metadata_provider_user"] = "metax" + cr["research_dataset"].pop("publisher", None) + + response = self.client.post("/rest/datasets", cr, format="json") # Publisher value is required for datacite format, so this should return Http400 self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('a required value for datacite format' in response.data['detail'][0], True, response.data) + self.assertEqual( + "a required value for datacite format" in response.data["detail"][0], + True, + response.data, + ) + # # helpers # @@ -589,14 +765,15 @@ def _set_preferred_identifier_to_record(self, pk=None, catalog_id=None): Set preferred_identifier to an existing 
record to a value, and return that value, which will then be used by the test to create or update another record. """ - unique_identifier = 'im unique yo' + unique_identifier = "im unique yo" cr = CatalogRecord.objects.get(pk=pk) - cr.research_dataset['preferred_identifier'] = unique_identifier + cr.research_dataset["preferred_identifier"] = unique_identifier cr.data_catalog_id = catalog_id cr.force_save() cr._handle_preferred_identifier_changed() return unique_identifier + class CatalogRecordApiWriteDatasetSchemaSelection(CatalogRecordApiWriteCommon): # # @@ -613,18 +790,18 @@ def setUp(self): def test_catalog_record_with_not_found_json_schema_gets_default_schema(self): # catalog has dataset schema, but it is not found on the server dc = DataCatalog.objects.get(pk=1) - dc.catalog_json['research_dataset_schema'] = 'nonexisting' + dc.catalog_json["research_dataset_schema"] = "nonexisting" dc.save() - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) # catalog has no dataset schema at all dc = DataCatalog.objects.get(pk=1) - dc.catalog_json.pop('research_dataset_schema') + dc.catalog_json.pop("research_dataset_schema") dc.save() - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def test_catalog_record_create_with_other_schema(self): @@ -632,21 +809,23 @@ def test_catalog_record_create_with_other_schema(self): Ensure that dataset json schema validation works with other json schemas than the default IDA """ - self.cr_test_data['research_dataset']['remote_resources'] = [ - {'title': 'title'}, - {'title': 'title'} + self.cr_test_data["research_dataset"]["remote_resources"] = [ + {"title": "title"}, + {"title": "title"}, ] - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.cr_test_data['research_dataset']['remote_resources'] = [ - {'title': 'title'}, - {'title': 'title'}, - {'woah': 'this should give a failure, since title is a required field, and it is missing'} + self.cr_test_data["research_dataset"]["remote_resources"] = [ + {"title": "title"}, + {"title": "title"}, + { + "woah": "this should give a failure, since title is a required field, and it is missing" + }, ] - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) def test_catalog_record_ref_data_validation_with_other_schema(self): @@ -654,26 +833,27 @@ def test_catalog_record_ref_data_validation_with_other_schema(self): Ensure that dataset reference data validation and population works with other json schemas than the default IDA. 
Ref data validation should be schema agnostic """ - self.cr_test_data['research_dataset']['other_identifier'] = [ + self.cr_test_data["research_dataset"]["other_identifier"] = [ { - 'notation': 'urn:1', - 'type': { - 'identifier': 'doi', - } + "notation": "urn:1", + "type": { + "identifier": "doi", + }, } ] - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assertEqual( - 'uri.suomi.fi' in response.data['research_dataset']['other_identifier'][0]['type']['identifier'], + "uri.suomi.fi" + in response.data["research_dataset"]["other_identifier"][0]["type"]["identifier"], True, - 'Identifier type should have been populated with data from ref data' + "Identifier type should have been populated with data from ref data", ) def _set_data_catalog_schema_to_harvester(self): dc = DataCatalog.objects.get(pk=1) - dc.catalog_json['research_dataset_schema'] = 'harvester' + dc.catalog_json["research_dataset_schema"] = "harvester" dc.save() @@ -685,38 +865,50 @@ class CatalogRecordApiWriteUpdateTests(CatalogRecordApiWriteCommon): # def test_update_catalog_record(self): - cr = self.client.get('/rest/datasets/1').data - cr['preservation_description'] = 'what' + cr = self.client.get("/rest/datasets/1").data + cr["preservation_description"] = "what" - response = self.client.put('/rest/datasets/1', cr, format="json") + response = self.client.put("/rest/datasets/1", cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['preservation_description'], 'what') + self.assertEqual(response.data["preservation_description"], "what") cr = CatalogRecord.objects.get(pk=1) - self.assertEqual(cr.date_modified >= get_tz_aware_now_without_micros() - timedelta(seconds=5), True, - 'Timestamp should have been updated during object update') + self.assertEqual( + cr.date_modified >= get_tz_aware_now_without_micros() - timedelta(seconds=5), + True, + "Timestamp should have been updated during object update", + ) def test_update_catalog_record_error_using_preferred_identifier(self): - cr = self.client.get('/rest/datasets/1').data - response = self.client.put('/rest/datasets/%s' % cr['research_dataset']['preferred_identifier'], - { 'whatever': 123 }, format="json") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, - 'Update operation should return 404 when using preferred_identifier') + cr = self.client.get("/rest/datasets/1").data + response = self.client.put( + "/rest/datasets/%s" % cr["research_dataset"]["preferred_identifier"], + {"whatever": 123}, + format="json", + ) + self.assertEqual( + response.status_code, + status.HTTP_404_NOT_FOUND, + "Update operation should return 404 when using preferred_identifier", + ) def test_update_catalog_record_error_required_fields(self): """ Field 'research_dataset' is missing, which should result in an error, since PUT replaces an object and requires all 'required' fields to be present. 
""" - cr = self.client.get('/rest/datasets/1').data - cr.pop('research_dataset') - response = self.client.put('/rest/datasets/1', cr, format="json") + cr = self.client.get("/rest/datasets/1").data + cr.pop("research_dataset") + response = self.client.put("/rest/datasets/1", cr, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('research_dataset' in response.data.keys(), True, - 'Error for field \'research_dataset\' is missing from response.data') + self.assertEqual( + "research_dataset" in response.data.keys(), + True, + "Error for field 'research_dataset' is missing from response.data", + ) def test_update_catalog_record_not_found(self): - response = self.client.put('/rest/datasets/doesnotexist', self.cr_test_data, format="json") + response = self.client.put("/rest/datasets/doesnotexist", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_update_catalog_record_contract(self): @@ -725,29 +917,33 @@ def test_update_catalog_record_contract(self): old_contract_id = cr.contract.id # update contract to any different contract - cr_1 = self.client.get('/rest/datasets/%d' % cr.id).data - cr_1['contract'] = Contract.objects.all().exclude(pk=old_contract_id).first().id + cr_1 = self.client.get("/rest/datasets/%d" % cr.id).data + cr_1["contract"] = Contract.objects.all().exclude(pk=old_contract_id).first().id - response = self.client.put('/rest/datasets/%d' % cr.id, cr_1, format="json") + response = self.client.put("/rest/datasets/%d" % cr.id, cr_1, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) new_contract_id = CatalogRecord.objects.get(pk=cr.id).contract.id - self.assertNotEqual(old_contract_id, new_contract_id, 'Contract should have changed') + self.assertNotEqual(old_contract_id, new_contract_id, "Contract should have changed") def test_catalog_record_update_allowed_projects_ok(self): - cr_11 = self.client.get('/rest/datasets/11').data - cr_11['preservation_state'] = 0 - cr_11_dir_len = len(cr_11['research_dataset']['directories']) - cr_11['research_dataset']['directories'].pop(1) + cr_11 = self.client.get("/rest/datasets/11").data + cr_11["preservation_state"] = 0 + cr_11_dir_len = len(cr_11["research_dataset"]["directories"]) + cr_11["research_dataset"]["directories"].pop(1) - response = self.client.put('/rest/datasets/11?allowed_projects=project_x', cr_11, format="json") + response = self.client.put( + "/rest/datasets/11?allowed_projects=project_x", cr_11, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['research_dataset']['directories']), cr_11_dir_len - 1) + self.assertEqual(len(response.data["research_dataset"]["directories"]), cr_11_dir_len - 1) def test_catalog_record_update_allowed_projects_fail(self): - cr_1 = self.client.get('/rest/datasets/1').data - cr_1['research_dataset']['files'].pop(0) + cr_1 = self.client.get("/rest/datasets/1").data + cr_1["research_dataset"]["files"].pop(0) - response = self.client.put('/rest/datasets/1?allowed_projects=no,projects', cr_1, format="json") + response = self.client.put( + "/rest/datasets/1?allowed_projects=no,projects", cr_1, format="json" + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) # @@ -755,105 +951,115 @@ def test_catalog_record_update_allowed_projects_fail(self): # def test_catalog_record_update_list(self): - cr_1 = self.client.get('/rest/datasets/1').data - 
cr_1['preservation_description'] = 'updated description' + cr_1 = self.client.get("/rest/datasets/1").data + cr_1["preservation_description"] = "updated description" - cr_2 = self.client.get('/rest/datasets/2').data - cr_2['preservation_description'] = 'second updated description' + cr_2 = self.client.get("/rest/datasets/2").data + cr_2["preservation_description"] = "second updated description" - response = self.client.put('/rest/datasets', [ cr_1, cr_2 ], format="json") + response = self.client.put("/rest/datasets", [cr_1, cr_2], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['success']), 2) + self.assertEqual(len(response.data["success"]), 2) updated_cr = CatalogRecord.objects.get(pk=1) - self.assertEqual(updated_cr.preservation_description, 'updated description') + self.assertEqual(updated_cr.preservation_description, "updated description") updated_cr = CatalogRecord.objects.get(pk=2) - self.assertEqual(updated_cr.preservation_description, 'second updated description') + self.assertEqual(updated_cr.preservation_description, "second updated description") def test_catalog_record_update_list_error_one_fails(self): - cr_1 = self.client.get('/rest/datasets/1').data - cr_1['preservation_description'] = 'updated description' + cr_1 = self.client.get("/rest/datasets/1").data + cr_1["preservation_description"] = "updated description" # data catalog is a required field, should therefore fail - cr_2 = self.client.get('/rest/datasets/2').data - cr_2.pop('data_catalog', None) + cr_2 = self.client.get("/rest/datasets/2").data + cr_2.pop("data_catalog", None) - response = self.client.put('/rest/datasets', [ cr_1, cr_2 ], format="json") + response = self.client.put("/rest/datasets", [cr_1, cr_2], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual(isinstance(response.data['success'], list), True, - 'return data should contain key success, which is a list') - self.assertEqual(len(response.data['success']), 1) - self.assertEqual(len(response.data['failed']), 1) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual( + isinstance(response.data["success"], list), + True, + "return data should contain key success, which is a list", + ) + self.assertEqual(len(response.data["success"]), 1) + self.assertEqual(len(response.data["failed"]), 1) updated_cr = CatalogRecord.objects.get(pk=1) - self.assertEqual(updated_cr.preservation_description, 'updated description') + self.assertEqual(updated_cr.preservation_description, "updated description") def test_catalog_record_update_list_error_key_not_found(self): # does not have identifier key - cr_1 = self.client.get('/rest/datasets/1').data - cr_1.pop('id') - cr_1.pop('identifier') - cr_1['research_dataset'].pop('metadata_version_identifier') + cr_1 = self.client.get("/rest/datasets/1").data + cr_1.pop("id") + cr_1.pop("identifier") + cr_1["research_dataset"].pop("metadata_version_identifier") - cr_2 = self.client.get('/rest/datasets/2').data - cr_2['preservation_description'] = 'second updated description' + cr_2 = self.client.get("/rest/datasets/2").data + cr_2["preservation_description"] = "second updated description" - response = self.client.put('/rest/datasets', [ cr_1, cr_2 ], format="json") + response = 
self.client.put("/rest/datasets", [cr_1, cr_2], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual(len(response.data['success']), 1) - self.assertEqual(len(response.data['failed']), 1) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual(len(response.data["success"]), 1) + self.assertEqual(len(response.data["failed"]), 1) def test_catalog_record_deprecated_and_date_deprecated_cannot_be_set(self): # Test catalog record's deprecated field cannot be set with POST, PUT or PATCH initial_deprecated = True - self.cr_test_data['deprecated'] = initial_deprecated - self.cr_test_data['date_deprecated'] = '2018-01-01T00:00:00' - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") - self.assertEqual(response.data['deprecated'], False) - self.assertTrue('date_deprecated' not in response.data) - - response_json = self.client.get('/rest/datasets/1').data - initial_deprecated = response_json['deprecated'] - response_json['deprecated'] = not initial_deprecated - response_json['date_deprecated'] = '2018-01-01T00:00:00' - response = self.client.put('/rest/datasets/1', response_json, format="json") + self.cr_test_data["deprecated"] = initial_deprecated + self.cr_test_data["date_deprecated"] = "2018-01-01T00:00:00" + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") + self.assertEqual(response.data["deprecated"], False) + self.assertTrue("date_deprecated" not in response.data) + + response_json = self.client.get("/rest/datasets/1").data + initial_deprecated = response_json["deprecated"] + response_json["deprecated"] = not initial_deprecated + response_json["date_deprecated"] = "2018-01-01T00:00:00" + response = self.client.put("/rest/datasets/1", response_json, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['deprecated'], initial_deprecated) - self.assertTrue('date_deprecated' not in response.data) + self.assertEqual(response.data["deprecated"], initial_deprecated) + self.assertTrue("date_deprecated" not in response.data) - initial_deprecated = self.client.get('/rest/datasets/1').data['deprecated'] - response = self.client.patch('/rest/datasets/1', { 'deprecated': not initial_deprecated }, format="json") + initial_deprecated = self.client.get("/rest/datasets/1").data["deprecated"] + response = self.client.patch( + "/rest/datasets/1", {"deprecated": not initial_deprecated}, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['deprecated'], initial_deprecated) - self.assertTrue('date_deprecated' not in response.data) + self.assertEqual(response.data["deprecated"], initial_deprecated) + self.assertTrue("date_deprecated" not in response.data) def test_catalog_record_date_deprecated_and_date_deprecated_lifecycle(self): # if dataset is deprecated, fixing dataset creates new version ds = CatalogRecord.objects.filter(files__id=1) ds_id = ds[0].identifier - response = self.client.delete('/rest/files/1') + response = self.client.delete("/rest/files/1") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get('/rest/datasets/%s' % ds_id) + response = self.client.get("/rest/datasets/%s" % ds_id) cr = response.data - 
self.assertTrue(cr['deprecated']) - self.assertTrue(cr['date_deprecated'].startswith('2')) + self.assertTrue(cr["deprecated"]) + self.assertTrue(cr["date_deprecated"].startswith("2")) - response = self.client.post('/rpc/datasets/fix_deprecated?identifier=%s' % ds_id, cr, format="json") + response = self.client.post( + "/rpc/datasets/fix_deprecated?identifier=%s" % ds_id, cr, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(CatalogRecord.objects.get(identifier=ds_id).next_dataset_version.deprecated, False) + self.assertEqual( + CatalogRecord.objects.get(identifier=ds_id).next_dataset_version.deprecated, + False, + ) def test_catalog_record_deprecation_updates_date_modified(self): cr = CatalogRecord.objects.filter(files__id=1) cr_id = cr[0].identifier - response = self.client.delete('/rest/files/1') + response = self.client.delete("/rest/files/1") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) cr_depr = CatalogRecord.objects.get(identifier=cr_id) @@ -864,41 +1070,41 @@ def test_change_datacatalog_ATT_to_IDA(self): cr = self._get_new_full_test_att_cr_data() # create ATT data catalog - dc_att = self._get_object_from_test_data('datacatalog', 4) - dc_att['catalog_json']['identifier'] = 'urn:nbn:fi:att:data-catalog-att' - dc_att = self.client.post('/rest/datacatalogs', dc_att, format="json").data + dc_att = self._get_object_from_test_data("datacatalog", 4) + dc_att["catalog_json"]["identifier"] = "urn:nbn:fi:att:data-catalog-att" + dc_att = self.client.post("/rest/datacatalogs", dc_att, format="json").data # create IDA data catalog - dc_ida = self._get_object_from_test_data('datacatalog') - dc_ida['catalog_json']['identifier'] = 'urn:nbn:fi:att:data-catalog-ida' - dc_ida = self.client.post('/rest/datacatalogs', dc_ida, format="json").data + dc_ida = self._get_object_from_test_data("datacatalog") + dc_ida["catalog_json"]["identifier"] = "urn:nbn:fi:att:data-catalog-ida" + dc_ida = self.client.post("/rest/datacatalogs", dc_ida, format="json").data # create ATT catalog record - cr['data_catalog'] = dc_att - cr_att = self.client.post('/rest/datasets', cr, format="json").data + cr["data_catalog"] = dc_att + cr_att = self.client.post("/rest/datasets", cr, format="json").data # change data catalog to IDA - cr_id = cr_att['id'] - cr_att['data_catalog']['id'] = dc_ida['id'] - cr_att['data_catalog']['identifier'] = dc_ida['catalog_json']['identifier'] - cr_ida = self.client.put('/rest/datasets/%d' % cr_id, cr_att, format="json") + cr_id = cr_att["id"] + cr_att["data_catalog"]["id"] = dc_ida["id"] + cr_att["data_catalog"]["identifier"] = dc_ida["catalog_json"]["identifier"] + cr_ida = self.client.put("/rest/datasets/%d" % cr_id, cr_att, format="json") self.assertEqual(cr_ida.status_code, status.HTTP_200_OK, cr_ida) - self.assertTrue(not all(item in cr_ida.data['research_dataset'].keys() for item in - ['remote_resources', 'total_remote_resources_byte_size'])) - self.assertTrue('metadata_version_identifier' in cr_ida.data['research_dataset'].keys()) + self.assertTrue( + not all( + item in cr_ida.data["research_dataset"].keys() + for item in ["remote_resources", "total_remote_resources_byte_size"] + ) + ) + self.assertTrue("metadata_version_identifier" in cr_ida.data["research_dataset"].keys()) - cr_ida.data['research_dataset']['files'] = [ + cr_ida.data["research_dataset"]["files"] = [ { "title": "File metadata title 1", "file_type": { "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", "identifier": 
"http://uri.suomi.fi/codelist/fairdata/file_type/code/text", - "pref_label": { - "en": "Text", - "fi": "Teksti", - "und": "Teksti" - } + "pref_label": {"en": "Text", "fi": "Teksti", "und": "Teksti"}, }, "identifier": "pid:urn:1", "use_category": { @@ -907,14 +1113,18 @@ def test_change_datacatalog_ATT_to_IDA(self): "pref_label": { "en": "Source material", "fi": "LƤhdeaineisto", - "und": "LƤhdeaineisto" - } - } - }] - cr_ida = self.client.put('/rest/datasets/%d' % cr_id, cr_ida.data, format="json") + "und": "LƤhdeaineisto", + }, + }, + } + ] + cr_ida = self.client.put("/rest/datasets/%d" % cr_id, cr_ida.data, format="json") self.assertEqual(cr_ida.status_code, status.HTTP_200_OK, cr_ida.data) - self.assertTrue(len(cr_ida.data['research_dataset']['files']) == 1, 'Dataset must contain one file') + self.assertTrue( + len(cr_ida.data["research_dataset"]["files"]) == 1, + "Dataset must contain one file", + ) class CatalogRecordApiWritePartialUpdateTests(CatalogRecordApiWriteCommon): @@ -925,15 +1135,23 @@ class CatalogRecordApiWritePartialUpdateTests(CatalogRecordApiWriteCommon): # def test_update_catalog_record_partial(self): - new_data_catalog = self._get_object_from_test_data('datacatalog', requested_index=1)['id'] + new_data_catalog = self._get_object_from_test_data("datacatalog", requested_index=1)["id"] new_data = { "data_catalog": new_data_catalog, } - response = self.client.patch('/rest/datasets/%s' % self.identifier, new_data, format="json") + response = self.client.patch("/rest/datasets/%s" % self.identifier, new_data, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('research_dataset' in response.data.keys(), True, 'PATCH operation should return full content') - self.assertEqual(response.data['data_catalog']['id'], new_data_catalog, 'Field data_catalog was not updated') + self.assertEqual( + "research_dataset" in response.data.keys(), + True, + "PATCH operation should return full content", + ) + self.assertEqual( + response.data["data_catalog"]["id"], + new_data_catalog, + "Field data_catalog was not updated", + ) # # update list operations PATCH @@ -941,59 +1159,80 @@ def test_update_catalog_record_partial(self): def test_catalog_record_partial_update_list(self): test_data = {} - test_data['id'] = 1 - test_data['preservation_description'] = 'description' + test_data["id"] = 1 + test_data["preservation_description"] = "description" second_test_data = {} - second_test_data['id'] = 2 - second_test_data['preservation_description'] = 'description 2' + second_test_data["id"] = 2 + second_test_data["preservation_description"] = "description 2" - response = self.client.patch('/rest/datasets', [test_data, second_test_data], format="json") + response = self.client.patch("/rest/datasets", [test_data, second_test_data], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('success' in response.data, True, 'response.data should contain list of changed objects') - self.assertEqual(len(response.data), 2, 'response.data should contain 2 changed objects') - self.assertEqual('research_dataset' in response.data['success'][0]['object'], True, - 'response.data should contain full objects') + self.assertEqual( + "success" in response.data, + True, + "response.data should contain list of changed objects", + ) + self.assertEqual(len(response.data), 2, "response.data should contain 2 changed objects") + self.assertEqual( + "research_dataset" in response.data["success"][0]["object"], + True, + 
"response.data should contain full objects", + ) updated_cr = CatalogRecord.objects.get(pk=1) - self.assertEqual(updated_cr.preservation_description, 'description') + self.assertEqual(updated_cr.preservation_description, "description") def test_catalog_record_partial_update_list_error_one_fails(self): test_data = {} - test_data['id'] = 1 - test_data['preservation_description'] = 'description' + test_data["id"] = 1 + test_data["preservation_description"] = "description" second_test_data = {} - second_test_data['preservation_state'] = 555 # value not allowed - second_test_data['id'] = 2 + second_test_data["preservation_state"] = 555 # value not allowed + second_test_data["id"] = 2 - response = self.client.patch('/rest/datasets', [test_data, second_test_data], format="json") + response = self.client.patch("/rest/datasets", [test_data, second_test_data], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual(len(response.data['success']), 1, 'success list should contain one item') - self.assertEqual(len(response.data['failed']), 1, 'there should have been one failed element') - self.assertEqual('preservation_state' in response.data['failed'][0]['errors'], True, - response.data['failed'][0]['errors']) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual(len(response.data["success"]), 1, "success list should contain one item") + self.assertEqual( + len(response.data["failed"]), 1, "there should have been one failed element" + ) + self.assertEqual( + "preservation_state" in response.data["failed"][0]["errors"], + True, + response.data["failed"][0]["errors"], + ) def test_catalog_record_partial_update_list_error_key_not_found(self): # does not have identifier key test_data = {} - test_data['preservation_state'] = 10 + test_data["preservation_state"] = 10 second_test_data = {} - second_test_data['id'] = 2 - second_test_data['preservation_state'] = 20 + second_test_data["id"] = 2 + second_test_data["preservation_state"] = 20 - response = self.client.patch('/rest/datasets', [test_data, second_test_data], format="json") + response = self.client.patch("/rest/datasets", [test_data, second_test_data], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual(len(response.data['success']), 1, 'success list should contain one item') - self.assertEqual(len(response.data['failed']), 1, 'there should have been one failed element') - self.assertEqual('detail' in response.data['failed'][0]['errors'], True, response.data['failed'][0]['errors']) - self.assertEqual('identifying key' in response.data['failed'][0]['errors']['detail'][0], True, - response.data['failed'][0]['errors']) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual(len(response.data["success"]), 1, "success list should contain one item") + self.assertEqual( + len(response.data["failed"]), 1, "there should have been one failed element" + ) + self.assertEqual( + "detail" in response.data["failed"][0]["errors"], + True, + response.data["failed"][0]["errors"], + ) + self.assertEqual( + "identifying key" in 
response.data["failed"][0]["errors"]["detail"][0], + True, + response.data["failed"][0]["errors"], + ) class CatalogRecordApiWriteDeleteTests(CatalogRecordApiWriteCommon): @@ -1006,7 +1245,7 @@ class CatalogRecordApiWriteDeleteTests(CatalogRecordApiWriteCommon): # def test_delete_catalog_record(self): - url = '/rest/datasets/%s' % self.identifier + url = "/rest/datasets/%s" % self.identifier response = self.client.delete(url) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) response = self.client.get(url) @@ -1014,76 +1253,101 @@ def test_delete_catalog_record(self): try: deleted_catalog_record = CatalogRecord.objects.get(identifier=self.identifier) - raise Exception('Deleted CatalogRecord should not be retrievable from the default objects table') + raise Exception( + "Deleted CatalogRecord should not be retrievable from the default objects table" + ) except CatalogRecord.DoesNotExist: # successful test should go here, instead of raising the expection in try: block pass try: - deleted_catalog_record = CatalogRecord.objects_unfiltered.get(identifier=self.identifier) + deleted_catalog_record = CatalogRecord.objects_unfiltered.get( + identifier=self.identifier + ) except CatalogRecord.DoesNotExist: - raise Exception('Deleted CatalogRecord should not be deleted from the db, but marked as removed') + raise Exception( + "Deleted CatalogRecord should not be deleted from the db, but marked as removed" + ) self.assertEqual(deleted_catalog_record.removed, True) self.assertEqual(deleted_catalog_record.identifier, self.identifier) - self.assertEqual(deleted_catalog_record.date_modified, deleted_catalog_record.date_removed, - 'date_modified should be updated') + self.assertEqual( + deleted_catalog_record.date_modified, + deleted_catalog_record.date_removed, + "date_modified should be updated", + ) def test_delete_catalog_record_error_using_preferred_identifier(self): - url = '/rest/datasets/%s' % self.preferred_identifier + url = "/rest/datasets/%s" % self.preferred_identifier response = self.client.delete(url) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_bulk_delete_catalog_record_permissions(self): # create catalog with 'metax' edit permissions and create dataset with this catalog as 'metax' user cr = self._get_new_test_cr_data() - cr.pop('id') - catalog = self._get_object_from_test_data('datacatalog', requested_index=0) - catalog.pop('id') - catalog['catalog_json']['identifier'] = 'metax-catalog' - catalog['catalog_record_services_edit'] = 'metax' - catalog = self.client.post('/rest/datacatalogs', catalog, format="json") - cr['data_catalog'] = {'id': catalog.data['id'], 'identifier': catalog.data['catalog_json']['identifier']} - - self._use_http_authorization(username='metax') - response = self.client.post('/rest/datasets/', cr, format="json") - metax_cr = response.data['id'] + cr.pop("id") + catalog = self._get_object_from_test_data("datacatalog", requested_index=0) + catalog.pop("id") + catalog["catalog_json"]["identifier"] = "metax-catalog" + catalog["catalog_record_services_edit"] = "metax" + catalog = self.client.post("/rest/datacatalogs", catalog, format="json") + cr["data_catalog"] = { + "id": catalog.data["id"], + "identifier": catalog.data["catalog_json"]["identifier"], + } + + self._use_http_authorization(username="metax") + response = self.client.post("/rest/datasets/", cr, format="json") + metax_cr = response.data["id"] # create catalog with 'testuser' edit permissions and create dataset with this catalog as 'testuser' user cr = 
 
     def test_bulk_delete_catalog_record_permissions(self):
        # create catalog with 'metax' edit permissions and create dataset with this catalog as 'metax' user
         cr = self._get_new_test_cr_data()
-        cr.pop('id')
-        catalog = self._get_object_from_test_data('datacatalog', requested_index=0)
-        catalog.pop('id')
-        catalog['catalog_json']['identifier'] = 'metax-catalog'
-        catalog['catalog_record_services_edit'] = 'metax'
-        catalog = self.client.post('/rest/datacatalogs', catalog, format="json")
-        cr['data_catalog'] = {'id': catalog.data['id'], 'identifier': catalog.data['catalog_json']['identifier']}
-
-        self._use_http_authorization(username='metax')
-        response = self.client.post('/rest/datasets/', cr, format="json")
-        metax_cr = response.data['id']
+        cr.pop("id")
+        catalog = self._get_object_from_test_data("datacatalog", requested_index=0)
+        catalog.pop("id")
+        catalog["catalog_json"]["identifier"] = "metax-catalog"
+        catalog["catalog_record_services_edit"] = "metax"
+        catalog = self.client.post("/rest/datacatalogs", catalog, format="json")
+        cr["data_catalog"] = {
+            "id": catalog.data["id"],
+            "identifier": catalog.data["catalog_json"]["identifier"],
+        }
+
+        self._use_http_authorization(username="metax")
+        response = self.client.post("/rest/datasets/", cr, format="json")
+        metax_cr = response.data["id"]
 
         # create catalog with 'testuser' edit permissions and create dataset with this catalog as 'testuser' user
         cr = self._get_new_test_cr_data()
-        cr.pop('id')
-        catalog = self._get_object_from_test_data('datacatalog', requested_index=1)
-        catalog.pop('id')
-        catalog['catalog_json']['identifier'] = 'testuser-catalog'
-        catalog['catalog_record_services_edit'] = 'testuser'
-        catalog = self.client.post('/rest/datacatalogs', catalog, format="json")
-        cr['data_catalog'] = {'id': catalog.data['id'], 'identifier': catalog.data['catalog_json']['identifier']}
-
-        self._use_http_authorization(username='testuser', password='testuserpassword')
-        response = self.client.post('/rest/datasets/', cr, format="json")
-        testuser_cr = response.data['id']
+        cr.pop("id")
+        catalog = self._get_object_from_test_data("datacatalog", requested_index=1)
+        catalog.pop("id")
+        catalog["catalog_json"]["identifier"] = "testuser-catalog"
+        catalog["catalog_record_services_edit"] = "testuser"
+        catalog = self.client.post("/rest/datacatalogs", catalog, format="json")
+        cr["data_catalog"] = {
+            "id": catalog.data["id"],
+            "identifier": catalog.data["catalog_json"]["identifier"],
+        }
+
+        self._use_http_authorization(username="testuser", password="testuserpassword")
+        response = self.client.post("/rest/datasets/", cr, format="json")
+        testuser_cr = response.data["id"]
 
         # after trying to delete as 'testuser', only one dataset is deleted
-        response = self.client.delete('/rest/datasets', [metax_cr, testuser_cr], format="json")
+        response = self.client.delete("/rest/datasets", [metax_cr, testuser_cr], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(response.data, [testuser_cr])
-        response = self.client.post('/rest/datasets/list?pagination=false', [metax_cr, testuser_cr], format="json")
+        response = self.client.post(
+            "/rest/datasets/list?pagination=false",
+            [metax_cr, testuser_cr],
+            format="json",
+        )
         self.assertTrue(len(response.data), 1)
 
-        response = self.client.delete('/rest/datasets', [metax_cr], format="json")
+        response = self.client.delete("/rest/datasets", [metax_cr], format="json")
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
-        response = self.client.post('/rest/datasets/list?pagination=false', [metax_cr, testuser_cr], format="json")
+        response = self.client.post(
+            "/rest/datasets/list?pagination=false",
+            [metax_cr, testuser_cr],
+            format="json",
+        )
         self.assertTrue(len(response.data), 1)
 
     def test_bulk_delete_catalog_record(self):
         ids = [1, 2, 3]
-        identifiers = CatalogRecord.objects.filter(pk__in=[4, 5, 6]).values_list('identifier', flat=True)
+        identifiers = CatalogRecord.objects.filter(pk__in=[4, 5, 6]).values_list(
+            "identifier", flat=True
+        )
 
         for crs in [ids, identifiers]:
-            response = self.client.delete('/rest/datasets', crs, format="json")
+            response = self.client.delete("/rest/datasets", crs, format="json")
             self.assertEqual(response.status_code, status.HTTP_200_OK)
             self.assertTrue(response.data == [1, 2, 3] or response.data == [4, 5, 6])
-            response = self.client.post('/rest/datasets/list?pagination=false', crs, format="json")
+            response = self.client.post("/rest/datasets/list?pagination=false", crs, format="json")
             self.assertFalse(response.data)
 
             for cr in crs:
@@ -1093,21 +1357,24 @@ def test_bulk_delete_catalog_record(self):
                 deleted = CatalogRecord.objects_unfiltered.get(identifier=cr)
 
                 self.assertEqual(deleted.removed, True)
-                self.assertEqual(deleted.date_modified, deleted.date_removed,
-                                 'date_modified should be updated')
+                self.assertEqual(
+                    deleted.date_modified,
+                    deleted.date_removed,
+                    "date_modified should be updated",
+                )
 
         # failing tests
         ids = [1000, 2000]
-        identifiers = ['1000', '2000']
+        identifiers = ["1000", "2000"]
 
         for crs in [ids, identifiers]:
-            response = self.client.delete('/rest/datasets', ids, format="json")
+            response = self.client.delete("/rest/datasets", ids, format="json")
             self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
 
         ids = []
-        response = self.client.delete('/rest/datasets', ids, format="json")
+        response = self.client.delete("/rest/datasets", ids, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertTrue('Received empty list of identifiers' in response.data['detail'][0])
+        self.assertTrue("Received empty list of identifiers" in response.data["detail"][0])
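+        # Observed bulk-delete semantics, summarizing the assertions above:
+        # the request body may list internal ids or identifiers, the response
+        # lists what was actually deleted, 404 is returned when nothing
+        # matched, and an empty list is rejected with 400.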
 
 
 class CatalogRecordApiWritePreservationStateTests(CatalogRecordApiWriteCommon):
@@ -1121,12 +1388,12 @@ def _create_pas_dataset_from_id(self, id):
         Helper method to create a pas dataset by updating the given dataset's
         preservation_state to 80.
         """
-        cr_data = self.client.get('/rest/datasets/%d' % id, format="json").data
-        self.assertEqual(cr_data['preservation_state'], 0)
+        cr_data = self.client.get("/rest/datasets/%d" % id, format="json").data
+        self.assertEqual(cr_data["preservation_state"], 0)
 
         # update state to "accepted to pas" -> should create pas version
-        cr_data['preservation_state'] = 80
-        response = self.client.put('/rest/datasets/%d' % id, cr_data, format="json")
+        cr_data["preservation_state"] = 80
+        response = self.client.put("/rest/datasets/%d" % id, cr_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
 
         return response.data
 
@@ -1134,25 +1401,29 @@ def setUp(self):
         super().setUp()
         dc = DataCatalog.objects.get(pk=1)
         catalog_json = dc.catalog_json
-        catalog_json['identifier'] = django_settings.PAS_DATA_CATALOG_IDENTIFIER
-        catalog_json['dataset_versioning'] = False
+        catalog_json["identifier"] = django_settings.PAS_DATA_CATALOG_IDENTIFIER
+        catalog_json["dataset_versioning"] = False
         dc = DataCatalog.objects.create(
             catalog_json=catalog_json,
             date_created=get_tz_aware_now_without_micros(),
-            catalog_record_services_create='testuser,api_auth_user,metax',
-            catalog_record_services_edit='testuser,api_auth_user,metax',
-            catalog_record_services_read='testuser,api_auth_user,metax'
+            catalog_record_services_create="testuser,api_auth_user,metax",
+            catalog_record_services_edit="testuser,api_auth_user,metax",
+            catalog_record_services_read="testuser,api_auth_user,metax",
         )
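+    # Usage sketch for the helper above (record ids refer to the bundled
+    # test data):
+    #   origin = self._create_pas_dataset_from_id(1)
+    #   pas_id = origin["preservation_dataset_version"]["id"]
+    # The copy is created into the PAS catalog configured in setUp().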
 
     def test_update_catalog_record_pas_state_allowed_value(self):
-        cr = self.client.get('/rest/datasets/1').data
-        cr['preservation_state'] = 30
-        response = self.client.put('/rest/datasets/1', cr, format="json")
+        cr = self.client.get("/rest/datasets/1").data
+        cr["preservation_state"] = 30
+        response = self.client.put("/rest/datasets/1", cr, format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
         cr = CatalogRecord.objects.get(pk=1)
-        self.assertEqual(cr.preservation_state_modified >= get_tz_aware_now_without_micros() - timedelta(seconds=5),
-                         True, 'Timestamp should have been updated during object update')
+        self.assertEqual(
+            cr.preservation_state_modified
+            >= get_tz_aware_now_without_micros() - timedelta(seconds=5),
+            True,
+            "Timestamp should have been updated during object update",
+        )
 
     def test_update_pas_state_to_needs_revalidation(self):
         """
@@ -1167,14 +1438,14 @@ def test_update_pas_state_to_needs_revalidation(self):
             cr.save()
 
             # retrieve record and ensure testing state was set correctly...
-            cr_data = self.client.get('/rest/datasets/1', format="json").data
-            self.assertEqual(cr_data['preservation_state'], preservation_state_value)
+            cr_data = self.client.get("/rest/datasets/1", format="json").data
+            self.assertEqual(cr_data["preservation_state"], preservation_state_value)
 
             # strike and verify
-            cr_data['research_dataset']['title']['en'] = 'Metadata has been updated on loop %d' % i
-            response = self.client.put('/rest/datasets/1', cr_data, format="json")
+            cr_data["research_dataset"]["title"]["en"] = "Metadata has been updated on loop %d" % i
+            response = self.client.put("/rest/datasets/1", cr_data, format="json")
             self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-            self.assertEqual(response.data['preservation_state'], 60)
+            self.assertEqual(response.data["preservation_state"], 60)
 
     def test_prevent_file_changes_when_record_in_pas_process(self):
         """
@@ -1183,42 +1454,44 @@ def test_prevent_file_changes_when_record_in_pas_process(self):
         cr = CatalogRecord.objects.get(pk=1)
         cr.preservation_state = 10
         cr.save()
-        cr_data = self.client.get('/rest/datasets/1', format="json").data
-        cr_data['research_dataset']['files'].pop(0)
-        response = self.client.put('/rest/datasets/1', cr_data, format="json")
+        cr_data = self.client.get("/rest/datasets/1", format="json").data
+        cr_data["research_dataset"]["files"].pop(0)
+        response = self.client.put("/rest/datasets/1", cr_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
-        self.assertEqual('PAS process' in response.data['detail'][0], True, response.data)
+        self.assertEqual("PAS process" in response.data["detail"][0], True, response.data)
 
     def test_non_pas_dataset_unallowed_preservation_state_values(self):
         # update non-pas dataset
-        cr = self.client.get('/rest/datasets/1').data
+        cr = self.client.get("/rest/datasets/1").data
 
         values = [
-            11, # not one of known values
-            90, # value not allowed for non-pas datasets
+            11,  # not one of known values
+            90,  # value not allowed for non-pas datasets
         ]
 
         for invalid_value in values:
-            cr['preservation_state'] = invalid_value
-            response = self.client.put('/rest/datasets/1', cr, format="json")
+            cr["preservation_state"] = invalid_value
+            response = self.client.put("/rest/datasets/1", cr, format="json")
             self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
 
     def test_pas_dataset_unallowed_preservation_state_values(self):
         # create pas dataset and update with invalid values
-        cr = self.client.get('/rest/datasets/1').data
-        cr['preservation_state'] = 80
-        response = self.client.put('/rest/datasets/1', cr, format="json")
-        cr = self.client.get('/rest/datasets/%d' % response.data['preservation_dataset_version']['id']).data
+        cr = self.client.get("/rest/datasets/1").data
+        cr["preservation_state"] = 80
+        response = self.client.put("/rest/datasets/1", cr, format="json")
+        cr = self.client.get(
+            "/rest/datasets/%d" % response.data["preservation_dataset_version"]["id"]
+        ).data
 
         values = [
             70, # value not allowed for pas datasets
-            111, # not one of known values
-            150 # not one of known values
+            111,  # not one of known values
+            150,  # not one of known values
         ]
 
         for invalid_value in values:
-            cr['preservation_state'] = invalid_value
-            response = self.client.put('/rest/datasets/1', cr, format="json")
+            cr["preservation_state"] = invalid_value
+            response = self.client.put("/rest/datasets/1", cr, format="json")
             self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
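+    # preservation_state values exercised above (a summary derived from the
+    # assertions; the authoritative list is CatalogRecord.PRESERVATION_STATE_CHOICES):
+    #   0 initial, 10 in PAS process (file changes rejected), 30 a plain
+    #   allowed update, 60 metadata needs revalidation, 80 accepted to PAS
+    #   (creates the PAS copy), 90 apparently valid only for PAS datasets.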
 
     def test_pas_version_is_created_on_preservation_state_80(self):
@@ -1226,35 +1499,48 @@ def test_pas_version_is_created_on_preservation_state_80(self):
         When preservation_state is updated to 'accepted to pas', a copy should be created into
         designated PAS catalog.
         """
-        cr_data = self.client.get('/rest/datasets/1', format="json").data
-        self.assertEqual(cr_data['preservation_state'], 0)
+        cr_data = self.client.get("/rest/datasets/1", format="json").data
+        self.assertEqual(cr_data["preservation_state"], 0)
 
         origin_dataset = self._create_pas_dataset_from_id(1)
-        self.assertEqual(origin_dataset['preservation_state'], 0)
-        self.assertEqual('new_version_created' in origin_dataset, True)
-        self.assertEqual(origin_dataset['new_version_created']['version_type'], 'pas')
-        self.assertEqual('preservation_dataset_version' in origin_dataset, True)
-        self.assertEqual('other_identifier' in origin_dataset['research_dataset'], True)
-        self.assertEqual(origin_dataset['research_dataset']['other_identifier'][0]['notation'].startswith('doi'), True)
+        self.assertEqual(origin_dataset["preservation_state"], 0)
+        self.assertEqual("new_version_created" in origin_dataset, True)
+        self.assertEqual(origin_dataset["new_version_created"]["version_type"], "pas")
+        self.assertEqual("preservation_dataset_version" in origin_dataset, True)
+        self.assertEqual("other_identifier" in origin_dataset["research_dataset"], True)
+        self.assertEqual(
+            origin_dataset["research_dataset"]["other_identifier"][0]["notation"].startswith("doi"),
+            True,
+        )
 
         # get pas version and verify links and other signature values are there
         pas_dataset = self.client.get(
-            '/rest/datasets/%d' % origin_dataset['preservation_dataset_version']['id'], format="json"
+            "/rest/datasets/%d" % origin_dataset["preservation_dataset_version"]["id"],
+            format="json",
         ).data
-        self.assertEqual(pas_dataset['data_catalog']['identifier'], django_settings.PAS_DATA_CATALOG_IDENTIFIER)
-        self.assertEqual(pas_dataset['preservation_state'], 80)
-        self.assertEqual(pas_dataset['preservation_dataset_origin_version']['id'], origin_dataset['id'])
         self.assertEqual(
-            pas_dataset['preservation_dataset_origin_version']['preferred_identifier'],
-            origin_dataset['research_dataset']['preferred_identifier']
+            pas_dataset["data_catalog"]["identifier"],
+            django_settings.PAS_DATA_CATALOG_IDENTIFIER,
+        )
+        self.assertEqual(pas_dataset["preservation_state"], 80)
+        self.assertEqual(
+            pas_dataset["preservation_dataset_origin_version"]["id"],
+            origin_dataset["id"],
+        )
+        self.assertEqual(
+            pas_dataset["preservation_dataset_origin_version"]["preferred_identifier"],
+            origin_dataset["research_dataset"]["preferred_identifier"],
+        )
+        self.assertEqual("deprecated" in pas_dataset["preservation_dataset_origin_version"], True)
+        self.assertEqual("other_identifier" in pas_dataset["research_dataset"], True)
+        self.assertEqual(
+            pas_dataset["research_dataset"]["other_identifier"][0]["notation"].startswith("urn"),
+            True,
         )
-        self.assertEqual('deprecated' in pas_dataset['preservation_dataset_origin_version'], True)
-        self.assertEqual('other_identifier' in pas_dataset['research_dataset'], True)
-        self.assertEqual(pas_dataset['research_dataset']['other_identifier'][0]['notation'].startswith('urn'), True)
 
         # when pas copy is created, origin_dataset preservation_state should have been set back to 0
-        cr_data = self.client.get('/rest/datasets/1', format="json").data
-        self.assertEqual(cr_data['preservation_state'], 0)
+        cr_data = self.client.get("/rest/datasets/1", format="json").data
+        self.assertEqual(cr_data["preservation_state"], 0)
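+        # Linkage created by the PAS copy, as asserted above (sketch):
+        #   origin:   new_version_created.version_type == "pas",
+        #             preservation_dataset_version -> copy, state reset to 0,
+        #             other_identifier gains a doi notation
+        #   pas copy: preservation_dataset_origin_version -> origin, state 80,
+        #             other_identifier gains a urn notation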
 
     def test_origin_dataset_cant_have_multiple_pas_versions(self):
         """
@@ -1263,35 +1549,43 @@ def test_origin_dataset_cant_have_multiple_pas_versions(self):
         """
         self._create_pas_dataset_from_id(1)
 
-        cr_data = { 'preservation_state': 80 }
-        response = self.client.patch('/rest/datasets/1', cr_data, format="json")
+        cr_data = {"preservation_state": 80}
+        response = self.client.patch("/rest/datasets/1", cr_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
-        self.assertEqual('already has a PAS version' in response.data['detail'][0], True, response.data)
+        self.assertEqual(
+            "already has a PAS version" in response.data["detail"][0],
+            True,
+            response.data,
+        )
 
     def test_dataset_can_be_created_directly_into_pas_catalog(self):
         """
         Datasets that are created directly into PAS catalog should not have any enforced
         rules about changing preservation_state value.
         """
-        self.cr_test_data['data_catalog'] = django_settings.PAS_DATA_CATALOG_IDENTIFIER
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["data_catalog"] = django_settings.PAS_DATA_CATALOG_IDENTIFIER
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
         self.assertEqual(
-            response.data['research_dataset']['preferred_identifier'].startswith('doi'),
+            response.data["research_dataset"]["preferred_identifier"].startswith("doi"),
             True,
-            response.data['research_dataset']['preferred_identifier']
+            response.data["research_dataset"]["preferred_identifier"],
         )
 
         # when created directly into pas catalog, preservation_state can be updated
         # freely at any time
-        ps_values = [ v[0] for v in CatalogRecord.PRESERVATION_STATE_CHOICES ]
+        ps_values = [v[0] for v in CatalogRecord.PRESERVATION_STATE_CHOICES]
 
         for ps in ps_values:
-            cr_data = { 'preservation_state': ps }
-            response = self.client.patch('/rest/datasets/%d' % response.data['id'], cr_data, format="json")
+            cr_data = {"preservation_state": ps}
+            response = self.client.patch(
+                "/rest/datasets/%d" % response.data["id"], cr_data, format="json"
+            )
            self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
 
-        cr_data = { 'preservation_state': 0 }
-        response = self.client.patch('/rest/datasets/%d' % response.data['id'], cr_data, format="json")
+        cr_data = {"preservation_state": 0}
+        response = self.client.patch(
+            "/rest/datasets/%d" % response.data["id"], cr_data, format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
 
     def test_dataset_files_can_not_be_changed_in_pas_catalog(self):
@@ -1302,44 +1596,53 @@
         cr = self._create_pas_dataset_from_id(1)
 
         pas_dataset = self.client.get(
-            '/rest/datasets/%d' % cr['preservation_dataset_version']['id'], format="json"
+            "/rest/datasets/%d" % cr["preservation_dataset_version"]["id"],
+            format="json",
        ).data
-        pas_dataset['research_dataset']['files'].pop(0)
+        pas_dataset["research_dataset"]["files"].pop(0)
 
-        response = self.client.put('/rest/datasets/%d' % pas_dataset['id'], pas_dataset, format="json")
+        response = self.client.put(
+            "/rest/datasets/%d" % pas_dataset["id"], pas_dataset, format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
-        self.assertEqual('Cannot change files in' in response.data['detail'][0], True)
+        self.assertEqual("Cannot change files in" in response.data["detail"][0], True)
in" in response.data["detail"][0], True) def test_pas_dataset_files_equal_origin_dataset(self): """ Ensure set of files in original and pas datasets match exactly, even if more files have been frozen in between. """ - test_file = self._get_object_from_test_data('file', requested_index=0) + test_file = self._get_object_from_test_data("file", requested_index=0) response = self.client.get( - '/rest/directories/files?project=%s&path=/' % test_file['project_identifier'], format="json") + "/rest/directories/files?project=%s&path=/" % test_file["project_identifier"], + format="json", + ) - dir_identifier = response.data['directories'][0]['identifier'] + dir_identifier = response.data["directories"][0]["identifier"] # create dataset where directory along with all of its files are included - cr_data = self.client.get('/rest/datasets/1', format="json").data - cr_data['research_dataset']['directories'] = [{ - 'identifier': dir_identifier, - 'use_category': { 'identifier': 'documentation' } - }] + cr_data = self.client.get("/rest/datasets/1", format="json").data + cr_data["research_dataset"]["directories"] = [ + { + "identifier": dir_identifier, + "use_category": {"identifier": "documentation"}, + } + ] - response = self.client.put('/rest/datasets/1', cr_data, format="json") + response = self.client.put("/rest/datasets/1", cr_data, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - cr_id = response.data['next_dataset_version']['id'] + cr_id = response.data["next_dataset_version"]["id"] # now freeze more files into same directory - test_file.update({ - 'file_name': '%s_new' % test_file['file_name'], - 'file_path': '%s_new' % test_file['file_path'], - 'identifier': '%s_new' % test_file['identifier'], - }) - response = self.client.post('/rest/files', test_file, format="json") + test_file.update( + { + "file_name": "%s_new" % test_file["file_name"], + "file_path": "%s_new" % test_file["file_path"], + "identifier": "%s_new" % test_file["identifier"], + } + ) + response = self.client.post("/rest/files", test_file, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) # more files have been frozen in the directory, but pas dataset should not have the new frozen file, @@ -1347,8 +1650,12 @@ def test_pas_dataset_files_equal_origin_dataset(self): self._create_pas_dataset_from_id(cr_id) cr = CatalogRecord.objects.get(pk=cr_id) - cr_files = cr.files.filter().order_by('id').values_list('id', flat=True) - cr_pas_files = cr.preservation_dataset_version.files.filter().order_by('id').values_list('id', flat=True) + cr_files = cr.files.filter().order_by("id").values_list("id", flat=True) + cr_pas_files = ( + cr.preservation_dataset_version.files.filter() + .order_by("id") + .values_list("id", flat=True) + ) # note: trying to assert querysets will result in failure. must evaluate the querysets first by iterating them self.assertEqual([f for f in cr_files], [f for f in cr_pas_files]) @@ -1360,12 +1667,15 @@ def test_unfreezing_files_does_not_deprecate_pas_dataset(self): been stored in PAS. 
""" cr = self._create_pas_dataset_from_id(1) - response = self.client.delete('/rest/files/1', format="json") + response = self.client.delete("/rest/files/1", format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get('/rest/datasets/%d' % cr['preservation_dataset_version']['id'], format="json") + response = self.client.get( + "/rest/datasets/%d" % cr["preservation_dataset_version"]["id"], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['deprecated'], False) + self.assertEqual(response.data["deprecated"], False) class CatalogRecordApiWriteReferenceDataTests(CatalogRecordApiWriteCommon): @@ -1386,26 +1696,28 @@ def test_organization_name_is_required(self): # simple case cr = deepcopy(self.cr_full_ida_test_data) - cr['research_dataset']['curator'] = [{ - '@type': 'Organization', - 'identifier': 'not found!', - # no name! - }] - response = self.client.post('/rest/datasets', cr, format="json") + cr["research_dataset"]["curator"] = [ + { + "@type": "Organization", + "identifier": "not found!", + # no name! + } + ] + response = self.client.post("/rest/datasets", cr, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # a more complex case. ensure organizations are found from deep structures cr = deepcopy(self.cr_full_ida_test_data) - org = cr['research_dataset']['provenance'][0]['was_associated_with'][0] - del org['name'] # should cause the error - org['@type'] = 'Organization' - org['identifier'] = 'not found!' - response = self.client.post('/rest/datasets', cr, format="json") + org = cr["research_dataset"]["provenance"][0]["was_associated_with"][0] + del org["name"] # should cause the error + org["@type"] = "Organization" + org["identifier"] = "not found!" + response = self.client.post("/rest/datasets", cr, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # try again. should be ok - org['identifier'] = 'http://uri.suomi.fi/codelist/fairdata/organization/code/10076' - response = self.client.post('/rest/datasets', cr, format="json") + org["identifier"] = "http://uri.suomi.fi/codelist/fairdata/organization/code/10076" + response = self.client.post("/rest/datasets", cr, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def test_catalog_record_reference_data_missing_ok(self): @@ -1414,11 +1726,14 @@ def test_catalog_record_reference_data_missing_ok(self): cache for whatever reason, and successfully finish the request """ cache = RedisClient() - cache.delete('reference_data') - self.assertEqual(cache.get('reference_data', master=True), None, - 'cache ref data should be missing after cache.delete()') + cache.delete("reference_data") + self.assertEqual( + cache.get("reference_data", master=True), + None, + "cache ref data should be missing after cache.delete()", + ) - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def test_missing_license_identifier_ok(self): @@ -1426,66 +1741,80 @@ def test_missing_license_identifier_ok(self): Missing license identifier is ok if url is provided. 
Works on att and ida datasets """ - rd_ida = self.cr_full_ida_test_data['research_dataset'] - rd_ida['access_rights']['license'] = [{ - 'license': "http://a.very.nice.custom/url" - }] - response = self.client.post('/rest/datasets', self.cr_full_ida_test_data, format="json") + rd_ida = self.cr_full_ida_test_data["research_dataset"] + rd_ida["access_rights"]["license"] = [{"license": "http://a.very.nice.custom/url"}] + response = self.client.post("/rest/datasets", self.cr_full_ida_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(len(response.data['research_dataset']['access_rights']['license'][0]), 1, response.data) + self.assertEqual( + len(response.data["research_dataset"]["access_rights"]["license"][0]), + 1, + response.data, + ) - rd_att = self.cr_full_att_test_data['research_dataset'] - rd_att['access_rights']['license'] = [{ - 'license': "http://also.fine.custom/uri", - 'description': { - 'en': "This is very informative description of this custom license." + rd_att = self.cr_full_att_test_data["research_dataset"] + rd_att["access_rights"]["license"] = [ + { + "license": "http://also.fine.custom/uri", + "description": { + "en": "This is very informative description of this custom license." + }, } - }] - rd_att['remote_resources'][0]['license'] = [{ - 'license': "http://cool.remote.uri", - 'description': { - 'en': "Proof that also remote licenses can be used with custom urls." + ] + rd_att["remote_resources"][0]["license"] = [ + { + "license": "http://cool.remote.uri", + "description": { + "en": "Proof that also remote licenses can be used with custom urls." + }, } - }] - response = self.client.post('/rest/datasets', self.cr_full_att_test_data, format="json") + ] + response = self.client.post("/rest/datasets", self.cr_full_att_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(len(response.data['research_dataset']['access_rights']['license'][0]), 2, response.data) - self.assertEqual(len(response.data['research_dataset']['remote_resources'][0]['license'][0]), 2, response.data) + self.assertEqual( + len(response.data["research_dataset"]["access_rights"]["license"][0]), + 2, + response.data, + ) + self.assertEqual( + len(response.data["research_dataset"]["remote_resources"][0]["license"][0]), + 2, + response.data, + ) def test_create_catalog_record_with_invalid_reference_data(self): - rd_ida = self.cr_full_ida_test_data['research_dataset'] - rd_ida['theme'][0]['identifier'] = 'nonexisting' - rd_ida['field_of_science'][0]['identifier'] = 'nonexisting' - rd_ida['language'][0]['identifier'] = 'nonexisting' - rd_ida['access_rights']['access_type']['identifier'] = 'nonexisting' - rd_ida['access_rights']['license'][0]['identifier'] = 'nonexisting' - rd_ida['other_identifier'][0]['type']['identifier'] = 'nonexisting' - rd_ida['spatial'][0]['place_uri']['identifier'] = 'nonexisting' - rd_ida['files'][0]['file_type']['identifier'] = 'nonexisting' - rd_ida['files'][0]['use_category']['identifier'] = 'nonexisting' - rd_ida['infrastructure'][0]['identifier'] = 'nonexisting' - rd_ida['creator'][0]['contributor_role'][0]['identifier'] = 'nonexisting' - rd_ida['curator'][0]['contributor_type'][0]['identifier'] = 'nonexisting' - rd_ida['is_output_of'][0]['funder_type']['identifier'] = 'nonexisting' - rd_ida['directories'][0]['use_category']['identifier'] = 'nonexisting' - rd_ida['relation'][0]['relation_type']['identifier'] = 'nonexisting' - 
-        rd_ida['relation'][0]['entity']['type']['identifier'] = 'nonexisting'
-        rd_ida['provenance'][0]['lifecycle_event']['identifier'] = 'nonexisting'
-        rd_ida['provenance'][1]['preservation_event']['identifier'] = 'nonexisting'
-        rd_ida['provenance'][0]['event_outcome']['identifier'] = 'nonexisting'
-        response = self.client.post('/rest/datasets', self.cr_full_ida_test_data, format="json")
+        rd_ida = self.cr_full_ida_test_data["research_dataset"]
+        rd_ida["theme"][0]["identifier"] = "nonexisting"
+        rd_ida["field_of_science"][0]["identifier"] = "nonexisting"
+        rd_ida["language"][0]["identifier"] = "nonexisting"
+        rd_ida["access_rights"]["access_type"]["identifier"] = "nonexisting"
+        rd_ida["access_rights"]["license"][0]["identifier"] = "nonexisting"
+        rd_ida["other_identifier"][0]["type"]["identifier"] = "nonexisting"
+        rd_ida["spatial"][0]["place_uri"]["identifier"] = "nonexisting"
+        rd_ida["files"][0]["file_type"]["identifier"] = "nonexisting"
+        rd_ida["files"][0]["use_category"]["identifier"] = "nonexisting"
+        rd_ida["infrastructure"][0]["identifier"] = "nonexisting"
+        rd_ida["creator"][0]["contributor_role"][0]["identifier"] = "nonexisting"
+        rd_ida["curator"][0]["contributor_type"][0]["identifier"] = "nonexisting"
+        rd_ida["is_output_of"][0]["funder_type"]["identifier"] = "nonexisting"
+        rd_ida["directories"][0]["use_category"]["identifier"] = "nonexisting"
+        rd_ida["relation"][0]["relation_type"]["identifier"] = "nonexisting"
+        rd_ida["relation"][0]["entity"]["type"]["identifier"] = "nonexisting"
+        rd_ida["provenance"][0]["lifecycle_event"]["identifier"] = "nonexisting"
+        rd_ida["provenance"][1]["preservation_event"]["identifier"] = "nonexisting"
+        rd_ida["provenance"][0]["event_outcome"]["identifier"] = "nonexisting"
+        response = self.client.post("/rest/datasets", self.cr_full_ida_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertEqual('research_dataset' in response.data.keys(), True)
-        self.assertEqual(len(response.data['research_dataset']), 19)
-
-        rd_att = self.cr_full_att_test_data['research_dataset']
-        rd_att['remote_resources'][0]['license'][0]['identifier'] = 'nonexisting'
-        rd_att['remote_resources'][1]['resource_type']['identifier'] = 'nonexisting'
-        rd_att['remote_resources'][0]['use_category']['identifier'] = 'nonexisting'
-        response = self.client.post('/rest/datasets', self.cr_full_att_test_data, format="json")
+        self.assertEqual("research_dataset" in response.data.keys(), True)
+        self.assertEqual(len(response.data["research_dataset"]), 19)
+
+        rd_att = self.cr_full_att_test_data["research_dataset"]
+        rd_att["remote_resources"][0]["license"][0]["identifier"] = "nonexisting"
+        rd_att["remote_resources"][1]["resource_type"]["identifier"] = "nonexisting"
+        rd_att["remote_resources"][0]["use_category"]["identifier"] = "nonexisting"
+        response = self.client.post("/rest/datasets", self.cr_full_att_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertEqual('research_dataset' in response.data.keys(), True)
-        self.assertEqual(len(response.data['research_dataset']), 3)
+        self.assertEqual("research_dataset" in response.data.keys(), True)
+        self.assertEqual(len(response.data["research_dataset"]), 3)
 
     def test_create_catalog_record_populate_fields_from_reference_data(self):
         """
@@ -1497,99 +1826,111 @@ def test_create_catalog_record_populate_fields_from_reference_data(self):
         """
         cache = RedisClient()
         rf = RDM.get_reference_data(cache)
-        refdata = rf['reference_data']
-        orgdata = rf['organization_data']
+        refdata = rf["reference_data"]
+        orgdata = rf["organization_data"]
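+        # rf shape sketch, as consumed below:
+        #   {"reference_data": {<data type>: [entries]},
+        #    "organization_data": {"organization": [entries]}}
+        # where each entry has at least "code" and "uri", and optionally
+        # "label", "wkt" and "scheme".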
 
         refs = {}
 
         data_types = [
-            'access_type',
-            'restriction_grounds',
-            'field_of_science',
-            'identifier_type',
-            'keyword',
-            'language',
-            'license',
-            'location',
-            'resource_type',
-            'file_type',
-            'use_category',
-            'research_infra',
-            'contributor_role',
-            'contributor_type',
-            'funder_type',
-            'relation_type',
-            'lifecycle_event',
-            'preservation_event',
-            'event_outcome'
+            "access_type",
+            "restriction_grounds",
+            "field_of_science",
+            "identifier_type",
+            "keyword",
+            "language",
+            "license",
+            "location",
+            "resource_type",
+            "file_type",
+            "use_category",
+            "research_infra",
+            "contributor_role",
+            "contributor_type",
+            "funder_type",
+            "relation_type",
+            "lifecycle_event",
+            "preservation_event",
+            "event_outcome",
        ]
 
         # the values in these selected entries will be used throughout the rest of the test case
         for dtype in data_types:
-            if dtype == 'location':
-                entry = next((obj for obj in refdata[dtype] if obj.get('wkt', False)), None)
+            if dtype == "location":
+                entry = next((obj for obj in refdata[dtype] if obj.get("wkt", False)), None)
                 self.assertTrue(entry is not None)
            else:
                 entry = refdata[dtype][1]
 
             refs[dtype] = {
-                'code': entry['code'],
-                'uri': entry['uri'],
-                'label': entry.get('label', None),
-                'wkt': entry.get('wkt', None),
-                'scheme': entry.get('scheme', None)
+                "code": entry["code"],
+                "uri": entry["uri"],
+                "label": entry.get("label", None),
+                "wkt": entry.get("wkt", None),
+                "scheme": entry.get("scheme", None),
            }
 
-        refs['organization'] = {
-            'uri': orgdata['organization'][0]['uri'],
-            'code': orgdata['organization'][0]['code'],
-            'label': orgdata['organization'][0]['label']
+        refs["organization"] = {
+            "uri": orgdata["organization"][0]["uri"],
+            "code": orgdata["organization"][0]["code"],
+            "label": orgdata["organization"][0]["label"],
        }
 
         # replace the relations with objects that have only the identifier set with code as value,
         # to easily check that label was populated (= that it appeared in the dataset after create)
         # without knowing its original value from the generated test data
-        rd_ida = self.cr_full_ida_test_data['research_dataset']
-        rd_ida['theme'][0] = {'identifier': refs['keyword']['code']}
-        rd_ida['field_of_science'][0] = {'identifier': refs['field_of_science']['code']}
-        rd_ida['language'][0] = {'identifier': refs['language']['code']}
-        rd_ida['access_rights']['access_type'] = {'identifier': refs['access_type']['code']}
-        rd_ida['access_rights']['restriction_grounds'][0] = {'identifier': refs['restriction_grounds']['code']}
-        rd_ida['access_rights']['license'][0] = {'identifier': refs['license']['code']}
-        rd_ida['other_identifier'][0]['type'] = {'identifier': refs['identifier_type']['code']}
-        rd_ida['spatial'][0]['place_uri'] = {'identifier': refs['location']['code']}
-        rd_ida['files'][0]['file_type'] = {'identifier': refs['file_type']['code']}
-        rd_ida['files'][0]['use_category'] = {'identifier': refs['use_category']['code']}
-        rd_ida['directories'][0]['use_category'] = {'identifier': refs['use_category']['code']}
-        rd_ida['infrastructure'][0] = {'identifier': refs['research_infra']['code']}
-        rd_ida['creator'][0]['contributor_role'][0] = {'identifier': refs['contributor_role']['code']}
-        rd_ida['curator'][0]['contributor_type'][0] = {'identifier': refs['contributor_type']['code']}
-        rd_ida['is_output_of'][0]['funder_type'] = {'identifier': refs['funder_type']['code']}
-        rd_ida['relation'][0]['relation_type'] = {'identifier': refs['relation_type']['code']}
-        rd_ida['relation'][0]['entity']['type'] = {'identifier': refs['resource_type']['code']}
-        rd_ida['provenance'][0]['lifecycle_event'] = {'identifier': refs['lifecycle_event']['code']}
-        rd_ida['provenance'][1]['preservation_event'] = {'identifier': refs['preservation_event']['code']}
-        rd_ida['provenance'][0]['event_outcome'] = {'identifier': refs['event_outcome']['code']}
+        rd_ida = self.cr_full_ida_test_data["research_dataset"]
+        rd_ida["theme"][0] = {"identifier": refs["keyword"]["code"]}
+        rd_ida["field_of_science"][0] = {"identifier": refs["field_of_science"]["code"]}
+        rd_ida["language"][0] = {"identifier": refs["language"]["code"]}
+        rd_ida["access_rights"]["access_type"] = {"identifier": refs["access_type"]["code"]}
+        rd_ida["access_rights"]["restriction_grounds"][0] = {
+            "identifier": refs["restriction_grounds"]["code"]
+        }
+        rd_ida["access_rights"]["license"][0] = {"identifier": refs["license"]["code"]}
+        rd_ida["other_identifier"][0]["type"] = {"identifier": refs["identifier_type"]["code"]}
+        rd_ida["spatial"][0]["place_uri"] = {"identifier": refs["location"]["code"]}
+        rd_ida["files"][0]["file_type"] = {"identifier": refs["file_type"]["code"]}
+        rd_ida["files"][0]["use_category"] = {"identifier": refs["use_category"]["code"]}
+        rd_ida["directories"][0]["use_category"] = {"identifier": refs["use_category"]["code"]}
+        rd_ida["infrastructure"][0] = {"identifier": refs["research_infra"]["code"]}
+        rd_ida["creator"][0]["contributor_role"][0] = {
+            "identifier": refs["contributor_role"]["code"]
+        }
+        rd_ida["curator"][0]["contributor_type"][0] = {
+            "identifier": refs["contributor_type"]["code"]
+        }
+        rd_ida["is_output_of"][0]["funder_type"] = {"identifier": refs["funder_type"]["code"]}
+        rd_ida["relation"][0]["relation_type"] = {"identifier": refs["relation_type"]["code"]}
+        rd_ida["relation"][0]["entity"]["type"] = {"identifier": refs["resource_type"]["code"]}
+        rd_ida["provenance"][0]["lifecycle_event"] = {"identifier": refs["lifecycle_event"]["code"]}
+        rd_ida["provenance"][1]["preservation_event"] = {
+            "identifier": refs["preservation_event"]["code"]
+        }
+        rd_ida["provenance"][0]["event_outcome"] = {"identifier": refs["event_outcome"]["code"]}
 
         # these have other required fields, so only update the identifier with code
-        rd_ida['is_output_of'][0]['source_organization'][0]['identifier'] = refs['organization']['code']
-        rd_ida['is_output_of'][0]['has_funding_agency'][0]['identifier'] = refs['organization']['code']
-        rd_ida['other_identifier'][0]['provider']['identifier'] = refs['organization']['code']
-        rd_ida['contributor'][0]['member_of']['identifier'] = refs['organization']['code']
-        rd_ida['creator'][0]['member_of']['identifier'] = refs['organization']['code']
-        rd_ida['curator'][0]['is_part_of']['identifier'] = refs['organization']['code']
-        rd_ida['publisher']['is_part_of']['identifier'] = refs['organization']['code']
-        rd_ida['rights_holder'][0]['is_part_of']['identifier'] = refs['organization']['code']
+        rd_ida["is_output_of"][0]["source_organization"][0]["identifier"] = refs["organization"][
+            "code"
+        ]
+        rd_ida["is_output_of"][0]["has_funding_agency"][0]["identifier"] = refs["organization"][
+            "code"
+        ]
+        rd_ida["other_identifier"][0]["provider"]["identifier"] = refs["organization"]["code"]
+        rd_ida["contributor"][0]["member_of"]["identifier"] = refs["organization"]["code"]
+        rd_ida["creator"][0]["member_of"]["identifier"] = refs["organization"]["code"]
+        rd_ida["curator"][0]["is_part_of"]["identifier"] = refs["organization"]["code"]
rd_ida["publisher"]["is_part_of"]["identifier"] = refs["organization"]["code"] + rd_ida["rights_holder"][0]["is_part_of"]["identifier"] = refs["organization"]["code"] # Other type of reference data populations - orig_wkt_value = rd_ida['spatial'][0]['as_wkt'][0] - rd_ida['spatial'][0]['place_uri']['identifier'] = refs['location']['code'] - rd_ida['spatial'][1]['as_wkt'] = [] - rd_ida['spatial'][1]['place_uri']['identifier'] = refs['location']['code'] + orig_wkt_value = rd_ida["spatial"][0]["as_wkt"][0] + rd_ida["spatial"][0]["place_uri"]["identifier"] = refs["location"]["code"] + rd_ida["spatial"][1]["as_wkt"] = [] + rd_ida["spatial"][1]["place_uri"]["identifier"] = refs["location"]["code"] - response = self.client.post('/rest/datasets', self.cr_full_ida_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_full_ida_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('research_dataset' in response.data.keys(), True) + self.assertEqual("research_dataset" in response.data.keys(), True) - new_rd_ida = response.data['research_dataset'] + new_rd_ida = response.data["research_dataset"] self._assert_uri_copied_to_identifier(refs, new_rd_ida) self._assert_label_copied_to_pref_label(refs, new_rd_ida) self._assert_label_copied_to_title(refs, new_rd_ida) @@ -1597,170 +1938,382 @@ def test_create_catalog_record_populate_fields_from_reference_data(self): # Assert if spatial as_wkt field has been populated with a value from ref data which has wkt value having # condition that the user has not given own coordinates in the as_wkt field - self.assertEqual(orig_wkt_value, new_rd_ida['spatial'][0]['as_wkt'][0]) - self.assertEqual(refs['location']['wkt'], new_rd_ida['spatial'][1]['as_wkt'][0]) + self.assertEqual(orig_wkt_value, new_rd_ida["spatial"][0]["as_wkt"][0]) + self.assertEqual(refs["location"]["wkt"], new_rd_ida["spatial"][1]["as_wkt"][0]) # rd from att data catalog - rd_att = self.cr_full_att_test_data['research_dataset'] - rd_att['remote_resources'][1]['resource_type'] = {'identifier': refs['resource_type']['code']} - rd_att['remote_resources'][0]['use_category'] = {'identifier': refs['use_category']['code']} - rd_att['remote_resources'][0]['license'][0] = {'identifier': refs['license']['code']} + rd_att = self.cr_full_att_test_data["research_dataset"] + rd_att["remote_resources"][1]["resource_type"] = { + "identifier": refs["resource_type"]["code"] + } + rd_att["remote_resources"][0]["use_category"] = {"identifier": refs["use_category"]["code"]} + rd_att["remote_resources"][0]["license"][0] = {"identifier": refs["license"]["code"]} # Assert remote resources related reference datas - response = self.client.post('/rest/datasets', self.cr_full_att_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_full_att_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('research_dataset' in response.data.keys(), True) - new_rd_att = response.data['research_dataset'] + self.assertEqual("research_dataset" in response.data.keys(), True) + new_rd_att = response.data["research_dataset"] self._assert_att_remote_resource_items(refs, new_rd_att) def _assert_att_remote_resource_items(self, refs, new_rd): - self.assertEqual(refs['resource_type']['uri'], new_rd['remote_resources'][1]['resource_type']['identifier']) - self.assertEqual(refs['use_category']['uri'], new_rd['remote_resources'][0]['use_category']['identifier']) - 
-        self.assertEqual(refs['license']['uri'], new_rd['remote_resources'][0]['license'][0]['identifier'])
-        self.assertEqual(refs['resource_type']['label'],
-                         new_rd['remote_resources'][1]['resource_type'].get('pref_label', None))
-        self.assertEqual(refs['use_category']['label'],
-                         new_rd['remote_resources'][0]['use_category'].get('pref_label', None))
-        self.assertEqual(refs['license']['label'], new_rd['remote_resources'][0]['license'][0].get('title', None))
+        self.assertEqual(
+            refs["resource_type"]["uri"],
+            new_rd["remote_resources"][1]["resource_type"]["identifier"],
+        )
+        self.assertEqual(
+            refs["use_category"]["uri"],
+            new_rd["remote_resources"][0]["use_category"]["identifier"],
+        )
+        self.assertEqual(
+            refs["license"]["uri"],
+            new_rd["remote_resources"][0]["license"][0]["identifier"],
+        )
+        self.assertEqual(
+            refs["resource_type"]["label"],
+            new_rd["remote_resources"][1]["resource_type"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["use_category"]["label"],
+            new_rd["remote_resources"][0]["use_category"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["license"]["label"],
+            new_rd["remote_resources"][0]["license"][0].get("title", None),
+        )
 
     def _assert_uri_copied_to_identifier(self, refs, new_rd):
-        self.assertEqual(refs['keyword']['uri'], new_rd['theme'][0]['identifier'])
-        self.assertEqual(refs['field_of_science']['uri'], new_rd['field_of_science'][0]['identifier'])
-        self.assertEqual(refs['language']['uri'], new_rd['language'][0]['identifier'])
-        self.assertEqual(refs['access_type']['uri'], new_rd['access_rights']['access_type']['identifier'])
-        self.assertEqual(refs['restriction_grounds']['uri'],
-                         new_rd['access_rights']['restriction_grounds'][0]['identifier'])
-        self.assertEqual(refs['license']['uri'], new_rd['access_rights']['license'][0]['identifier'])
-        self.assertEqual(refs['identifier_type']['uri'], new_rd['other_identifier'][0]['type']['identifier'])
-        self.assertEqual(refs['location']['uri'], new_rd['spatial'][0]['place_uri']['identifier'])
-        self.assertEqual(refs['file_type']['uri'], new_rd['files'][0]['file_type']['identifier'])
-        self.assertEqual(refs['use_category']['uri'], new_rd['files'][0]['use_category']['identifier'])
-
-        self.assertEqual(refs['use_category']['uri'], new_rd['directories'][0]['use_category']['identifier'])
-        self.assertEqual(refs['organization']['uri'], new_rd['is_output_of'][0]['source_organization'][0]['identifier'])
-        self.assertEqual(refs['organization']['uri'], new_rd['is_output_of'][0]['has_funding_agency'][0]['identifier'])
-        self.assertEqual(refs['organization']['uri'], new_rd['other_identifier'][0]['provider']['identifier'])
-        self.assertEqual(refs['organization']['uri'], new_rd['contributor'][0]['member_of']['identifier'])
-        self.assertEqual(refs['organization']['uri'], new_rd['creator'][0]['member_of']['identifier'])
-        self.assertEqual(refs['organization']['uri'], new_rd['curator'][0]['is_part_of']['identifier'])
-        self.assertEqual(refs['organization']['uri'], new_rd['publisher']['is_part_of']['identifier'])
-        self.assertEqual(refs['organization']['uri'], new_rd['rights_holder'][0]['is_part_of']['identifier'])
-        self.assertEqual(refs['research_infra']['uri'], new_rd['infrastructure'][0]['identifier'])
-        self.assertEqual(refs['contributor_role']['uri'], new_rd['creator'][0]['contributor_role'][0]['identifier'])
-        self.assertEqual(refs['contributor_type']['uri'], new_rd['curator'][0]['contributor_type'][0]['identifier'])
-        self.assertEqual(refs['funder_type']['uri'], new_rd['is_output_of'][0]['funder_type']['identifier'])
-        self.assertEqual(refs['relation_type']['uri'], new_rd['relation'][0]['relation_type']['identifier'])
-        self.assertEqual(refs['resource_type']['uri'], new_rd['relation'][0]['entity']['type']['identifier'])
-        self.assertEqual(refs['lifecycle_event']['uri'], new_rd['provenance'][0]['lifecycle_event']['identifier'])
-        self.assertEqual(refs['preservation_event']['uri'], new_rd['provenance'][1]['preservation_event']['identifier'])
-        self.assertEqual(refs['event_outcome']['uri'], new_rd['provenance'][0]['event_outcome']['identifier'])
+        self.assertEqual(refs["keyword"]["uri"], new_rd["theme"][0]["identifier"])
+        self.assertEqual(
+            refs["field_of_science"]["uri"], new_rd["field_of_science"][0]["identifier"]
+        )
+        self.assertEqual(refs["language"]["uri"], new_rd["language"][0]["identifier"])
+        self.assertEqual(
+            refs["access_type"]["uri"],
+            new_rd["access_rights"]["access_type"]["identifier"],
+        )
+        self.assertEqual(
+            refs["restriction_grounds"]["uri"],
+            new_rd["access_rights"]["restriction_grounds"][0]["identifier"],
+        )
+        self.assertEqual(
+            refs["license"]["uri"], new_rd["access_rights"]["license"][0]["identifier"]
+        )
+        self.assertEqual(
+            refs["identifier_type"]["uri"],
+            new_rd["other_identifier"][0]["type"]["identifier"],
+        )
+        self.assertEqual(refs["location"]["uri"], new_rd["spatial"][0]["place_uri"]["identifier"])
+        self.assertEqual(refs["file_type"]["uri"], new_rd["files"][0]["file_type"]["identifier"])
+        self.assertEqual(
+            refs["use_category"]["uri"],
+            new_rd["files"][0]["use_category"]["identifier"],
+        )
+
+        self.assertEqual(
+            refs["use_category"]["uri"],
+            new_rd["directories"][0]["use_category"]["identifier"],
+        )
+        self.assertEqual(
+            refs["organization"]["uri"],
+            new_rd["is_output_of"][0]["source_organization"][0]["identifier"],
+        )
+        self.assertEqual(
+            refs["organization"]["uri"],
+            new_rd["is_output_of"][0]["has_funding_agency"][0]["identifier"],
+        )
+        self.assertEqual(
+            refs["organization"]["uri"],
+            new_rd["other_identifier"][0]["provider"]["identifier"],
+        )
+        self.assertEqual(
+            refs["organization"]["uri"],
+            new_rd["contributor"][0]["member_of"]["identifier"],
+        )
+        self.assertEqual(
+            refs["organization"]["uri"], new_rd["creator"][0]["member_of"]["identifier"]
+        )
+        self.assertEqual(
+            refs["organization"]["uri"],
+            new_rd["curator"][0]["is_part_of"]["identifier"],
+        )
+        self.assertEqual(
+            refs["organization"]["uri"], new_rd["publisher"]["is_part_of"]["identifier"]
+        )
+        self.assertEqual(
+            refs["organization"]["uri"],
+            new_rd["rights_holder"][0]["is_part_of"]["identifier"],
+        )
+        self.assertEqual(refs["research_infra"]["uri"], new_rd["infrastructure"][0]["identifier"])
+        self.assertEqual(
+            refs["contributor_role"]["uri"],
+            new_rd["creator"][0]["contributor_role"][0]["identifier"],
+        )
+        self.assertEqual(
+            refs["contributor_type"]["uri"],
+            new_rd["curator"][0]["contributor_type"][0]["identifier"],
+        )
+        self.assertEqual(
+            refs["funder_type"]["uri"],
+            new_rd["is_output_of"][0]["funder_type"]["identifier"],
+        )
+        self.assertEqual(
+            refs["relation_type"]["uri"],
+            new_rd["relation"][0]["relation_type"]["identifier"],
+        )
+        self.assertEqual(
+            refs["resource_type"]["uri"],
+            new_rd["relation"][0]["entity"]["type"]["identifier"],
+        )
+        self.assertEqual(
+            refs["lifecycle_event"]["uri"],
+            new_rd["provenance"][0]["lifecycle_event"]["identifier"],
+        )
+        self.assertEqual(
+            refs["preservation_event"]["uri"],
+            new_rd["provenance"][1]["preservation_event"]["identifier"],
+        )
+        self.assertEqual(
refs["event_outcome"]["uri"], + new_rd["provenance"][0]["event_outcome"]["identifier"], + ) def _assert_scheme_copied_to_in_scheme(self, refs, new_rd): - self.assertEqual(refs['keyword']['scheme'], new_rd['theme'][0]['in_scheme']) - self.assertEqual(refs['field_of_science']['scheme'], new_rd['field_of_science'][0]['in_scheme']) - self.assertEqual(refs['language']['scheme'], new_rd['language'][0]['in_scheme']) - self.assertEqual(refs['access_type']['scheme'], new_rd['access_rights']['access_type']['in_scheme']) - self.assertEqual(refs['restriction_grounds']['scheme'], - new_rd['access_rights']['restriction_grounds'][0]['in_scheme']) - self.assertEqual(refs['license']['scheme'], new_rd['access_rights']['license'][0]['in_scheme']) - self.assertEqual(refs['identifier_type']['scheme'], new_rd['other_identifier'][0]['type']['in_scheme']) - self.assertEqual(refs['location']['scheme'], new_rd['spatial'][0]['place_uri']['in_scheme']) - self.assertEqual(refs['file_type']['scheme'], new_rd['files'][0]['file_type']['in_scheme']) - self.assertEqual(refs['use_category']['scheme'], new_rd['files'][0]['use_category']['in_scheme']) - - self.assertEqual(refs['use_category']['scheme'], new_rd['directories'][0]['use_category']['in_scheme']) - self.assertEqual(refs['research_infra']['scheme'], new_rd['infrastructure'][0]['in_scheme']) - self.assertEqual(refs['contributor_role']['scheme'], new_rd['creator'][0]['contributor_role']['in_scheme']) - self.assertEqual(refs['contributor_type']['scheme'], new_rd['curator'][0]['contributor_type']['in_scheme']) - self.assertEqual(refs['funder_type']['scheme'], new_rd['is_output_of'][0]['funder_type']['in_scheme']) - self.assertEqual(refs['relation_type']['scheme'], new_rd['relation'][0]['relation_type']['in_scheme']) - self.assertEqual(refs['resource_type']['scheme'], new_rd['relation'][0]['entity']['type']['in_scheme']) - self.assertEqual(refs['lifecycle_event']['scheme'], new_rd['provenance'][0]['lifecycle_event']['in_scheme']) - self.assertEqual(refs['preservation_event']['scheme'], - new_rd['provenance'][1]['preservation_event']['in_scheme']) - self.assertEqual(refs['event_outcome']['scheme'], new_rd['provenance'][0]['event_outcome']['in_scheme']) + self.assertEqual(refs["keyword"]["scheme"], new_rd["theme"][0]["in_scheme"]) + self.assertEqual( + refs["field_of_science"]["scheme"], + new_rd["field_of_science"][0]["in_scheme"], + ) + self.assertEqual(refs["language"]["scheme"], new_rd["language"][0]["in_scheme"]) + self.assertEqual( + refs["access_type"]["scheme"], + new_rd["access_rights"]["access_type"]["in_scheme"], + ) + self.assertEqual( + refs["restriction_grounds"]["scheme"], + new_rd["access_rights"]["restriction_grounds"][0]["in_scheme"], + ) + self.assertEqual( + refs["license"]["scheme"], + new_rd["access_rights"]["license"][0]["in_scheme"], + ) + self.assertEqual( + refs["identifier_type"]["scheme"], + new_rd["other_identifier"][0]["type"]["in_scheme"], + ) + self.assertEqual(refs["location"]["scheme"], new_rd["spatial"][0]["place_uri"]["in_scheme"]) + self.assertEqual(refs["file_type"]["scheme"], new_rd["files"][0]["file_type"]["in_scheme"]) + self.assertEqual( + refs["use_category"]["scheme"], + new_rd["files"][0]["use_category"]["in_scheme"], + ) + + self.assertEqual( + refs["use_category"]["scheme"], + new_rd["directories"][0]["use_category"]["in_scheme"], + ) + self.assertEqual(refs["research_infra"]["scheme"], new_rd["infrastructure"][0]["in_scheme"]) + self.assertEqual( + refs["contributor_role"]["scheme"], + 
new_rd["creator"][0]["contributor_role"]["in_scheme"], + ) + self.assertEqual( + refs["contributor_type"]["scheme"], + new_rd["curator"][0]["contributor_type"]["in_scheme"], + ) + self.assertEqual( + refs["funder_type"]["scheme"], + new_rd["is_output_of"][0]["funder_type"]["in_scheme"], + ) + self.assertEqual( + refs["relation_type"]["scheme"], + new_rd["relation"][0]["relation_type"]["in_scheme"], + ) + self.assertEqual( + refs["resource_type"]["scheme"], + new_rd["relation"][0]["entity"]["type"]["in_scheme"], + ) + self.assertEqual( + refs["lifecycle_event"]["scheme"], + new_rd["provenance"][0]["lifecycle_event"]["in_scheme"], + ) + self.assertEqual( + refs["preservation_event"]["scheme"], + new_rd["provenance"][1]["preservation_event"]["in_scheme"], + ) + self.assertEqual( + refs["event_outcome"]["scheme"], + new_rd["provenance"][0]["event_outcome"]["in_scheme"], + ) + + def _assert_label_copied_to_pref_label(self, refs, new_rd): + self.assertEqual(refs["keyword"]["label"], new_rd["theme"][0].get("pref_label", None)) + self.assertEqual( + refs["field_of_science"]["label"], + new_rd["field_of_science"][0].get("pref_label", None), + ) + self.assertEqual( + refs["access_type"]["label"], + new_rd["access_rights"]["access_type"].get("pref_label", None), + ) + self.assertEqual( + refs["restriction_grounds"]["label"], + new_rd["access_rights"]["restriction_grounds"][0].get("pref_label", None), + ) + self.assertEqual( + refs["identifier_type"]["label"], + new_rd["other_identifier"][0]["type"].get("pref_label", None), + ) + self.assertEqual( + refs["location"]["label"], + new_rd["spatial"][0]["place_uri"].get("pref_label", None), + ) + self.assertEqual( + refs["file_type"]["label"], + new_rd["files"][0]["file_type"].get("pref_label", None), + ) + self.assertEqual( + refs["use_category"]["label"], + new_rd["files"][0]["use_category"].get("pref_label", None), + ) + self.assertEqual( + refs["use_category"]["label"], + new_rd["directories"][0]["use_category"].get("pref_label", None), + ) - def _assert_label_copied_to_pref_label(self, refs, new_rd): - self.assertEqual(refs['keyword']['label'], new_rd['theme'][0].get('pref_label', None)) - self.assertEqual(refs['field_of_science']['label'], new_rd['field_of_science'][0].get('pref_label', None)) - self.assertEqual(refs['access_type']['label'], new_rd['access_rights']['access_type'].get('pref_label', None)) - self.assertEqual(refs['restriction_grounds']['label'], - new_rd['access_rights']['restriction_grounds'][0].get('pref_label', None)) - self.assertEqual(refs['identifier_type']['label'], - new_rd['other_identifier'][0]['type'].get('pref_label', None)) - self.assertEqual(refs['location']['label'], new_rd['spatial'][0]['place_uri'].get('pref_label', None)) - self.assertEqual(refs['file_type']['label'], new_rd['files'][0]['file_type'].get('pref_label', None)) - self.assertEqual(refs['use_category']['label'], new_rd['files'][0]['use_category'].get('pref_label', None)) - self.assertEqual(refs['use_category']['label'], - new_rd['directories'][0]['use_category'].get('pref_label', None)) - - self.assertEqual(refs['research_infra']['label'], new_rd['infrastructure'][0].get('pref_label', None)) - self.assertEqual(refs['contributor_role']['label'], - new_rd['creator'][0]['contributor_role'][0].get('pref_label', None)) - self.assertEqual(refs['contributor_type']['label'], - new_rd['curator'][0]['contributor_type'][0].get('pref_label', None)) - self.assertEqual(refs['funder_type']['label'], new_rd['is_output_of'][0]['funder_type'].get('pref_label', None)) - 
-        self.assertEqual(refs['relation_type']['label'], new_rd['relation'][0]['relation_type'].get('pref_label', None))
-        self.assertEqual(refs['resource_type']['label'],
-                         new_rd['relation'][0]['entity']['type'].get('pref_label', None))
-        self.assertEqual(refs['lifecycle_event']['label'],
-                         new_rd['provenance'][0]['lifecycle_event'].get('pref_label', None))
-        self.assertEqual(refs['preservation_event']['label'],
-                         new_rd['provenance'][1]['preservation_event'].get('pref_label', None))
-        self.assertEqual(refs['event_outcome']['label'],
-                         new_rd['provenance'][0]['event_outcome'].get('pref_label', None))
+        self.assertEqual(
+            refs["research_infra"]["label"],
+            new_rd["infrastructure"][0].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["contributor_role"]["label"],
+            new_rd["creator"][0]["contributor_role"][0].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["contributor_type"]["label"],
+            new_rd["curator"][0]["contributor_type"][0].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["funder_type"]["label"],
+            new_rd["is_output_of"][0]["funder_type"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["relation_type"]["label"],
+            new_rd["relation"][0]["relation_type"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["resource_type"]["label"],
+            new_rd["relation"][0]["entity"]["type"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["lifecycle_event"]["label"],
+            new_rd["provenance"][0]["lifecycle_event"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["preservation_event"]["label"],
+            new_rd["provenance"][1]["preservation_event"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["event_outcome"]["label"],
+            new_rd["provenance"][0]["event_outcome"].get("pref_label", None),
+        )
 
     def _assert_label_copied_to_title(self, refs, new_rd):
-        required_langs = dict((lang, val) for lang, val in refs['language']['label'].items()
-                              if lang in ['fi', 'sv', 'en', 'und'])
-        self.assertEqual(required_langs, new_rd['language'][0].get('title', None))
-        self.assertEqual(refs['license']['label'], new_rd['access_rights']['license'][0].get('title', None))
+        required_langs = dict(
+            (lang, val)
+            for lang, val in refs["language"]["label"].items()
+            if lang in ["fi", "sv", "en", "und"]
+        )
+        self.assertEqual(required_langs, new_rd["language"][0].get("title", None))
+        self.assertEqual(
+            refs["license"]["label"],
+            new_rd["access_rights"]["license"][0].get("title", None),
+        )
 
     def _assert_label_copied_to_name(self, refs, new_rd):
-        self.assertEqual(refs['organization']['label'],
-                         new_rd['is_output_of'][0]['source_organization'][0].get('name', None))
-        self.assertEqual(refs['organization']['label'],
-                         new_rd['is_output_of'][0]['has_funding_agency'][0].get('name', None))
-        self.assertEqual(refs['organization']['label'], new_rd['other_identifier'][0]['provider'].get('name', None))
-        self.assertEqual(refs['organization']['label'], new_rd['contributor'][0]['member_of'].get('name', None))
-        self.assertEqual(refs['organization']['label'], new_rd['creator'][0]['member_of'].get('name', None))
-        self.assertEqual(refs['organization']['label'], new_rd['curator'][0]['is_part_of'].get('name', None))
-        self.assertEqual(refs['organization']['label'], new_rd['publisher']['is_part_of'].get('name', None))
-        self.assertEqual(refs['organization']['label'], new_rd['rights_holder'][0]['is_part_of'].get('name', None))
+        self.assertEqual(
+            refs["organization"]["label"],
+            new_rd["is_output_of"][0]["source_organization"][0].get("name", None),
+        )
+        self.assertEqual(
+            refs["organization"]["label"],
new_rd["is_output_of"][0]["has_funding_agency"][0].get("name", None), + ) + self.assertEqual( + refs["organization"]["label"], + new_rd["other_identifier"][0]["provider"].get("name", None), + ) + self.assertEqual( + refs["organization"]["label"], + new_rd["contributor"][0]["member_of"].get("name", None), + ) + self.assertEqual( + refs["organization"]["label"], + new_rd["creator"][0]["member_of"].get("name", None), + ) + self.assertEqual( + refs["organization"]["label"], + new_rd["curator"][0]["is_part_of"].get("name", None), + ) + self.assertEqual( + refs["organization"]["label"], + new_rd["publisher"]["is_part_of"].get("name", None), + ) + self.assertEqual( + refs["organization"]["label"], + new_rd["rights_holder"][0]["is_part_of"].get("name", None), + ) def test_refdata_sub_org_main_org_population(self): # Test parent org gets populated when sub org is from ref data and user has not provided is_part_of relation - self.cr_test_data['research_dataset']['publisher'] = {'@type': 'Organization', 'identifier': '10076-A800'} - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["publisher"] = { + "@type": "Organization", + "identifier": "10076-A800", + } + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('is_part_of' in response.data['research_dataset']['publisher'], True) - self.assertEqual('http://uri.suomi.fi/codelist/fairdata/organization/code/10076', - response.data['research_dataset']['publisher']['is_part_of']['identifier']) - self.assertTrue(response.data['research_dataset']['publisher']['is_part_of'].get('name', False)) + self.assertEqual("is_part_of" in response.data["research_dataset"]["publisher"], True) + self.assertEqual( + "http://uri.suomi.fi/codelist/fairdata/organization/code/10076", + response.data["research_dataset"]["publisher"]["is_part_of"]["identifier"], + ) + self.assertTrue( + response.data["research_dataset"]["publisher"]["is_part_of"].get("name", False) + ) # Test parent org does not get populated when sub org is from ref data and user has provided is_part_of relation - self.cr_test_data['research_dataset']['publisher'] = { - '@type': 'Organization', - 'identifier': '10076-A800', - 'is_part_of': { - '@type': 'Organization', - 'identifier': 'test_id', - 'name': {'und': 'test_name'} - }} - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["publisher"] = { + "@type": "Organization", + "identifier": "10076-A800", + "is_part_of": { + "@type": "Organization", + "identifier": "test_id", + "name": {"und": "test_name"}, + }, + } + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('is_part_of' in response.data['research_dataset']['publisher'], True) - self.assertEqual('test_id', response.data['research_dataset']['publisher']['is_part_of']['identifier']) - self.assertEqual('test_name', response.data['research_dataset']['publisher']['is_part_of']['name']['und']) + self.assertEqual("is_part_of" in response.data["research_dataset"]["publisher"], True) + self.assertEqual( + "test_id", + response.data["research_dataset"]["publisher"]["is_part_of"]["identifier"], + ) + self.assertEqual( + "test_name", + response.data["research_dataset"]["publisher"]["is_part_of"]["name"]["und"], + ) # Test 
nothing happens when org is a parent org - self.cr_test_data['research_dataset']['publisher'] = {'@type': 'Organization', 'identifier': '10076'} - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["publisher"] = { + "@type": "Organization", + "identifier": "10076", + } + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('is_part_of' not in response.data['research_dataset']['publisher'], True) + self.assertEqual("is_part_of" not in response.data["research_dataset"]["publisher"], True) class CatalogRecordApiWriteAlternateRecords(CatalogRecordApiWriteCommon): @@ -1777,8 +2330,8 @@ class CatalogRecordApiWriteAlternateRecords(CatalogRecordApiWriteCommon): def setUp(self): super(CatalogRecordApiWriteAlternateRecords, self).setUp() self.preferred_identifier = self._set_preferred_identifier_to_record(pk=1, data_catalog=1) - self.cr_test_data['research_dataset']['preferred_identifier'] = self.preferred_identifier - self.cr_test_data['data_catalog'] = None + self.cr_test_data["research_dataset"]["preferred_identifier"] = self.preferred_identifier + self.cr_test_data["data_catalog"] = None def test_alternate_record_set_is_created_if_it_doesnt_exist(self): """ @@ -1787,21 +2340,29 @@ def test_alternate_record_set_is_created_if_it_doesnt_exist(self): """ # new record is saved to catalog 3, which does not support versioning - self.cr_test_data['data_catalog'] = 3 + self.cr_test_data["data_catalog"] = 3 existing_records_count = CatalogRecord.objects.filter( - research_dataset__contains={'preferred_identifier': self.preferred_identifier}).count() - self.assertEqual(existing_records_count, 1, - 'in the beginning, there should be only one record with pref id %s' - % self.preferred_identifier) + research_dataset__contains={"preferred_identifier": self.preferred_identifier} + ).count() + self.assertEqual( + existing_records_count, + 1, + "in the beginning, there should be only one record with pref id %s" + % self.preferred_identifier, + ) - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) records = CatalogRecord.objects.filter( - research_dataset__contains={'preferred_identifier': self.preferred_identifier}) - self.assertEqual(len(records), 2, - 'after, there should be two records with pref id %s' % self.preferred_identifier) + research_dataset__contains={"preferred_identifier": self.preferred_identifier} + ) + self.assertEqual( + len(records), + 2, + "after, there should be two records with pref id %s" % self.preferred_identifier, + ) # both records are moved to same set ars_id = records[0].alternate_record_set.id @@ -1810,7 +2371,7 @@ def test_alternate_record_set_is_created_if_it_doesnt_exist(self): # records in the set are the ones expected self.assertEqual(records[0].id, 1) - self.assertEqual(records[1].id, response.data['id']) + self.assertEqual(records[1].id, response.data["id"]) # records in the set are indeed in different catalogs self.assertEqual(records[0].data_catalog.id, 1) @@ -1823,20 +2384,29 @@ def test_append_to_existing_alternate_record_set_if_it_exists(self): to the existing alternate_record_set. 
""" self._set_preferred_identifier_to_record(pk=2, data_catalog=2) - self.cr_test_data['data_catalog'] = 3 + self.cr_test_data["data_catalog"] = 3 existing_records_count = CatalogRecord.objects.filter( - research_dataset__contains={'preferred_identifier': self.preferred_identifier}).count() - self.assertEqual(existing_records_count, 2, - 'in the beginning, there should be two records with pref id %s' % self.preferred_identifier) + research_dataset__contains={"preferred_identifier": self.preferred_identifier} + ).count() + self.assertEqual( + existing_records_count, + 2, + "in the beginning, there should be two records with pref id %s" + % self.preferred_identifier, + ) - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) records = CatalogRecord.objects.filter( - research_dataset__contains={'preferred_identifier': self.preferred_identifier}) - self.assertEqual(len(records), 3, - 'after, there should be three records with pref id %s' % self.preferred_identifier) + research_dataset__contains={"preferred_identifier": self.preferred_identifier} + ) + self.assertEqual( + len(records), + 3, + "after, there should be three records with pref id %s" % self.preferred_identifier, + ) # all records belong to same set ars_id = records[0].alternate_record_set.id @@ -1847,7 +2417,7 @@ def test_append_to_existing_alternate_record_set_if_it_exists(self): # records in the set are the ones expected self.assertEqual(records[0].id, 1) self.assertEqual(records[1].id, 2) - self.assertEqual(records[2].id, response.data['id']) + self.assertEqual(records[2].id, response.data["id"]) # records in the set are indeed in different catalogs self.assertEqual(records[0].data_catalog.id, 1) @@ -1864,15 +2434,18 @@ def test_record_is_removed_from_alternate_record_set_when_deleted(self): # initial conditions will have 3 records in the same set. self._set_and_ensure_initial_conditions() - response = self.client.delete('/rest/datasets/2', format="json") + response = self.client.delete("/rest/datasets/2", format="json") self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) # check resulting conditions records = CatalogRecord.objects.filter( - research_dataset__contains={'preferred_identifier': self.preferred_identifier}) + research_dataset__contains={"preferred_identifier": self.preferred_identifier} + ) self.assertEqual(records[0].alternate_record_set.records.count(), 2) - def test_alternate_record_set_is_deleted_if_updating_record_with_no_versioning_and_one_record_left(self): + def test_alternate_record_set_is_deleted_if_updating_record_with_no_versioning_and_one_record_left( + self, + ): """ Same as above, but updating a record in a catalog, which does NOT support versioning. In this case, the the records itself gets updated, and removed from the old alternate_record_set. 
@@ -1889,22 +2462,28 @@ def test_alternate_record_set_is_deleted_if_updating_record_with_no_versioning_a old_ars_id = CatalogRecord.objects.get(pk=2).alternate_record_set.id # retrieve record id=2, and change its preferred identifier - response = self.client.get('/rest/datasets/2', format="json") - data = {'research_dataset': response.data['research_dataset']} - data['research_dataset']['preferred_identifier'] = 'a:new:identifier:here' + response = self.client.get("/rest/datasets/2", format="json") + data = {"research_dataset": response.data["research_dataset"]} + data["research_dataset"]["preferred_identifier"] = "a:new:identifier:here" # updating preferred_identifier - a new version is NOT created - response = self.client.patch('/rest/datasets/2', data=data, format="json") + response = self.client.patch("/rest/datasets/2", data=data, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) records = CatalogRecord.objects.filter( - research_dataset__contains={'preferred_identifier': original_preferred_identifier }) + research_dataset__contains={"preferred_identifier": original_preferred_identifier} + ) self.assertEqual(records.count(), 1) - with self.assertRaises(AlternateRecordSet.DoesNotExist, msg='alternate record set should have been deleted'): + with self.assertRaises( + AlternateRecordSet.DoesNotExist, + msg="alternate record set should have been deleted", + ): AlternateRecordSet.objects.get(pk=old_ars_id) - def test_alternate_record_set_is_deleted_if_deleting_record_and_only_one_record_left(self): + def test_alternate_record_set_is_deleted_if_deleting_record_and_only_one_record_left( + self, + ): """ Same principle as above, but through deleting a record instead of updating a record. @@ -1913,14 +2492,18 @@ def test_alternate_record_set_is_deleted_if_deleting_record_and_only_one_record_ self._set_preferred_identifier_to_record(pk=2, data_catalog=2) old_ars_id = CatalogRecord.objects.get(pk=2).alternate_record_set.id - response = self.client.delete('/rest/datasets/2', format="json") + response = self.client.delete("/rest/datasets/2", format="json") self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) records = CatalogRecord.objects.filter( - research_dataset__contains={'preferred_identifier': self.preferred_identifier}) - self.assertEqual(records.count(), 1, 'should be only record with this identifier left now') + research_dataset__contains={"preferred_identifier": self.preferred_identifier} + ) + self.assertEqual(records.count(), 1, "should be only record with this identifier left now") - with self.assertRaises(AlternateRecordSet.DoesNotExist, msg='alternate record set should have been deleted'): + with self.assertRaises( + AlternateRecordSet.DoesNotExist, + msg="alternate record set should have been deleted", + ): AlternateRecordSet.objects.get(pk=old_ars_id) def test_alternate_record_set_is_included_in_responses(self): @@ -1928,63 +2511,55 @@ Details of a dataset should contain the field alternate_record_set. For a particular record, the set should not contain its own metadata_version_identifier.
""" - self.cr_test_data['data_catalog'] = 3 - msg_self_should_not_be_listed = 'identifier of the record itself should not be listed' + self.cr_test_data["data_catalog"] = 3 + msg_self_should_not_be_listed = "identifier of the record itself should not be listed" - response_1 = self.client.post('/rest/datasets', self.cr_test_data, format="json") - response_2 = self.client.get('/rest/datasets/1', format="json") + response_1 = self.client.post("/rest/datasets", self.cr_test_data, format="json") + response_2 = self.client.get("/rest/datasets/1", format="json") self.assertEqual(response_1.status_code, status.HTTP_201_CREATED) - self.assertEqual('alternate_record_set' in response_1.data, True) + self.assertEqual("alternate_record_set" in response_1.data, True) self.assertEqual( - response_1.data['identifier'] - not in response_1.data['alternate_record_set'], + response_1.data["identifier"] not in response_1.data["alternate_record_set"], True, - msg_self_should_not_be_listed + msg_self_should_not_be_listed, ) self.assertEqual( - response_2.data['identifier'] - in response_1.data['alternate_record_set'], - True + response_2.data["identifier"] in response_1.data["alternate_record_set"], + True, ) - self.cr_test_data.update({'data_catalog': 4}) - response_3 = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self.cr_test_data.update({"data_catalog": 4}) + response_3 = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response_3.status_code, status.HTTP_201_CREATED) - self.assertEqual('alternate_record_set' in response_3.data, True) + self.assertEqual("alternate_record_set" in response_3.data, True) self.assertEqual( - response_1.data['identifier'] - in response_3.data['alternate_record_set'], - True + response_1.data["identifier"] in response_3.data["alternate_record_set"], + True, ) self.assertEqual( - response_2.data['identifier'] - in response_3.data['alternate_record_set'], - True + response_2.data["identifier"] in response_3.data["alternate_record_set"], + True, ) self.assertEqual( - response_3.data['identifier'] - not in response_3.data['alternate_record_set'], + response_3.data["identifier"] not in response_3.data["alternate_record_set"], True, - msg_self_should_not_be_listed + msg_self_should_not_be_listed, ) - response_2 = self.client.get('/rest/datasets/1', format="json") - self.assertEqual('alternate_record_set' in response_2.data, True) + response_2 = self.client.get("/rest/datasets/1", format="json") + self.assertEqual("alternate_record_set" in response_2.data, True) self.assertEqual( - response_1.data['identifier'] - in response_2.data['alternate_record_set'], - True + response_1.data["identifier"] in response_2.data["alternate_record_set"], + True, ) self.assertEqual( - response_3.data['identifier'] - in response_2.data['alternate_record_set'], - True + response_3.data["identifier"] in response_2.data["alternate_record_set"], + True, ) self.assertEqual( - response_2.data['identifier'] - not in response_2.data['alternate_record_set'], + response_2.data["identifier"] not in response_2.data["alternate_record_set"], True, - msg_self_should_not_be_listed + msg_self_should_not_be_listed, ) def _set_preferred_identifier_to_record(self, pk=1, data_catalog=1): @@ -1995,9 +2570,9 @@ def _set_preferred_identifier_to_record(self, pk=1, data_catalog=1): Note that if calling this method several times, this will also create an alternate_record_set (by calling _handle_preferred_identifier_changed()). 
""" - unique_identifier = 'im unique yo' + unique_identifier = "im unique yo" cr = CatalogRecord.objects.get(pk=pk) - cr.research_dataset['preferred_identifier'] = unique_identifier + cr.research_dataset["preferred_identifier"] = unique_identifier cr.data_catalog_id = data_catalog cr.force_save() cr._handle_preferred_identifier_changed() @@ -2015,9 +2590,14 @@ def _set_and_ensure_initial_conditions(self): # ensuring initial conditions... records = CatalogRecord.objects.filter( - research_dataset__contains={'preferred_identifier': self.preferred_identifier}) - self.assertEqual(len(records), 3, - 'in the beginning, there should be three records with pref id %s' % self.preferred_identifier) + research_dataset__contains={"preferred_identifier": self.preferred_identifier} + ) + self.assertEqual( + len(records), + 3, + "in the beginning, there should be three records with pref id %s" + % self.preferred_identifier, + ) ars_id = records[0].alternate_record_set.id self.assertEqual(records[0].alternate_record_set.id, ars_id) self.assertEqual(records[1].alternate_record_set.id, ars_id) @@ -2038,55 +2618,61 @@ def test_update_from_0_to_n_files_does_not_create_new_version(self): The FIRST update from 0 to n files in a dataset should be permitted without creating a new dataset version. """ - data = self.client.get('/rest/datasets/1', format="json").data - data.pop('id') - data.pop('identifier') - data['research_dataset'].pop('preferred_identifier', None) - files = data['research_dataset'].pop('files', None) - data['research_dataset'].pop('directories', None) + data = self.client.get("/rest/datasets/1", format="json").data + data.pop("id") + data.pop("identifier") + data["research_dataset"].pop("preferred_identifier", None) + files = data["research_dataset"].pop("files", None) + data["research_dataset"].pop("directories", None) self.assertEqual(isinstance(files, list), True) # create test record - response = self.client.post('/rest/datasets', data, format="json") + response = self.client.post("/rest/datasets", data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) # modify a few times to create metadata versions data = response.data - data['research_dataset']['title']['en'] = 'updated' - data = self.client.put('/rest/datasets/%d' % data['id'], data, format="json").data - data['research_dataset']['title']['en'] = 'updated again' - data = self.client.put('/rest/datasets/%d' % data['id'], data, format="json").data + data["research_dataset"]["title"]["en"] = "updated" + data = self.client.put("/rest/datasets/%d" % data["id"], data, format="json").data + data["research_dataset"]["title"]["en"] = "updated again" + data = self.client.put("/rest/datasets/%d" % data["id"], data, format="json").data - response = self.client.get('/rest/datasets', format="json") - dataset_count_beginning = response.data['count'] + response = self.client.get("/rest/datasets", format="json") + dataset_count_beginning = response.data["count"] # add files for the first time - should not create a new dataset version - data['research_dataset']['files'] = files - response = self.client.put('/rest/datasets/%d' % data['id'], data, format="json") + data["research_dataset"]["files"] = files + response = self.client.put("/rest/datasets/%d" % data["id"], data, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('new_version_created' in response.data, False) + self.assertEqual("new_version_created" in response.data, False) # ensure no "ghost 
datasets" are created as residue - response = self.client.get('/rest/datasets', format="json") - self.assertEqual(response.data['count'], dataset_count_beginning, 'no new datasets should be created') + response = self.client.get("/rest/datasets", format="json") + self.assertEqual( + response.data["count"], + dataset_count_beginning, + "no new datasets should be created", + ) # remove files again... a new version is created normally - files = data['research_dataset'].pop('files') - response = self.client.put('/rest/datasets/%d' % data['id'], data, format="json") + files = data["research_dataset"].pop("files") + response = self.client.put("/rest/datasets/%d" % data["id"], data, format="json") new_version = self.get_next_version(response.data) - response = self.client.get('/rest/datasets', format="json") - self.assertEqual(response.data['count'], dataset_count_beginning + 1) + response = self.client.get("/rest/datasets", format="json") + self.assertEqual(response.data["count"], dataset_count_beginning + 1) # ...and put the files back. this is another 0->n files update. this time # should normally create new dataset version. - new_version['research_dataset']['files'] = files - response = self.client.put('/rest/datasets/%d' % new_version['id'], new_version, format="json") + new_version["research_dataset"]["files"] = files + response = self.client.put( + "/rest/datasets/%d" % new_version["id"], new_version, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('new_version_created' in response.data, True) + self.assertEqual("new_version_created" in response.data, True) - response = self.client.get('/rest/datasets', format="json") - self.assertEqual(response.data['count'], dataset_count_beginning + 2) + response = self.client.get("/rest/datasets", format="json") + self.assertEqual(response.data["count"], dataset_count_beginning + 2) def test_update_to_non_versioning_catalog_does_not_create_version(self): self._set_cr_to_catalog(pk=self.pk, dc=3) @@ -2094,18 +2680,22 @@ def test_update_to_non_versioning_catalog_does_not_create_version(self): self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_metadata_version_count(response.data, 0) - def test_update_to_versioning_catalog_with_preserve_version_parameter_does_not_create_version(self): + def test_update_to_versioning_catalog_with_preserve_version_parameter_does_not_create_version( + self, + ): self._set_cr_to_catalog(pk=self.pk, dc=1) - response = self._get_and_update_title(self.pk, params='?preserve_version') + response = self._get_and_update_title(self.pk, params="?preserve_version") self._assert_metadata_version_count(response.data, 0) def test_preserve_version_parameter_does_not_allow_file_changes(self): self._set_cr_to_catalog(pk=self.pk, dc=1) - data = self.client.get('/rest/datasets/%d' % self.pk, format="json").data - data['research_dataset']['files'][0]['identifier'] = 'pid:urn:11' - response = self.client.put('/rest/datasets/%d?preserve_version' % self.pk, data, format="json") + data = self.client.get("/rest/datasets/%d" % self.pk, format="json").data + data["research_dataset"]["files"][0]["identifier"] = "pid:urn:11" + response = self.client.put( + "/rest/datasets/%d?preserve_version" % self.pk, data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual('not supported' in response.data['detail'][0], True, response.data) + self.assertEqual("not supported" in 
response.data["detail"][0], True, response.data) def test_update_rd_title_creates_new_metadata_version(self): """ @@ -2116,85 +2706,113 @@ def test_update_rd_title_creates_new_metadata_version(self): self._assert_metadata_version_count(response_1.data, 2) # get list of metadata versions to access contents... - response = self.client.get('/rest/datasets/%d/metadata_versions' % response_1.data['id'], format="json") + response = self.client.get( + "/rest/datasets/%d/metadata_versions" % response_1.data["id"], format="json" + ) - response_2 = self.client.get('/rest/datasets/%d/metadata_versions/%s' % - (self.pk, response.data[0]['metadata_version_identifier']), format="json") + response_2 = self.client.get( + "/rest/datasets/%d/metadata_versions/%s" + % (self.pk, response.data[0]["metadata_version_identifier"]), + format="json", + ) self.assertEqual(response_2.status_code, status.HTTP_200_OK, response_2.data) - self.assertEqual('preferred_identifier' in response_2.data, True) + self.assertEqual("preferred_identifier" in response_2.data, True) # note! response_1 == cr, response_2 == rd - self.assertEqual(response_1.data['research_dataset']['preferred_identifier'], - response_2.data['preferred_identifier']) + self.assertEqual( + response_1.data["research_dataset"]["preferred_identifier"], + response_2.data["preferred_identifier"], + ) def test_changing_files_creates_new_dataset_version(self): - cr = self.client.get('/rest/datasets/1').data - cr['research_dataset']['files'].pop(0) - response = self.client.put('/rest/datasets/1', cr, format="json") - self.assertEqual('next_dataset_version' in response.data, True) - self.assertEqual('new_version_created' in response.data, True) - self.assertEqual('dataset_version_set' in response.data, True) + cr = self.client.get("/rest/datasets/1").data + cr["research_dataset"]["files"].pop(0) + response = self.client.put("/rest/datasets/1", cr, format="json") + self.assertEqual("next_dataset_version" in response.data, True) + self.assertEqual("new_version_created" in response.data, True) + self.assertEqual("dataset_version_set" in response.data, True) def test_dataset_version_lists_removed_records(self): # create new version - cr = self.client.get('/rest/datasets/1').data - cr['research_dataset']['files'].pop(0) - response = self.client.put('/rest/datasets/1', cr, format="json") + cr = self.client.get("/rest/datasets/1").data + cr["research_dataset"]["files"].pop(0) + response = self.client.put("/rest/datasets/1", cr, format="json") # delete the new version - new_ver = response.data['next_dataset_version'] - response = self.client.delete('/rest/datasets/%d' % new_ver['id'], format="json") + new_ver = response.data["next_dataset_version"] + response = self.client.delete("/rest/datasets/%d" % new_ver["id"], format="json") # check deleted record is listed - response = self.client.get('/rest/datasets/1', format="json") - self.assertEqual(response.data['dataset_version_set'][0].get('removed', None), True, - response.data['dataset_version_set']) + response = self.client.get("/rest/datasets/1", format="json") + self.assertEqual( + response.data["dataset_version_set"][0].get("removed", None), + True, + response.data["dataset_version_set"], + ) def test_dataset_version_lists_date_removed(self): # get catalog record - cr = self.client.get('/rest/datasets/1').data + cr = self.client.get("/rest/datasets/1").data # create version2 - cr['research_dataset']['files'].pop(0) - response = self.client.put('/rest/datasets/1', cr, format="json") + 
cr["research_dataset"]["files"].pop(0) + response = self.client.put("/rest/datasets/1", cr, format="json") # delete version2 - version2 = response.data['next_dataset_version'] - response = self.client.delete('/rest/datasets/%d' % version2['id'], format="json") + version2 = response.data["next_dataset_version"] + response = self.client.delete("/rest/datasets/%d" % version2["id"], format="json") # check date_removed is listed and not None in deleted version - response = self.client.get('/rest/datasets/1', format="json") + response = self.client.get("/rest/datasets/1", format="json") - self.assertTrue(response.data['dataset_version_set'][0].get('date_removed')) - self.assertTrue(response.data['dataset_version_set'][0].get('date_removed') is not None) - self.assertFalse(response.data['dataset_version_set'][1].get('date_removed')) + self.assertTrue(response.data["dataset_version_set"][0].get("date_removed")) + self.assertTrue(response.data["dataset_version_set"][0].get("date_removed") is not None) + self.assertFalse(response.data["dataset_version_set"][1].get("date_removed")) - def test_new_dataset_version_pref_id_type_stays_same_as_previous_dataset_version_pref_id_type(self): + def test_new_dataset_version_pref_id_type_stays_same_as_previous_dataset_version_pref_id_type( + self, + ): # Create ida data catalog - dc = self._get_object_from_test_data('datacatalog', requested_index=0) + dc = self._get_object_from_test_data("datacatalog", requested_index=0) dc_id = IDA_CATALOG - dc['catalog_json']['identifier'] = dc_id - self.client.post('/rest/datacatalogs', dc, format="json") + dc["catalog_json"]["identifier"] = dc_id + self.client.post("/rest/datacatalogs", dc, format="json") - self.cr_test_data['data_catalog'] = IDA_CATALOG - self.cr_test_data['research_dataset']['preferred_identifier'] = '' + self.cr_test_data["data_catalog"] = IDA_CATALOG + self.cr_test_data["research_dataset"]["preferred_identifier"] = "" - cr_v1 = self.client.post('/rest/datasets?pid_type=urn', self.cr_test_data, format="json").data - cr_v1['research_dataset']['files'].pop(0) - cr_v2 = self.client.put('/rest/datasets/{0}?pid_type=doi'.format(cr_v1['identifier']), cr_v1, format="json") + cr_v1 = self.client.post( + "/rest/datasets?pid_type=urn", self.cr_test_data, format="json" + ).data + cr_v1["research_dataset"]["files"].pop(0) + cr_v2 = self.client.put( + "/rest/datasets/{0}?pid_type=doi".format(cr_v1["identifier"]), + cr_v1, + format="json", + ) self.assertEqual(cr_v2.status_code, status.HTTP_200_OK, cr_v2.data) - self.assertEqual('new_version_created' in cr_v2.data, True) + self.assertEqual("new_version_created" in cr_v2.data, True) self.assertTrue( - get_identifier_type(cr_v2.data['new_version_created']['preferred_identifier']) == IdentifierType.URN) + get_identifier_type(cr_v2.data["new_version_created"]["preferred_identifier"]) + == IdentifierType.URN + ) - cr_v1 = self.client.post('/rest/datasets?pid_type=doi', self.cr_test_data, format="json").data - cr_v1['research_dataset']['files'].pop(0) - cr_v2 = self.client.put('/rest/datasets/{0}'.format(cr_v1['identifier']), cr_v1, format="json") - self.assertEqual('new_version_created' in cr_v2.data, True) + cr_v1 = self.client.post( + "/rest/datasets?pid_type=doi", self.cr_test_data, format="json" + ).data + cr_v1["research_dataset"]["files"].pop(0) + cr_v2 = self.client.put( + "/rest/datasets/{0}".format(cr_v1["identifier"]), cr_v1, format="json" + ) + self.assertEqual("new_version_created" in cr_v2.data, True) self.assertTrue( - 
get_identifier_type(cr_v2.data['new_version_created']['preferred_identifier']) == IdentifierType.DOI) + get_identifier_type(cr_v2.data["new_version_created"]["preferred_identifier"]) + == IdentifierType.DOI + ) def _assert_metadata_version_count(self, record, count): - response = self.client.get('/rest/datasets/%d/metadata_versions' % record['id'], format="json") + response = self.client.get( + "/rest/datasets/%d/metadata_versions" % record["id"], format="json" + ) self.assertEqual(len(response.data), count) def _set_cr_to_catalog(self, pk=None, dc=None): @@ -2209,9 +2827,9 @@ def _get_and_update_title(self, pk, params=None): Should not force preferred_identifier to change. """ - data = self.client.get('/rest/datasets/%d' % pk, format="json").data - data['research_dataset']['title']['en'] = 'modified title' - return self.client.put('/rest/datasets/%d%s' % (pk, params or ''), data, format="json") + data = self.client.get("/rest/datasets/%d" % pk, format="json").data + data["research_dataset"]["title"]["en"] = "modified title" + return self.client.put("/rest/datasets/%d%s" % (pk, params or ""), data, format="json") def _get_and_update_files(self, pk, update_preferred_identifier=False, params=None): """ @@ -2222,70 +2840,74 @@ def _get_and_update_files(self, pk, update_preferred_identifier=False, params=No """ file_identifiers = [ { - 'identifier': f.identifier, - 'title': 'title', - 'use_category': { 'identifier': 'outcome' } + "identifier": f.identifier, + "title": "title", + "use_category": {"identifier": "outcome"}, } for f in File.objects.all() ] - data = self.client.get('/rest/datasets/%d' % pk, format="json").data - data['research_dataset']['files'] = file_identifiers[-5:] + data = self.client.get("/rest/datasets/%d" % pk, format="json").data + data["research_dataset"]["files"] = file_identifiers[-5:] if update_preferred_identifier: - new_pref_id = 'modified-preferred-identifier' - data['research_dataset']['preferred_identifier'] = new_pref_id + new_pref_id = "modified-preferred-identifier" + data["research_dataset"]["preferred_identifier"] = new_pref_id return ( - self.client.put('/rest/datasets/%d%s' % (pk, params or ''), data, format="json"), - new_pref_id + self.client.put("/rest/datasets/%d%s" % (pk, params or ""), data, format="json"), + new_pref_id, ) - return self.client.put('/rest/datasets/%d%s' % (pk, params or ''), data, format="json") + return self.client.put("/rest/datasets/%d%s" % (pk, params or ""), data, format="json") def test_allow_metadata_changes_after_deprecation(self): """ For deprecated datasets, file and directory additions/removals are forbidden but metadata changes are allowed. 
""" - response = self.client.get('/rest/datasets/1') + response = self.client.get("/rest/datasets/1") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) cr = response.data - response = self.client.delete('/rest/files/1') + response = self.client.delete("/rest/files/1") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # after the dataset is deprecated, metadata updates should be ok - cr['research_dataset']['description'] = { + cr["research_dataset"]["description"] = { "en": "Updating new description for deprecated dataset should not create any problems" } - cr['research_dataset']['files'][0]['title'] = 'Brand new title 1' + cr["research_dataset"]["files"][0]["title"] = "Brand new title 1" - response = self.client.put('/rest/datasets/%s' % cr['id'], cr, format="json") + response = self.client.put("/rest/datasets/%s" % cr["id"], cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertTrue('new description' in response.data['research_dataset']['description']['en'], - 'description field should be updated') - self.assertTrue('Brand new' in response.data['research_dataset']['files'][0]['title'], - 'title field for file should be updated') + self.assertTrue( + "new description" in response.data["research_dataset"]["description"]["en"], + "description field should be updated", + ) + self.assertTrue( + "Brand new" in response.data["research_dataset"]["files"][0]["title"], + "title field for file should be updated", + ) def test_prevent_adding_removed_file_to_deprecated_dataset(self): - response = self.client.get('/rest/datasets/1') + response = self.client.get("/rest/datasets/1") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) cr = response.data # deprecates the dataset - response = self.client.delete('/rest/files/1') + response = self.client.delete("/rest/files/1") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # the file to add to data - response = self.client.delete('/rest/files/4') + response = self.client.delete("/rest/files/4") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - cr['research_dataset']['files'].append({ - "identifier": "pid:urn:4", - "title": "File Title", - "use_category": { - "identifier": "method" + cr["research_dataset"]["files"].append( + { + "identifier": "pid:urn:4", + "title": "File Title", + "use_category": {"identifier": "method"}, } - }) - response = self.client.put('/rest/datasets/%s' % cr['id'], cr, format="json") + ) + response = self.client.put("/rest/datasets/%s" % cr["id"], cr, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) @@ -2295,19 +2917,21 @@ class CatalogRecordApiWriteAssignFilesCommon(CatalogRecordApiWriteCommon): """ def _get_file_from_test_data(self): - from_test_data = self._get_object_from_test_data('file', requested_index=0) - from_test_data.update({ - "checksum": { - "value": "checksumvalue", - "algorithm": "SHA-256", - "checked": "2017-05-23T10:07:22.559656Z", - }, - "file_name": "must_replace", - "file_path": "must_replace", - "identifier": "must_replace", - "project_identifier": "must_replace", - "file_storage": self._get_object_from_test_data('filestorage', requested_index=0) - }) + from_test_data = self._get_object_from_test_data("file", requested_index=0) + from_test_data.update( + { + "checksum": { + "value": "checksumvalue", + "algorithm": "SHA-256", + "checked": "2017-05-23T10:07:22.559656Z", + }, + "file_name": 
"must_replace", + "file_path": "must_replace", + "identifier": "must_replace", + "project_identifier": "must_replace", + "file_storage": self._get_object_from_test_data("filestorage", requested_index=0), + } + ) return from_test_data def _form_test_file_hierarchy(self): @@ -2319,72 +2943,72 @@ def _form_test_file_hierarchy(self): { "file_name": "file_01.txt", "file_path": "/TestExperiment/Directory_1/Group_1/file_01.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_02.txt", "file_path": "/TestExperiment/Directory_1/Group_1/file_02.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_03.txt", "file_path": "/TestExperiment/Directory_1/Group_2/file_03.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_04.txt", "file_path": "/TestExperiment/Directory_1/Group_2/file_04.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_05.txt", "file_path": "/TestExperiment/Directory_1/file_05.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_06.txt", "file_path": "/TestExperiment/Directory_1/file_06.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_07.txt", "file_path": "/TestExperiment/Directory_2/Group_1/file_07.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_08.txt", "file_path": "/TestExperiment/Directory_2/Group_1/file_08.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_09.txt", "file_path": "/TestExperiment/Directory_2/Group_2/file_09.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_10.txt", "file_path": "/TestExperiment/Directory_2/Group_2/file_10.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_11.txt", "file_path": "/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_11.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_12.txt", "file_path": "/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_12.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_13.txt", "file_path": "/TestExperiment/Directory_2/file_13.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_14.txt", "file_path": "/TestExperiment/Directory_2/file_14.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, ] @@ -2392,176 +3016,175 @@ def _form_test_file_hierarchy(self): { "file_name": "file_15.txt", "file_path": "/SecondExperiment/Directory_1/Group_1/file_15.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_16.txt", "file_path": "/SecondExperiment/Directory_1/Group_1/file_16.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_17.txt", "file_path": "/SecondExperiment/Directory_1/Group_2/file_18.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_18.txt", "file_path": "/SecondExperiment/Directory_1/Group_2/file_18.txt", - 'project_identifier': 'testproject_2', + "project_identifier": 
"testproject_2", }, { "file_name": "file_19.txt", "file_path": "/SecondExperiment/Directory_1/file_19.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_20.txt", "file_path": "/SecondExperiment/Directory_1/file_20.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_21.txt", "file_path": "/SecondExperiment/Data/file_21.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_22.txt", "file_path": "/SecondExperiment/Data_Config/file_22.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_23.txt", "file_path": "/SecondExperiment/Data_Config/file_23.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_24.txt", "file_path": "/SecondExperiment/Data/History/file_24.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, - ] file_template = self._get_file_from_test_data() - del file_template['id'] - self._single_file_byte_size = file_template['byte_size'] + del file_template["id"] + self._single_file_byte_size = file_template["byte_size"] files_1 = [] for i, f in enumerate(file_data_1): file = deepcopy(file_template) - file.update(f, identifier='test:file:%s' % f['file_name'][-6:-4]) + file.update(f, identifier="test:file:%s" % f["file_name"][-6:-4]) files_1.append(file) files_2 = [] for i, f in enumerate(file_data_2): file = deepcopy(file_template) - file.update(f, identifier='test:file:%s' % f['file_name'][-6:-4]) + file.update(f, identifier="test:file:%s" % f["file_name"][-6:-4]) files_2.append(file) return files_1, files_2 def _add_directory(self, ds, path, project=None): - params = { 'directory_path': path } + params = {"directory_path": path} if project: - params['project_identifier'] = project + params["project_identifier"] = project identifier = Directory.objects.get(**params).identifier - if 'directories' not in ds['research_dataset']: - ds['research_dataset']['directories'] = [] + if "directories" not in ds["research_dataset"]: + ds["research_dataset"]["directories"] = [] - ds['research_dataset']['directories'].append({ - "identifier": identifier, - "title": "Directory Title", - "description": "This is directory at %s" % path, - "use_category": { - "identifier": "method" + ds["research_dataset"]["directories"].append( + { + "identifier": identifier, + "title": "Directory Title", + "description": "This is directory at %s" % path, + "use_category": {"identifier": "method"}, } - }) + ) def _add_file(self, ds, path): identifier = File.objects.filter(file_path__startswith=path).first().identifier - if 'files' not in ds['research_dataset']: - ds['research_dataset']['files'] = [] + if "files" not in ds["research_dataset"]: + ds["research_dataset"]["files"] = [] - ds['research_dataset']['files'].append({ - "identifier": identifier, - "title": "File Title", - "description": "This is file at %s" % path, - "use_category": { - "identifier": "method" + ds["research_dataset"]["files"].append( + { + "identifier": identifier, + "title": "File Title", + "description": "This is file at %s" % path, + "use_category": {"identifier": "method"}, } - }) + ) def _add_nonexisting_directory(self, ds): - ds['research_dataset']['directories'] = [{ - "identifier": "doesnotexist", - "title": "Directory Title", - "description": "This is directory does not exist", - "use_category": { - 
"identifier": "method" + ds["research_dataset"]["directories"] = [ + { + "identifier": "doesnotexist", + "title": "Directory Title", + "description": "This is directory does not exist", + "use_category": {"identifier": "method"}, } - }] + ] def _add_nonexisting_file(self, ds): - ds['research_dataset']['files'] = [{ - "identifier": "doesnotexist", - "title": "File Title", - "description": "This is file does not exist", - "use_category": { - "identifier": "method" + ds["research_dataset"]["files"] = [ + { + "identifier": "doesnotexist", + "title": "File Title", + "description": "This is file does not exist", + "use_category": {"identifier": "method"}, } - }] + ] def _remove_directory(self, ds, path): - if 'directories' not in ds['research_dataset']: - raise Exception('ds has no dirs') + if "directories" not in ds["research_dataset"]: + raise Exception("ds has no dirs") identifier = Directory.objects.get(directory_path=path).identifier - for i, dr in enumerate(ds['research_dataset']['directories']): - if dr['identifier'] == identifier: - ds['research_dataset']['directories'].pop(i) + for i, dr in enumerate(ds["research_dataset"]["directories"]): + if dr["identifier"] == identifier: + ds["research_dataset"]["directories"].pop(i) return - raise Exception('path %s not found in directories' % path) + raise Exception("path %s not found in directories" % path) def _remove_file(self, ds, path): - if 'files' not in ds['research_dataset']: - raise Exception('ds has no files') + if "files" not in ds["research_dataset"]: + raise Exception("ds has no files") identifier = File.objects.get(file_path=path).identifier - for i, f in enumerate(ds['research_dataset']['files']): - if f['identifier'] == identifier: - ds['research_dataset']['files'].pop(i) + for i, f in enumerate(ds["research_dataset"]["files"]): + if f["identifier"] == identifier: + ds["research_dataset"]["files"].pop(i) return - raise Exception('path %s not found in files' % path) + raise Exception("path %s not found in files" % path) def _freeze_new_files(self): file_data = [ { "file_name": "file_90.txt", "file_path": "/TestExperiment/Directory_2/Group_3/file_90.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_91.txt", "file_path": "/TestExperiment/Directory_2/Group_3/file_91.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, ] file_template = self._get_file_from_test_data() - del file_template['id'] - self._single_file_byte_size = file_template['byte_size'] + del file_template["id"] + self._single_file_byte_size = file_template["byte_size"] files = [] for i, f in enumerate(file_data): file = deepcopy(file_template) - file.update(f, identifier='frozen:later:file:%s' % f['file_name'][-6:-4]) + file.update(f, identifier="frozen:later:file:%s" % f["file_name"][-6:-4]) files.append(file) - response = self.client.post('/rest/files', files, format="json") + response = self.client.post("/rest/files", files, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) def _freeze_files_to_root(self): @@ -2569,25 +3192,25 @@ def _freeze_files_to_root(self): { "file_name": "file_56.txt", "file_path": "/TestExperiment/Directory_2/file_56.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_57.txt", "file_path": "/TestExperiment/Directory_2/file_57.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, ] file_template = self._get_file_from_test_data() - del 
file_template['id'] - self._single_file_byte_size = file_template['byte_size'] + del file_template["id"] + self._single_file_byte_size = file_template["byte_size"] files = [] for i, f in enumerate(file_data): file = deepcopy(file_template) - file.update(f, identifier='frozen:later:file:%s' % f['file_name'][-6:-4]) + file.update(f, identifier="frozen:later:file:%s" % f["file_name"][-6:-4]) files.append(file) - response = self.client.post('/rest/files', files, format="json") + response = self.client.post("/rest/files", files, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) def setUp(self): @@ -2597,28 +3220,34 @@ def setUp(self): - create 12 new files in a new project """ super().setUp() - self.cr_test_data['research_dataset'].pop('id', None) - self.cr_test_data['research_dataset'].pop('preferred_identifier', None) - self.cr_test_data['research_dataset'].pop('files', None) - self.cr_test_data['research_dataset'].pop('directories', None) + self.cr_test_data["research_dataset"].pop("id", None) + self.cr_test_data["research_dataset"].pop("preferred_identifier", None) + self.cr_test_data["research_dataset"].pop("files", None) + self.cr_test_data["research_dataset"].pop("directories", None) project_files = self._form_test_file_hierarchy() for p_files in project_files: - response = self.client.post('/rest/files', p_files, format="json") + response = self.client.post("/rest/files", p_files, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def assert_preferred_identifier_changed(self, response, true_or_false): self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('next_dataset_version' in response.data, true_or_false, - 'this field should only be present if preferred_identifier changed') + self.assertEqual( + "next_dataset_version" in response.data, + true_or_false, + "this field should only be present if preferred_identifier changed", + ) if true_or_false is True: - self.assertEqual(response.data['research_dataset']['preferred_identifier'] != - response.data['next_dataset_version']['preferred_identifier'], true_or_false) + self.assertEqual( + response.data["research_dataset"]["preferred_identifier"] + != response.data["next_dataset_version"]["preferred_identifier"], + true_or_false, + ) def assert_file_count(self, cr, expected_file_count): - self.assertEqual(CatalogRecord.objects.get(pk=cr['id']).files.count(), expected_file_count) + self.assertEqual(CatalogRecord.objects.get(pk=cr["id"]).files.count(), expected_file_count) def assert_total_files_byte_size(self, cr, expected_size): - self.assertEqual(cr['research_dataset']['total_files_byte_size'], expected_size) + self.assertEqual(cr["research_dataset"]["total_files_byte_size"], expected_size) class CatalogRecordApiWriteCumulativeDatasets(CatalogRecordApiWriteAssignFilesCommon): @@ -2633,98 +3262,127 @@ def _create_cumulative_dataset_with_files(self): """ Create cumulative dataset with two files that will be updated. 
""" - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/file_05.txt') - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/file_06.txt') - self.cr_test_data['cumulative_state'] = 1 # YES + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/file_05.txt") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/file_06.txt") + self.cr_test_data["cumulative_state"] = 1 # YES - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - return response.data # i.e. the dataset + return response.data # i.e. the dataset def _create_cumulative_dataset_without_files(self): """ Create cumulative dataset without any files. """ - self.cr_test_data['cumulative_state'] = 1 # YES + self.cr_test_data["cumulative_state"] = 1 # YES - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) return response.data def test_create_cumulative_dataset_with_state_closed(self): - self.cr_test_data['cumulative_state'] = 2 # CLOSED + self.cr_test_data["cumulative_state"] = 2 # CLOSED - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) def test_create_cumulative_dataset_with_preservation_state(self): - self.cr_test_data['cumulative_state'] = 1 - self.cr_test_data['preservation_state'] = 10 + self.cr_test_data["cumulative_state"] = 1 + self.cr_test_data["preservation_state"] = 10 - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertTrue('PAS' in response.data['detail'][0], response.data) + self.assertTrue("PAS" in response.data["detail"][0], response.data) def test_create_cumulative_dataset_sets_date_cumulation_started(self): - self.cr_test_data['cumulative_state'] = 1 + self.cr_test_data["cumulative_state"] = 1 - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data['date_cumulation_started'], response.data['date_created'], response.data) - self.assertTrue('date_cumulation_ended' not in response.data, response.data) - self.assertTrue('date_last_cumulative_addition' not in response.data, response.data) + self.assertEqual( + response.data["date_cumulation_started"], + response.data["date_created"], + response.data, + ) + self.assertTrue("date_cumulation_ended" not in response.data, response.data) + self.assertTrue("date_last_cumulative_addition" not in response.data, response.data) def test_add_files_to_empty_cumulative_dataset(self): cr = self._create_cumulative_dataset_without_files() total_record_count_beginning = CatalogRecord.objects_unfiltered.all().count() - self._add_file(cr, '/TestExperiment/Directory_1/Group_1/file_01.txt') + self._add_file(cr, "/TestExperiment/Directory_1/Group_1/file_01.txt") 
response = self.update_record(cr) current_record_count = CatalogRecord.objects_unfiltered.all().count() self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(current_record_count, total_record_count_beginning, 'there should be no new datasets') + self.assertEqual( + current_record_count, + total_record_count_beginning, + "there should be no new datasets", + ) def test_adding_files_to_cumulative_dataset_creates_no_new_versions(self): """ Tests the basic idea of cumulative dataset: add files with no new version """ cr = self._create_cumulative_dataset_with_files() - self._add_file(cr, '/TestExperiment/Directory_1/Group_1/file_01.txt') - self._add_file(cr, '/TestExperiment/Directory_1/Group_1/file_02.txt') + self._add_file(cr, "/TestExperiment/Directory_1/Group_1/file_01.txt") + self._add_file(cr, "/TestExperiment/Directory_1/Group_1/file_02.txt") total_record_count_beginning = CatalogRecord.objects_unfiltered.all().count() response = self.update_record(cr) current_record_count = CatalogRecord.objects_unfiltered.all().count() self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(current_record_count, total_record_count_beginning, 'there should be no new datasets') + self.assertEqual( + current_record_count, + total_record_count_beginning, + "there should be no new datasets", + ) # two + two is four, quik mafs self.assert_file_count(response.data, 4) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 4) - self.assertEqual('new_dataset_version' in response.data, False, 'New version should not be created') + self.assertEqual( + "new_dataset_version" in response.data, + False, + "New version should not be created", + ) cr = response.data - self._add_directory(cr, '/TestExperiment/Directory_2/Group_2') + self._add_directory(cr, "/TestExperiment/Directory_2/Group_2") response = self.update_record(cr) current_record_count = CatalogRecord.objects_unfiltered.all().count() self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(current_record_count, total_record_count_beginning, 'there should be no new datasets') + self.assertEqual( + current_record_count, + total_record_count_beginning, + "there should be no new datasets", + ) self.assert_file_count(response.data, 8) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 8) - self.assertEqual('new_dataset_version' in response.data, False, 'New version should not be created') + self.assertEqual( + "new_dataset_version" in response.data, + False, + "New version should not be created", + ) - def test_adding_files_to_cumulative_dataset_changes_date_last_cumulative_addition(self): + def test_adding_files_to_cumulative_dataset_changes_date_last_cumulative_addition( + self, + ): cr = self._create_cumulative_dataset_with_files() - self._add_file(cr, '/TestExperiment/Directory_1/Group_1/file_01.txt') - self._add_file(cr, '/TestExperiment/Directory_1/Group_1/file_02.txt') - sleep(1) # ensure that next request happens with different timestamp + self._add_file(cr, "/TestExperiment/Directory_1/Group_1/file_01.txt") + self._add_file(cr, "/TestExperiment/Directory_1/Group_1/file_02.txt") + sleep(1) # ensure that next request happens with different timestamp response = self.update_record(cr) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertTrue(response.data['date_last_cumulative_addition'] != response.data['date_created'], response.data) + self.assertTrue( + 
response.data["date_last_cumulative_addition"] != response.data["date_created"], + response.data, + ) def test_add_single_sub_directory(self): """ @@ -2732,14 +3390,14 @@ def test_add_single_sub_directory(self): """ cr = self._create_cumulative_dataset_with_files() - self._add_directory(cr, '/TestExperiment/Directory_2/Group_2/Group_2_deeper') + self._add_directory(cr, "/TestExperiment/Directory_2/Group_2/Group_2_deeper") response = self.update_record(cr) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assert_file_count(response.data, 4) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 4) cr = response.data - self._add_directory(response.data, '/TestExperiment/Directory_2/Group_2') + self._add_directory(response.data, "/TestExperiment/Directory_2/Group_2") response = self.update_record(cr) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assert_file_count(response.data, 6) @@ -2752,14 +3410,14 @@ def test_single_common_root_directory(self): """ cr = self._create_cumulative_dataset_with_files() - self._add_directory(cr, '/TestExperiment/Directory_2') + self._add_directory(cr, "/TestExperiment/Directory_2") response = self.update_record(cr) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assert_file_count(response.data, 10) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 10) - self._add_directory(cr, '/TestExperiment/Directory_2/Group_2') - self._add_directory(cr, '/TestExperiment/Directory_2/Group_2/Group_2_deeper') + self._add_directory(cr, "/TestExperiment/Directory_2/Group_2") + self._add_directory(cr, "/TestExperiment/Directory_2/Group_2/Group_2_deeper") response = self.update_record(cr) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assert_file_count(response.data, 10) @@ -2775,28 +3433,31 @@ def test_add_multiple_files_and_directories(self): cr = self._create_cumulative_dataset_with_files() # add one directory, which holds a two files and one sub directory which also holds two files. # four new files are added - self._add_directory(cr, '/TestExperiment/Directory_2/Group_2') + self._add_directory(cr, "/TestExperiment/Directory_2/Group_2") response = self.update_record(cr) self.assert_file_count(response.data, 6) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 6) # separately add (describe) a child directory of the previous dir. # no new files are added - self._add_directory(response.data, '/TestExperiment/Directory_2/Group_2/Group_2_deeper') + self._add_directory(response.data, "/TestExperiment/Directory_2/Group_2/Group_2_deeper") response = self.update_record(response.data) self.assert_file_count(response.data, 6) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 6) # add a single new file not included by the previously added directories. # new files are added - self._add_file(response.data, '/TestExperiment/Directory_2/file_14.txt') + self._add_file(response.data, "/TestExperiment/Directory_2/file_14.txt") response = self.update_record(response.data) self.assert_file_count(response.data, 7) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 7) # add a single new file already included by the previously added directories. 
# new files are not added - self._add_file(response.data, '/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_11.txt') + self._add_file( + response.data, + "/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_11.txt", + ) response = self.update_record(response.data) self.assert_file_count(response.data, 7) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 7) @@ -2813,7 +3474,7 @@ def test_add_files_which_were_frozen_later(self): cr = self._create_cumulative_dataset_with_files() # add new root directory which holds eight files - self._add_directory(cr, '/TestExperiment/Directory_2') + self._add_directory(cr, "/TestExperiment/Directory_2") response = self.update_record(cr) self.assert_preferred_identifier_changed(response, False) self.assert_file_count(response.data, 10) @@ -2831,7 +3492,7 @@ def test_add_files_which_were_frozen_later(self): self._freeze_new_files() # only added files are included in the dataset - self._add_file(response.data, '/TestExperiment/Directory_2/Group_3/file_90.txt') + self._add_file(response.data, "/TestExperiment/Directory_2/Group_3/file_90.txt") response = self.update_record(response.data) self.assert_preferred_identifier_changed(response, False) self.assert_file_count(response.data, 11) @@ -2844,16 +3505,16 @@ def test_metadata_changes_do_not_add_later_frozen_files(self): specifically added in the update. """ # create the original record with just one directory - self.cr_test_data['cumulative_state'] = 1 - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self.cr_test_data["cumulative_state"] = 1 + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assert_file_count(response.data, 8) original_version = response.data self._freeze_new_files() - original_version['research_dataset']['version_notes'] = [str(datetime.now())] + original_version["research_dataset"]["version_notes"] = [str(datetime.now())] response = self.update_record(original_version) self.assert_file_count(response.data, 8) @@ -2864,41 +3525,43 @@ def test_add_files_with_preserve_version_flag(self): to allow adding with same version. 
""" cr = self._create_cumulative_dataset_with_files() - self._add_file(cr, '/TestExperiment/Directory_1/Group_1/file_01.txt') - response = self.client.put('/rest/datasets/%s?preserve_version' % cr['identifier'], cr, format="json") + self._add_file(cr, "/TestExperiment/Directory_1/Group_1/file_01.txt") + response = self.client.put( + "/rest/datasets/%s?preserve_version" % cr["identifier"], cr, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_change_preservation_state(self): cr = self._create_cumulative_dataset_with_files() - cr['preservation_state'] = 10 - response = self.client.put('/rest/datasets/%s' % cr['identifier'], cr, format="json") + cr["preservation_state"] = 10 + response = self.client.put("/rest/datasets/%s" % cr["identifier"], cr, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) def test_remove_files_from_cumulative_dataset(self): cr = self._create_cumulative_dataset_with_files() - self._remove_file(cr, '/TestExperiment/Directory_1/file_06.txt') + self._remove_file(cr, "/TestExperiment/Directory_1/file_06.txt") response = self.update_record(cr) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) # ensure that there are no changes - response = self.client.get('/rest/datasets/%s' % cr['identifier'], format="json") + response = self.client.get("/rest/datasets/%s" % cr["identifier"], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assert_file_count(response.data, 2) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 2) def test_remove_directory_from_cumulative_dataset(self): cr = self._create_cumulative_dataset_with_files() - self._add_directory(cr, '/TestExperiment/Directory_2') + self._add_directory(cr, "/TestExperiment/Directory_2") response = self.update_record(cr) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._remove_directory(cr, '/TestExperiment/Directory_2') + self._remove_directory(cr, "/TestExperiment/Directory_2") response = self.update_record(cr) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) # ensure that there are no changes - response = self.client.get('/rest/datasets/%s' % cr['identifier'], format="json") + response = self.client.get("/rest/datasets/%s" % cr["identifier"], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assert_file_count(response.data, 10) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 10) @@ -2916,9 +3579,9 @@ def test_files_are_saved_during_create(self): """ A very simple "add two individual files" test. 
""" - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/file_05.txt') - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/file_06.txt') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/file_05.txt") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/file_06.txt") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assert_file_count(response.data, 2) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 2) @@ -2927,9 +3590,9 @@ def test_directories_are_saved_during_create(self): """ A very simple "add two individual directories" test. """ - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_1/Group_1') - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_1/Group_2') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_1/Group_1") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_1/Group_2") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assert_file_count(response.data, 4) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 4) @@ -2938,10 +3601,10 @@ def test_single_common_root_directory(self): """ A very simple "there is a single common root directory" test. """ - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2') - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2/Group_2') - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2/Group_2/Group_2_deeper') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2/Group_2") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2/Group_2/Group_2_deeper") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assert_file_count(response.data, 8) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 8) @@ -2951,11 +3614,13 @@ def test_directory_names_are_similar(self): Ensure similar directory names are not mistaken to have parent/child relations, i.e. directory separator-character is the true separator of dirs in the path. 
""" - self._add_directory(self.cr_test_data, '/SecondExperiment/Data') - self._add_directory(self.cr_test_data, '/SecondExperiment/Data/History') - self._add_directory(self.cr_test_data, '/SecondExperiment/Data_Config') # the interesting dir + self._add_directory(self.cr_test_data, "/SecondExperiment/Data") + self._add_directory(self.cr_test_data, "/SecondExperiment/Data/History") + self._add_directory( + self.cr_test_data, "/SecondExperiment/Data_Config" + ) # the interesting dir - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assert_file_count(response.data, 4) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 4) @@ -2964,11 +3629,11 @@ def test_files_and_directories_are_saved_during_create(self): """ A very simple "add two individual directories and two files" test. """ - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_1/Group_1') - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_1/Group_2') - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/file_05.txt') - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/file_06.txt') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_1/Group_1") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_1/Group_2") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/file_05.txt") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/file_06.txt") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assert_file_count(response.data, 6) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 6) @@ -2977,10 +3642,10 @@ def test_files_and_directories_are_saved_during_create_2(self): """ Save a directory, and also two files from the same directory. """ - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_1/Group_1') - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/Group_1/file_01.txt') - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/Group_1/file_02.txt') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_1/Group_1") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/Group_1/file_01.txt") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/Group_1/file_02.txt") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assert_file_count(response.data, 2) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 2) @@ -2990,16 +3655,16 @@ def test_empty_files_and_directories_arrays_are_removed(self): If an update is trying to leave empty "files" or "directories" array into research_dataset, they should be removed entirely during the update. 
""" - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_1/Group_1') - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/Group_1/file_01.txt') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_1/Group_1") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/Group_1/file_01.txt") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") cr = response.data - cr['research_dataset']['directories'] = [] - cr['research_dataset']['files'] = [] - response = self.client.put('/rest/datasets/%d' % cr['id'], cr, format="json") + cr["research_dataset"]["directories"] = [] + cr["research_dataset"]["files"] = [] + response = self.client.put("/rest/datasets/%d" % cr["id"], cr, format="json") new_version = self.get_next_version(response.data) - self.assertEqual('directories' in new_version['research_dataset'], False, response.data) - self.assertEqual('files' in new_version['research_dataset'], False, response.data) + self.assertEqual("directories" in new_version["research_dataset"], False, response.data) + self.assertEqual("files" in new_version["research_dataset"], False, response.data) def test_multiple_file_and_directory_changes(self): """ @@ -3011,15 +3676,15 @@ def test_multiple_file_and_directory_changes(self): """ # create the original record with just one file - self._add_file(self.cr_test_data, '/TestExperiment/Directory_2/file_13.txt') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_2/file_13.txt") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(CatalogRecord.objects.get(pk=response.data['id']).files.count(), 1) + self.assertEqual(CatalogRecord.objects.get(pk=response.data["id"]).files.count(), 1) # add one directory, which holds a couple of files and one sub directory which also holds two files. # new files are added original_version = response.data - self._add_directory(original_version, '/TestExperiment/Directory_2/Group_2') + self._add_directory(original_version, "/TestExperiment/Directory_2/Group_2") response = self.update_record(original_version) self.assert_preferred_identifier_changed(response, True) new_version = self.get_next_version(response.data) @@ -3028,13 +3693,13 @@ def test_multiple_file_and_directory_changes(self): # separately add (describe) a child directory of the previous dir. # no new files are added - self._add_directory(new_version, '/TestExperiment/Directory_2/Group_2/Group_2_deeper') + self._add_directory(new_version, "/TestExperiment/Directory_2/Group_2/Group_2_deeper") response = self.update_record(new_version) - self.assertEqual('new_dataset_version' in response.data, False) + self.assertEqual("new_dataset_version" in response.data, False) # add a single new file not included by the previously added directories. # new files are added - self._add_file(new_version, '/TestExperiment/Directory_2/file_14.txt') + self._add_file(new_version, "/TestExperiment/Directory_2/file_14.txt") response = self.update_record(new_version) self.assert_preferred_identifier_changed(response, True) new_version = self.get_next_version(response.data) @@ -3043,7 +3708,7 @@ def test_multiple_file_and_directory_changes(self): # remove the previously added file, not included by the previously added directories. 
# files are removed - self._remove_file(new_version, '/TestExperiment/Directory_2/file_14.txt') + self._remove_file(new_version, "/TestExperiment/Directory_2/file_14.txt") response = self.update_record(new_version) self.assert_preferred_identifier_changed(response, True) new_version = self.get_next_version(response.data) @@ -3052,26 +3717,32 @@ def test_multiple_file_and_directory_changes(self): # add a single new file already included by the previously added directories. # new files are not added - self._add_file(new_version, '/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_11.txt') + self._add_file( + new_version, + "/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_11.txt", + ) response = self.update_record(new_version) - self.assertEqual('new_dataset_version' in response.data, False) + self.assertEqual("new_dataset_version" in response.data, False) # remove the sub dir added previously. files are also still contained by the other upper dir. # files are not removed - self._remove_directory(new_version, '/TestExperiment/Directory_2/Group_2/Group_2_deeper') + self._remove_directory(new_version, "/TestExperiment/Directory_2/Group_2/Group_2_deeper") response = self.update_record(new_version) - self.assertEqual('new_dataset_version' in response.data, False) + self.assertEqual("new_dataset_version" in response.data, False) # remove a previously added file, the file is still contained by the other upper dir. # files are not removed - self._remove_file(new_version, '/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_11.txt') + self._remove_file( + new_version, + "/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_11.txt", + ) response = self.update_record(new_version) - self.assertEqual('new_dataset_version' in response.data, False) + self.assertEqual("new_dataset_version" in response.data, False) # remove the last directory, which should remove the 4 files included by this dir and the sub dir. # files are removed. # only the originally added single file should be left. 
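        # (Editor's aside, not part of the original patch: the pattern this test
        # exercises is that a new dataset version appears exactly when the
        # *effective* set of files changes; describing or un-describing a dir
        # whose files remain covered by another described dir changes nothing.
        # A minimal sketch of that rule, with illustrative names:
        #
        #     def bumps_dataset_version(old_file_ids: set, new_file_ids: set) -> bool:
        #         return old_file_ids != new_file_ids
        #
        #     assert bumps_dataset_version({1, 2}, {1, 2, 3})          # file added
        #     assert not bumps_dataset_version({1, 2, 3}, {1, 2, 3})   # metadata only
        # )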
- self._remove_directory(new_version, '/TestExperiment/Directory_2/Group_2') + self._remove_directory(new_version, "/TestExperiment/Directory_2/Group_2") response = self.update_record(new_version) self.assert_preferred_identifier_changed(response, True) new_version = self.get_next_version(response.data) @@ -3089,16 +3760,16 @@ def test_add_files_which_were_frozen_later(self): """ # create the original record with just one directory - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(CatalogRecord.objects.get(pk=response.data['id']).files.count(), 8) + self.assertEqual(CatalogRecord.objects.get(pk=response.data["id"]).files.count(), 8) original_version = response.data self._freeze_new_files() # add one new file - self._add_file(original_version, '/TestExperiment/Directory_2/Group_3/file_90.txt') + self._add_file(original_version, "/TestExperiment/Directory_2/Group_3/file_90.txt") response = self.update_record(original_version) self.assert_preferred_identifier_changed(response, True) new_version = self.get_next_version(response.data) @@ -3106,7 +3777,7 @@ def test_add_files_which_were_frozen_later(self): self.assert_total_files_byte_size(new_version, self._single_file_byte_size * 9) # add one new directory, which holds one new file, since the other file was already added - self._add_directory(new_version, '/TestExperiment/Directory_2/Group_3') + self._add_directory(new_version, "/TestExperiment/Directory_2/Group_3") response = self.update_record(new_version) self.assert_preferred_identifier_changed(response, True) new_version = self.get_next_version(response.data) @@ -3121,17 +3792,17 @@ def test_metadata_changes_do_not_add_later_frozen_files(self): """ # create the original record with just one directory - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(CatalogRecord.objects.get(pk=response.data['id']).files.count(), 8) + self.assertEqual(CatalogRecord.objects.get(pk=response.data["id"]).files.count(), 8) original_version = response.data self._freeze_new_files() - original_version['research_dataset']['version_notes'] = [str(datetime.now())] + original_version["research_dataset"]["version_notes"] = [str(datetime.now())] response = self.update_record(original_version) - self.assertEqual('next_dataset_version' in response.data, False) + self.assertEqual("next_dataset_version" in response.data, False) self.assert_file_count(response.data, 8) def test_removing_top_level_directory_does_not_remove_all_files(self): @@ -3144,11 +3815,13 @@ def test_removing_top_level_directory_does_not_remove_all_files(self): # note: see the method _form_test_file_hierarchy() to inspect what the directories # contain in more detail. 
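        # (Editor's sketch, assuming the described-directory semantics the
        # comments above lay out: after a removal, a file stays in the dataset
        # if some remaining described directory is still a path-prefix of it.
        # The helper name is illustrative:
        #
        #     def file_retained(file_path: str, described_dirs: list) -> bool:
        #         return any(file_path.startswith(d.rstrip("/") + "/") for d in described_dirs)
        # )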
- self._add_directory(self.cr_test_data, '/TestExperiment') # 14 files (removed later) - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_1') # 6 files - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2') # 8 files (removed later) - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2/Group_2') # 4 files - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment") # 14 files (removed later) + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_1") # 6 files + self._add_directory( + self.cr_test_data, "/TestExperiment/Directory_2" + ) # 8 files (removed later) + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2/Group_2") # 4 files + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assert_file_count(response.data, 14) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 14) @@ -3157,8 +3830,8 @@ def test_removing_top_level_directory_does_not_remove_all_files(self): # remove the root dir, and another sub-dir. there should be two directories left. both of them # are now "top-level directories", since they have no common parent. # files are removed - self._remove_directory(original_version, '/TestExperiment') - self._remove_directory(original_version, '/TestExperiment/Directory_2') + self._remove_directory(original_version, "/TestExperiment") + self._remove_directory(original_version, "/TestExperiment/Directory_2") response = self.update_record(original_version) self.assert_preferred_identifier_changed(response, True) new_version = self.get_next_version(response.data) @@ -3169,8 +3842,8 @@ def test_add_multiple_directories(self): """ Ensure adding multiple directories at once really adds files from all new directories. """ - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2/Group_2/Group_2_deeper') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2/Group_2/Group_2_deeper") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assert_file_count(response.data, 2) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 2) @@ -3178,9 +3851,9 @@ def test_add_multiple_directories(self): # add new directories. # files are added - self._add_directory(original_version, '/TestExperiment/Directory_2/Group_1') - self._add_directory(original_version, '/TestExperiment/Directory_2/Group_2') - self._add_directory(original_version, '/SecondExperiment/Directory_1/Group_1') + self._add_directory(original_version, "/TestExperiment/Directory_2/Group_1") + self._add_directory(original_version, "/TestExperiment/Directory_2/Group_2") + self._add_directory(original_version, "/SecondExperiment/Directory_1/Group_1") response = self.update_record(original_version) self.assert_preferred_identifier_changed(response, True) new_version = self.get_next_version(response.data) @@ -3191,10 +3864,10 @@ def test_add_multiple_directories_2(self): """ Ensure adding multiple directories at once really adds files from all new directories. 
""" - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_1') - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2') - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2/Group_2/Group_2_deeper') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_1") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2/Group_2/Group_2_deeper") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assert_file_count(response.data, 14) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 14) @@ -3204,9 +3877,9 @@ def test_add_files_from_different_projects(self): Add directories from two different projects, ensure both projects' top-level dirs are handled properly, and none of the projects interferes with each other. """ - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2/Group_2/Group_2_deeper') - self._add_directory(self.cr_test_data, '/SecondExperiment/Directory_1/Group_1') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2/Group_2/Group_2_deeper") + self._add_directory(self.cr_test_data, "/SecondExperiment/Directory_1/Group_1") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assert_file_count(response.data, 4) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 4) @@ -3214,7 +3887,7 @@ def test_add_files_from_different_projects(self): # add a new directory. this is a top-level of the previous dir, which contains new files. # files are added - self._add_directory(original_version, '/TestExperiment/Directory_2/Group_2') + self._add_directory(original_version, "/TestExperiment/Directory_2/Group_2") response = self.update_record(original_version) self.assert_preferred_identifier_changed(response, True) new_version = self.get_next_version(response.data) @@ -3223,7 +3896,7 @@ def test_add_files_from_different_projects(self): # remove the previously added dir, which is now the top-level dir in that project. # files are removed - self._remove_directory(new_version, '/TestExperiment/Directory_2/Group_2') + self._remove_directory(new_version, "/TestExperiment/Directory_2/Group_2") response = self.update_record(new_version) self.assert_preferred_identifier_changed(response, True) new_version = self.get_next_version(response.data) @@ -3232,7 +3905,7 @@ def test_add_files_from_different_projects(self): # remove dirs from the second project entirely. 
# files are removed - self._remove_directory(new_version, '/SecondExperiment/Directory_1/Group_1') + self._remove_directory(new_version, "/SecondExperiment/Directory_1/Group_1") response = self.update_record(new_version) self.assert_preferred_identifier_changed(response, True) new_version = self.get_next_version(response.data) @@ -3240,16 +3913,16 @@ def test_add_files_from_different_projects(self): self.assert_total_files_byte_size(new_version, self._single_file_byte_size * 2) def test_file_not_found(self): - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") original_version = response.data self._add_nonexisting_file(original_version) response = self.update_record(original_version) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) def test_directory_not_found(self): - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") original_version = response.data self._add_nonexisting_directory(original_version) response = self.update_record(original_version) @@ -3262,19 +3935,25 @@ def test_prevent_file_changes_to_old_dataset_versions(self): """ # create original record - self._add_file(self.cr_test_data, '/TestExperiment/Directory_2/file_13.txt') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_2/file_13.txt") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") original_version = response.data # make a file change, so that a new dataset version is created - self._add_file(original_version, '/TestExperiment/Directory_2/file_14.txt') + self._add_file(original_version, "/TestExperiment/Directory_2/file_14.txt") response = self.update_record(original_version) # now try to make a file change to the older dataset versions. this should not be permitted - self._add_file(original_version, '/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_11.txt') + self._add_file( + original_version, + "/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_11.txt", + ) response = self.update_record(original_version) - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, - 'file changes in old dataset versions should not be allowed') + self.assertEqual( + response.status_code, + status.HTTP_400_BAD_REQUEST, + "file changes in old dataset versions should not be allowed", + ) # other tests related to adding files / dirs @@ -3283,57 +3962,60 @@ def test_file_and_dir_titles_are_populated_when_omitted(self): If field 'title' is omitted from file or dir metadata, their respective file_name or directory_name should automatically be populated as title. 
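
        A sketch of the fallback, added by the editor with assumed field names:

            >>> entry = {"identifier": "pid:urn:1", "file_name": "file_01.txt"}
            >>> entry.get("title", entry["file_name"])
            'file_01.txt'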
""" - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2') - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/Group_1/file_01.txt') + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/Group_1/file_01.txt") # ensure titles are not overwritten when specified by the user orig_titles = [ - self.cr_test_data['research_dataset']['files'][0]['title'], - self.cr_test_data['research_dataset']['directories'][0]['title'], + self.cr_test_data["research_dataset"]["files"][0]["title"], + self.cr_test_data["research_dataset"]["directories"][0]["title"], ] - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assertTrue( - response.data['research_dataset']['files'][0]['title'] in orig_titles, - response.data['research_dataset']['files'][0]['title'] + response.data["research_dataset"]["files"][0]["title"] in orig_titles, + response.data["research_dataset"]["files"][0]["title"], ) self.assertTrue( - response.data['research_dataset']['directories'][0]['title'] in orig_titles, - response.data['research_dataset']['directories'][0]['title'] + response.data["research_dataset"]["directories"][0]["title"] in orig_titles, + response.data["research_dataset"]["directories"][0]["title"], ) # ensure titles are automatically populated when omitted by the user - del self.cr_test_data['research_dataset']['files'][0]['title'] - del self.cr_test_data['research_dataset']['directories'][0]['title'] + del self.cr_test_data["research_dataset"]["files"][0]["title"] + del self.cr_test_data["research_dataset"]["directories"][0]["title"] - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assertTrue( - response.data['research_dataset']['files'][0]['title'] not in orig_titles, - response.data['research_dataset']['files'][0]['title'] + response.data["research_dataset"]["files"][0]["title"] not in orig_titles, + response.data["research_dataset"]["files"][0]["title"], ) self.assertTrue( - response.data['research_dataset']['directories'][0]['title'] not in orig_titles, - response.data['research_dataset']['directories'][0]['title'] + response.data["research_dataset"]["directories"][0]["title"] not in orig_titles, + response.data["research_dataset"]["directories"][0]["title"], ) def test_prevent_non_existent_additions_to_deprecated_dataset(self): - self._add_file(self.cr_test_data, '/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_11.txt') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_file( + self.cr_test_data, + "/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_11.txt", + ) + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) cr = response.data - file_id = cr['research_dataset']['files'][0]['identifier'] + file_id = cr["research_dataset"]["files"][0]["identifier"] - response = self.client.delete('/rest/files/%s' % file_id, format="json") + response = self.client.delete("/rest/files/%s" % file_id, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, 
response.data) self._add_nonexisting_directory(cr) - response = self.client.put('/rest/datasets/%s' % cr['id'], cr, format="json") + response = self.client.put("/rest/datasets/%s" % cr["id"], cr, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) @@ -3344,15 +4026,20 @@ class CatalogRecordApiWriteRemoteResources(CatalogRecordApiWriteCommon): """ def test_calculate_total_remote_resources_byte_size(self): - cr_with_rr = self._get_object_from_test_data('catalogrecord', requested_index=14) - rr = cr_with_rr['research_dataset']['remote_resources'] - total_remote_resources_byte_size = sum(res['byte_size'] for res in rr) - self.cr_att_test_data['research_dataset']['remote_resources'] = rr - response = self.client.post('/rest/datasets', self.cr_att_test_data, format="json") + cr_with_rr = self._get_object_from_test_data("catalogrecord", requested_index=14) + rr = cr_with_rr["research_dataset"]["remote_resources"] + total_remote_resources_byte_size = sum(res["byte_size"] for res in rr) + self.cr_att_test_data["research_dataset"]["remote_resources"] = rr + response = self.client.post("/rest/datasets", self.cr_att_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('total_remote_resources_byte_size' in response.data['research_dataset'], True) - self.assertEqual(response.data['research_dataset']['total_remote_resources_byte_size'], - total_remote_resources_byte_size) + self.assertEqual( + "total_remote_resources_byte_size" in response.data["research_dataset"], + True, + ) + self.assertEqual( + response.data["research_dataset"]["total_remote_resources_byte_size"], + total_remote_resources_byte_size, + ) class CatalogRecordApiWriteLegacyDataCatalogs(CatalogRecordApiWriteCommon): @@ -3366,55 +4053,57 @@ def setUp(self): Create a test-datacatalog that plays the role of a legacy catalog. """ super().setUp() - dc = DataCatalog.objects.filter(catalog_json__research_dataset_schema='att').first() - dc.catalog_json['identifier'] = LEGACY_CATALOGS[0] + dc = DataCatalog.objects.filter(catalog_json__research_dataset_schema="att").first() + dc.catalog_json["identifier"] = LEGACY_CATALOGS[0] dc.force_save() - del self.cr_test_data['research_dataset']['files'] - del self.cr_test_data['research_dataset']['total_files_byte_size'] + del self.cr_test_data["research_dataset"]["files"] + del self.cr_test_data["research_dataset"]["total_files_byte_size"] def test_legacy_catalog_pids_are_not_unique(self): # values provided as pid values in legacy catalogs are not required to be unique # within the catalog. 
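        # (Editor's sketch of the rule under test; LEGACY_CATALOGS is the
        # module-level list used by these tests, the helper name is illustrative:
        #
        #     def pid_uniqueness_enforced(catalog_identifier: str) -> bool:
        #         return catalog_identifier not in LEGACY_CATALOGS
        # )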
-        self.cr_test_data['data_catalog'] = LEGACY_CATALOGS[0]
-        self.cr_test_data['research_dataset']['preferred_identifier'] = 'a'
+        self.cr_test_data["data_catalog"] = LEGACY_CATALOGS[0]
+        self.cr_test_data["research_dataset"]["preferred_identifier"] = "a"

         same_pid_ids = []
         for i in range(3):
-            response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+            response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
             self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-            self.assertEqual(response.data['research_dataset']['preferred_identifier'], 'a')
-            same_pid_ids.append(response.data['id'])
+            self.assertEqual(response.data["research_dataset"]["preferred_identifier"], "a")
+            same_pid_ids.append(response.data["id"])

         # pid can even be the same as an existing dataset's pid in an ATT catalog
         real_pid = CatalogRecord.objects.get(pk=1).preferred_identifier
-        self.cr_test_data['research_dataset']['preferred_identifier'] = real_pid
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["research_dataset"]["preferred_identifier"] = real_pid
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual(response.data['research_dataset']['preferred_identifier'], real_pid)
+        self.assertEqual(response.data["research_dataset"]["preferred_identifier"], real_pid)

     def test_legacy_catalog_pid_must_be_provided(self):
         # pid can't be empty string
-        self.cr_test_data['data_catalog'] = LEGACY_CATALOGS[0]
-        self.cr_test_data['research_dataset']['preferred_identifier'] = ''
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["data_catalog"] = LEGACY_CATALOGS[0]
+        self.cr_test_data["research_dataset"]["preferred_identifier"] = ""
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)

         # pid can't be omitted
-        del self.cr_test_data['research_dataset']['preferred_identifier']
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        del self.cr_test_data["research_dataset"]["preferred_identifier"]
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)

     def test_legacy_catalog_pids_update(self):
         # test setup
-        self.cr_test_data['data_catalog'] = LEGACY_CATALOGS[0]
-        self.cr_test_data['research_dataset']['preferred_identifier'] = 'a'
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["data_catalog"] = LEGACY_CATALOGS[0]
+        self.cr_test_data["research_dataset"]["preferred_identifier"] = "a"
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

         # update record.
in updates, uniqueness should likewise not be checked
         modify = response.data
         real_pid = CatalogRecord.objects.get(pk=1).preferred_identifier
-        modify['research_dataset']['preferred_identifier'] = real_pid
-        response = self.client.put('/rest/datasets/%s?include_legacy' % modify['id'], modify, format="json")
+        modify["research_dataset"]["preferred_identifier"] = real_pid
+        response = self.client.put(
+            "/rest/datasets/%s?include_legacy" % modify["id"], modify, format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

     def test_delete_legacy_catalog_dataset(self):
@@ -3424,17 +4113,17 @@ def test_delete_legacy_catalog_dataset(self):
         """

         # test setup
-        self.cr_test_data['data_catalog'] = LEGACY_CATALOGS[0]
-        self.cr_test_data['research_dataset']['preferred_identifier'] = 'a'
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["data_catalog"] = LEGACY_CATALOGS[0]
+        self.cr_test_data["research_dataset"]["preferred_identifier"] = "a"
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        cr_id = response.data['id']
+        cr_id = response.data["id"]

         # delete record
-        response = self.client.delete('/rest/datasets/%s?include_legacy' % cr_id, format="json")
+        response = self.client.delete("/rest/datasets/%s?include_legacy" % cr_id, format="json")
         self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)
         results_count = CatalogRecord.objects_unfiltered.filter(pk=cr_id).count()
-        self.assertEqual(results_count, 0, 'record should have been deleted permantly')
+        self.assertEqual(results_count, 0, "record should have been deleted permanently")


 class CatalogRecordApiWriteOwnerFields(CatalogRecordApiWriteCommon):
@@ -3453,45 +4142,49 @@ def test_metadata_owner_org_is_copied_from_metadata_provider_org(self):
         """

         # create
-        cr = self.client.get('/rest/datasets/1', format="json").data
-        cr.pop('id')
-        cr.pop('identifier')
-        cr.pop('metadata_owner_org')
-        cr['research_dataset'].pop('preferred_identifier')
-        response = self.client.post('/rest/datasets', cr, format="json")
+        cr = self.client.get("/rest/datasets/1", format="json").data
+        cr.pop("id")
+        cr.pop("identifier")
+        cr.pop("metadata_owner_org")
+        cr["research_dataset"].pop("preferred_identifier")
+        response = self.client.post("/rest/datasets", cr, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual(response.data['metadata_owner_org'], response.data['metadata_provider_org'])
+        self.assertEqual(
+            response.data["metadata_owner_org"], response.data["metadata_provider_org"]
+        )

         # update to null - update is prevented
-        cr = self.client.get('/rest/datasets/1', format="json").data
-        original = cr['metadata_owner_org']
-        cr['metadata_owner_org'] = None
-        response = self.client.put('/rest/datasets/1', cr, format="json")
+        cr = self.client.get("/rest/datasets/1", format="json").data
+        original = cr["metadata_owner_org"]
+        cr["metadata_owner_org"] = None
+        response = self.client.put("/rest/datasets/1", cr, format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(response.data['metadata_owner_org'], original)
+        self.assertEqual(response.data["metadata_owner_org"], original)

         # update with patch, where metadata_owner_org field is absent - value is not reverted back
         # to metadata_provider_org
-        response = self.client.patch('/rest/datasets/1', {
'metadata_owner_org': 'abc' }, format="json") + response = self.client.patch( + "/rest/datasets/1", {"metadata_owner_org": "abc"}, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.patch('/rest/datasets/1', { 'contract': 1 }, format="json") - self.assertEqual(response.data['metadata_owner_org'], 'abc') + response = self.client.patch("/rest/datasets/1", {"contract": 1}, format="json") + self.assertEqual(response.data["metadata_owner_org"], "abc") def test_metadata_provider_org_is_readonly_after_creating(self): - cr = self.client.get('/rest/datasets/1', format="json").data - original = cr['metadata_provider_org'] - cr['metadata_provider_org'] = 'changed' - response = self.client.put('/rest/datasets/1', cr, format="json") + cr = self.client.get("/rest/datasets/1", format="json").data + original = cr["metadata_provider_org"] + cr["metadata_provider_org"] = "changed" + response = self.client.put("/rest/datasets/1", cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['metadata_provider_org'], original) + self.assertEqual(response.data["metadata_provider_org"], original) def test_metadata_provider_user_is_readonly_after_creating(self): - cr = self.client.get('/rest/datasets/1', format="json").data - original = cr['metadata_provider_user'] - cr['metadata_provider_user'] = 'changed' - response = self.client.put('/rest/datasets/1', cr, format="json") + cr = self.client.get("/rest/datasets/1", format="json").data + original = cr["metadata_provider_user"] + cr["metadata_provider_user"] = "changed" + response = self.client.put("/rest/datasets/1", cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['metadata_provider_user'], original) + self.assertEqual(response.data["metadata_provider_user"], original) class CatalogRecordApiEndUserAccess(CatalogRecordApiWriteCommon): @@ -3507,20 +4200,20 @@ def setUp(self): dc = DataCatalog.objects.get(pk=1) catalog_json = dc.catalog_json for identifier in END_USER_ALLOWED_DATA_CATALOGS: - catalog_json['identifier'] = identifier + catalog_json["identifier"] = identifier dc = DataCatalog.objects.create( catalog_json=catalog_json, date_created=get_tz_aware_now_without_micros(), - catalog_record_services_create='testuser,api_auth_user,metax', - catalog_record_services_edit='testuser,api_auth_user,metax', - catalog_record_services_read='testuser,api_auth_user,metax' + catalog_record_services_create="testuser,api_auth_user,metax", + catalog_record_services_edit="testuser,api_auth_user,metax", + catalog_record_services_read="testuser,api_auth_user,metax", ) self.token = get_test_oidc_token() # by default, use the unmodified token. 
to use a different/modified token
         # for various test scenarios, alter self.token, and call the below method again
-        self._use_http_authorization(method='bearer', token=self.token)
+        self._use_http_authorization(method="bearer", token=self.token)

         # no reason to test anything related to failed authentication, since failed
         # authentication stops the request from proceeding anywhere
@@ -3528,183 +4221,195 @@ def setUp(self):

     def _set_cr_owner_to_token_user(self, cr_id):
         cr = CatalogRecord.objects.get(pk=cr_id)
-        cr.user_created = self.token['CSCUserName']
-        cr.metadata_provider_user = self.token['CSCUserName']
-        cr.editor = None # pretend the record was created by user directly
+        cr.user_created = self.token["CSCUserName"]
+        cr.metadata_provider_user = self.token["CSCUserName"]
+        cr.editor = None  # pretend the record was created by user directly
         cr.force_save()

     def _set_cr_to_permitted_catalog(self, cr_id):
         cr = CatalogRecord.objects.get(pk=cr_id)
-        cr.data_catalog_id = DataCatalog.objects.get(catalog_json__identifier=END_USER_ALLOWED_DATA_CATALOGS[0]).id
+        cr.data_catalog_id = DataCatalog.objects.get(
+            catalog_json__identifier=END_USER_ALLOWED_DATA_CATALOGS[0]
+        ).id
         cr.force_save()

     @responses.activate
     def test_user_can_create_dataset(self):
-        '''
+        """
         Ensure end user can create a new dataset, and required fields are
         automatically placed and the user is only able to affect allowed
         fields
-        '''
-        user_created = self.token['CSCUserName']
-        metadata_provider_user = self.token['CSCUserName']
-        metadata_provider_org = self.token['schacHomeOrganization']
-        metadata_owner_org = self.token['schacHomeOrganization']
-
-        self.cr_test_data['data_catalog'] = END_USER_ALLOWED_DATA_CATALOGS[0] # ida
-        self.cr_test_data['contract'] = 1
-        self.cr_test_data['editor'] = { 'nope': 'discarded by metax' }
-        self.cr_test_data['preservation_description'] = 'discarded by metax'
-        self.cr_test_data['preservation_reason_description'] = 'discarded by metax'
-        self.cr_test_data['preservation_state'] = 10
-        self.cr_test_data.pop('metadata_provider_user', None)
-        self.cr_test_data.pop('metadata_provider_org', None)
-        self.cr_test_data.pop('metadata_owner_org', None)
+        """
+        user_created = self.token["CSCUserName"]
+        metadata_provider_user = self.token["CSCUserName"]
+        metadata_provider_org = self.token["schacHomeOrganization"]
+        metadata_owner_org = self.token["schacHomeOrganization"]
+
+        self.cr_test_data["data_catalog"] = END_USER_ALLOWED_DATA_CATALOGS[0]  # ida
+        self.cr_test_data["contract"] = 1
+        self.cr_test_data["editor"] = {"nope": "discarded by metax"}
+        self.cr_test_data["preservation_description"] = "discarded by metax"
+        self.cr_test_data["preservation_reason_description"] = "discarded by metax"
+        self.cr_test_data["preservation_state"] = 10
+        self.cr_test_data.pop("metadata_provider_user", None)
+        self.cr_test_data.pop("metadata_provider_org", None)
+        self.cr_test_data.pop("metadata_owner_org", None)

         # test file permission checking in another test
-        self.cr_test_data['research_dataset'].pop('files', None)
-        self.cr_test_data['research_dataset'].pop('directories', None)
+        self.cr_test_data["research_dataset"].pop("files", None)
+        self.cr_test_data["research_dataset"].pop("directories", None)

-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
-        self.assertEqual(response.data['user_created'], user_created)
-
self.assertEqual(response.data['metadata_provider_user'], metadata_provider_user)
-        self.assertEqual(response.data['metadata_provider_org'], metadata_provider_org)
-        self.assertEqual(response.data['metadata_owner_org'], metadata_owner_org)
-        self.assertEqual('contract' in response.data, False)
-        self.assertEqual('editor' in response.data, False)
-        self.assertEqual('preservation_description' in response.data, False)
-        self.assertEqual('preservation_reason_description' in response.data, False)
-        self.assertEqual(response.data['preservation_state'], 0)
+        self.assertEqual(response.data["user_created"], user_created)
+        self.assertEqual(response.data["metadata_provider_user"], metadata_provider_user)
+        self.assertEqual(response.data["metadata_provider_org"], metadata_provider_org)
+        self.assertEqual(response.data["metadata_owner_org"], metadata_owner_org)
+        self.assertEqual("contract" in response.data, False)
+        self.assertEqual("editor" in response.data, False)
+        self.assertEqual("preservation_description" in response.data, False)
+        self.assertEqual("preservation_reason_description" in response.data, False)
+        self.assertEqual(response.data["preservation_state"], 0)

     @responses.activate
     def test_user_can_create_datasets_only_to_limited_catalogs(self):
-        '''
+        """
        End users should not be able to create datasets in, for example, harvested data catalogs.
-        '''
+        """
         # test file permission checking in another test
-        self.cr_test_data['research_dataset'].pop('files', None)
-        self.cr_test_data['research_dataset'].pop('directories', None)
+        self.cr_test_data["research_dataset"].pop("files", None)
+        self.cr_test_data["research_dataset"].pop("directories", None)

         # should not work
-        self.cr_test_data['data_catalog'] = 1
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["data_catalog"] = 1
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
         # check that the error has the expected description
-        self.assertEqual('selected data catalog' in response.data['detail'][0], True, response.data)
+        self.assertEqual("selected data catalog" in response.data["detail"][0], True, response.data)

         # should work
         # draft catalog cannot be used in V1 api so skip them here
-        for identifier in [ dc for dc in END_USER_ALLOWED_DATA_CATALOGS if dc != DFT_CATALOG ]:
+        for identifier in [dc for dc in END_USER_ALLOWED_DATA_CATALOGS if dc != DFT_CATALOG]:
             if identifier in LEGACY_CATALOGS:
-                self.cr_test_data['research_dataset']['preferred_identifier'] = 'a'
+                self.cr_test_data["research_dataset"]["preferred_identifier"] = "a"

-            self.cr_test_data['data_catalog'] = identifier
-            response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+            self.cr_test_data["data_catalog"] = identifier
+            response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
             self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

             if identifier in LEGACY_CATALOGS:
                 # prevents next test from crashing if legacy catalog is not the last in the list
-                del self.cr_test_data['research_dataset']['preferred_identifier']
+                del self.cr_test_data["research_dataset"]["preferred_identifier"]

     @responses.activate
     def test_owner_can_edit_dataset(self):
-        '''
+        """
         Ensure end users are able to edit datasets owned by them. Ensure end users can only
         edit permitted fields.
         Note: File project permissions should not be checked, since files are not changed.
-        '''
+        """
         # create test record
-        self.cr_test_data['data_catalog'] = END_USER_ALLOWED_DATA_CATALOGS[0]
-        self.cr_test_data['research_dataset'].pop('files', None) # test file permission checking in another test
-        self.cr_test_data['research_dataset'].pop('directories', None)
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["data_catalog"] = END_USER_ALLOWED_DATA_CATALOGS[0]
+        self.cr_test_data["research_dataset"].pop(
+            "files", None
+        )  # test file permission checking in another test
+        self.cr_test_data["research_dataset"].pop("directories", None)
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
         modified_data = response.data
         # research_dataset is the only permitted field to edit
-        modified_data['research_dataset']['value'] = 112233
-        modified_data['contract'] = 1
-        modified_data['editor'] = { 'nope': 'discarded by metax' }
-        modified_data['preservation_description'] = 'discarded by metax'
-        modified_data['preservation_reason_description'] = 'discarded by metax'
-        modified_data['preservation_state'] = 10
-
-        response = self.client.put('/rest/datasets/%d' % modified_data['id'], modified_data, format="json")
+        modified_data["research_dataset"]["value"] = 112233
+        modified_data["contract"] = 1
+        modified_data["editor"] = {"nope": "discarded by metax"}
+        modified_data["preservation_description"] = "discarded by metax"
+        modified_data["preservation_reason_description"] = "discarded by metax"
+        modified_data["preservation_state"] = 10
+
+        response = self.client.put(
+            "/rest/datasets/%d" % modified_data["id"], modified_data, format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(response.data['research_dataset']['value'], 112233) # value we set
-        self.assertEqual(response.data['user_modified'], self.token['CSCUserName']) # set by metax
+        self.assertEqual(response.data["research_dataset"]["value"], 112233)  # value we set
+        self.assertEqual(response.data["user_modified"], self.token["CSCUserName"])  # set by metax

         # none of these should have been affected
-        self.assertEqual('contract' in response.data, False)
-        self.assertEqual('editor' in response.data, False)
-        self.assertEqual('preservation_description' in response.data, False)
-        self.assertEqual('preservation_reason_description' in response.data, False)
-        self.assertEqual(response.data['preservation_state'], 0)
+        self.assertEqual("contract" in response.data, False)
+        self.assertEqual("editor" in response.data, False)
+        self.assertEqual("preservation_description" in response.data, False)
+        self.assertEqual("preservation_reason_description" in response.data, False)
+        self.assertEqual(response.data["preservation_state"], 0)

     @responses.activate
     def test_owner_can_edit_datasets_only_in_permitted_catalogs(self):
-        '''
+        """
         Ensure end users are able to edit datasets only in permitted catalogs, even if they own the
         record (catalog may be disabled from end user editing for one reason or another).
     @responses.activate
     def test_owner_can_edit_datasets_only_in_permitted_catalogs(self):
-        '''
+        """
         Ensure end users are able to edit datasets only in permitted catalogs, even if they
         own the record (catalog may be disabled from end user editing for one reason or another).
-        '''
+        """
         # create test record
-        self.cr_test_data['data_catalog'] = 1
-        self.cr_test_data['user_created'] = self.token['CSCUserName']
-        self.cr_test_data['metadata_provider_user'] = self.token['CSCUserName']
-        self.cr_test_data.pop('editor', None)
+        self.cr_test_data["data_catalog"] = 1
+        self.cr_test_data["user_created"] = self.token["CSCUserName"]
+        self.cr_test_data["metadata_provider_user"] = self.token["CSCUserName"]
+        self.cr_test_data.pop("editor", None)

-        self._use_http_authorization() # create cr as a service-user
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        self._use_http_authorization()  # create cr as a service-user
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

         modified_data = response.data
-        modified_data['research_dataset']['value'] = 112233
+        modified_data["research_dataset"]["value"] = 112233

-        self._use_http_authorization(method='bearer', token=self.token)
-        response = self.client.put('/rest/datasets/%d' % modified_data['id'], modified_data, format="json")
+        self._use_http_authorization(method="bearer", token=self.token)
+        response = self.client.put(
+            "/rest/datasets/%d" % modified_data["id"], modified_data, format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)

     @responses.activate
     def test_owner_can_edit_dataset_check_perms_from_editor_field(self):
-        '''
+        """
         Ensure end user perms are also checked from the field 'editor', which may be set
         by e.g. Qvain.
-        '''
-        self.cr_test_data['data_catalog'] = END_USER_ALLOWED_DATA_CATALOGS[0]
-        self.cr_test_data['user_created'] = 'editor field is checked before this field, so should be ok'
-        self.cr_test_data['editor'] = { 'owner_id': self.token['CSCUserName'] }
+        """
+        self.cr_test_data["data_catalog"] = END_USER_ALLOWED_DATA_CATALOGS[0]
+        self.cr_test_data[
+            "user_created"
+        ] = "editor field is checked before this field, so should be ok"
+        self.cr_test_data["editor"] = {"owner_id": self.token["CSCUserName"]}

-        self._use_http_authorization() # create cr as a service-user to ensure editor-field is set
+        self._use_http_authorization()  # create cr as a service-user to ensure editor-field is set

-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

-        self._use_http_authorization(method='bearer', token=self.token)
-        response = self.client.get('/rest/datasets/%d' % response.data['id'], format="json")
+        self._use_http_authorization(method="bearer", token=self.token)
+        response = self.client.get("/rest/datasets/%d" % response.data["id"], format="json")

         modified_data = response.data
-        modified_data['research_dataset']['value'] = 112233
+        modified_data["research_dataset"]["value"] = 112233

-        response = self.client.put('/rest/datasets/%d' % response.data['id'], modified_data, format="json")
+        response = self.client.put(
+            "/rest/datasets/%d" % response.data["id"], modified_data, format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK)
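As the docstring above notes, ownership is resolved from the editor field before user_created. A condensed sketch of that resolution order, assuming dict-shaped record data (the helper name is hypothetical):

    def user_owns_record(user_id, record):
        # The editor field (set by e.g. Qvain) takes precedence; user_created
        # is only consulted when no editor owner is recorded.
        editor = record.get("editor") or {}
        if "owner_id" in editor:
            return editor["owner_id"] == user_id
        return record.get("user_created") == user_id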
     @responses.activate
     def test_other_users_cant_edit_dataset(self):
-        '''
+        """
         Ensure end users are unable to edit datasets not owned by them.
-        '''
-        response = self.client.get('/rest/datasets/1', format="json")
+        """
+        response = self.client.get("/rest/datasets/1", format="json")
         modified_data = response.data
-        modified_data['research_dataset']['value'] = 112233
+        modified_data["research_dataset"]["value"] = 112233

-        response = self.client.put('/rest/datasets/1', modified_data, format="json")
+        response = self.client.put("/rest/datasets/1", modified_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

-        response = self.client.put('/rest/datasets', [modified_data], format="json")
+        response = self.client.put("/rest/datasets", [modified_data], format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
         # ^ individual errors do not have error codes, only the general request
         # has an error code for a failed request.

@@ -3713,90 +4418,100 @@ def test_other_users_cant_edit_dataset(self):
     def test_user_can_delete_dataset(self):
         self._set_cr_owner_to_token_user(1)
         self._set_cr_to_permitted_catalog(1)
-        response = self.client.delete('/rest/datasets/1', format="json")
+        response = self.client.delete("/rest/datasets/1", format="json")
         self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)

     @responses.activate
     def test_user_file_permissions_are_checked_during_dataset_create(self):
-        '''
+        """
         Ensure user's association with a project is checked during dataset create when
         attaching files or directories to a dataset.
-        '''
+        """
         # try creating without proper permissions
-        self.cr_test_data['data_catalog'] = END_USER_ALLOWED_DATA_CATALOGS[0] # ida
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["data_catalog"] = END_USER_ALLOWED_DATA_CATALOGS[0]  # ida
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.content)

         # add project membership to user's token and try again
-        file_identifier = self.cr_test_data['research_dataset']['files'][0]['identifier']
+        file_identifier = self.cr_test_data["research_dataset"]["files"][0]["identifier"]
         project_identifier = File.objects.get(identifier=file_identifier).project_identifier
-        self.token['group_names'].append('IDA01:%s' % project_identifier)
-        self._use_http_authorization(method='bearer', token=self.token)
+        self.token["group_names"].append("IDA01:%s" % project_identifier)
+        self._use_http_authorization(method="bearer", token=self.token)

-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
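The create test above shows how project membership travels in the bearer token: a group name of the form IDA01:<project_identifier> in group_names. The corresponding server-side check is essentially a membership test — a minimal sketch (the function name is hypothetical; the IDA01 prefix comes straight from the test data):

    def user_has_project_access(token, project_identifier):
        # Membership in IDA project X is encoded in the token as "IDA01:X".
        return "IDA01:%s" % project_identifier in token.get("group_names", [])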
     @responses.activate
     def test_user_file_permissions_are_checked_during_dataset_update(self):
-        '''
+        """
         Ensure user's association with a project is checked during dataset update when
         attaching files or directories to a dataset. The permissions should be checked
         only for changed files (newly added, or removed).
-        '''
+        """
         # get some files to add to another dataset
-        response = self.client.get('/rest/datasets/2', format="json")
-        new_files = response.data['research_dataset']['files']
+        response = self.client.get("/rest/datasets/2", format="json")
+        new_files = response.data["research_dataset"]["files"]

         # this is the dataset we'll modify
         self._set_cr_owner_to_token_user(1)
         self._set_cr_to_permitted_catalog(1)
-        response = self.client.get('/rest/datasets/1', format="json")
+        response = self.client.get("/rest/datasets/1", format="json")

         # ensure the files really are new
         for f in new_files:
-            for existing_f in response.data['research_dataset']['files']:
-                assert f['identifier'] != existing_f['identifier'], 'test preparation failure, files should differ'
+            for existing_f in response.data["research_dataset"]["files"]:
+                assert (
+                    f["identifier"] != existing_f["identifier"]
+                ), "test preparation failure, files should differ"

         modified_data = response.data
-        modified_data['research_dataset']['files'].extend(new_files)
+        modified_data["research_dataset"]["files"].extend(new_files)

         # should fail, since user's token has no permission for the newly added files
-        response = self.client.put('/rest/datasets/1', modified_data, format="json")
+        response = self.client.put("/rest/datasets/1", modified_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.content)

         # add project membership to user's token and try again
-        project_identifier = File.objects.get(identifier=new_files[0]['identifier']).project_identifier
-        self.token['group_names'].append('IDA01:%s' % project_identifier)
-        self._use_http_authorization(method='bearer', token=self.token)
+        project_identifier = File.objects.get(
+            identifier=new_files[0]["identifier"]
+        ).project_identifier
+        self.token["group_names"].append("IDA01:%s" % project_identifier)
+        self._use_http_authorization(method="bearer", token=self.token)

-        response = self.client.put('/rest/datasets/1', modified_data, format="json")
+        response = self.client.put("/rest/datasets/1", modified_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
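The next test fixes the filtering contract: email, telephone and phone are stripped from research_dataset actors for everyone except the owner (and privileged services). Roughly, that filter amounts to the following — a hypothetical sketch covering only the curator field the test uses:

    SENSITIVE_FIELDS = ("email", "telephone", "phone")

    def strip_sensitive_fields(research_dataset):
        # Remove contact details from curator entries before returning the
        # dataset to a non-owner.
        for actor in research_dataset.get("curator", []):
            for field in SENSITIVE_FIELDS:
                actor.pop(field, None)
        return research_dataset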
     @responses.activate
     def test_owner_receives_unfiltered_dataset_data(self):
-        '''
+        """
         The general public will have some fields filtered out from the dataset, in order
         to protect sensitive data. The owner of a dataset, however, should always receive full data.
-        '''
+        """
         self._set_cr_owner_to_token_user(1)

         def _check_fields(obj):
-            for sensitive_field in ['email', 'telephone', 'phone']:
-                self.assertEqual(sensitive_field in obj['research_dataset']['curator'][0], True,
-                    'field %s should be present' % sensitive_field)
+            for sensitive_field in ["email", "telephone", "phone"]:
+                self.assertEqual(
+                    sensitive_field in obj["research_dataset"]["curator"][0],
+                    True,
+                    "field %s should be present" % sensitive_field,
+                )

         for cr in CatalogRecord.objects.filter(pk=1):
-            cr.research_dataset['curator'][0].update({
-                'email': 'email@mail.com',
-                'phone': '123124',
-                'telephone': '123124',
-            })
+            cr.research_dataset["curator"][0].update(
+                {
+                    "email": "email@mail.com",
+                    "phone": "123124",
+                    "telephone": "123124",
+                }
+            )
             cr.force_save()

-        response = self.client.get('/rest/datasets/1')
+        response = self.client.get("/rest/datasets/1")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         _check_fields(response.data)

+
 class CatalogRecordExternalServicesAccess(CatalogRecordApiWriteCommon):
     """
@@ -3809,187 +4524,218 @@ def setUp(self):
         """
         super().setUp()

-        self.dc = DataCatalog.objects.filter(catalog_json__research_dataset_schema='att').first()
-        self.dc.catalog_json['identifier'] = EXT_CATALOG
-        self.dc.catalog_json['harvested'] = True
-        self.dc.catalog_record_services_create = 'external'
-        self.dc.catalog_record_services_edit = 'external'
+        self.dc = DataCatalog.objects.filter(catalog_json__research_dataset_schema="att").first()
+        self.dc.catalog_json["identifier"] = EXT_CATALOG
+        self.dc.catalog_json["harvested"] = True
+        self.dc.catalog_record_services_create = "external"
+        self.dc.catalog_record_services_edit = "external"
         self.dc.force_save()

-        self.cr_test_data['data_catalog'] = self.dc.catalog_json['identifier']
-        del self.cr_test_data['research_dataset']['files']
-        del self.cr_test_data['research_dataset']['total_files_byte_size']
+        self.cr_test_data["data_catalog"] = self.dc.catalog_json["identifier"]
+        del self.cr_test_data["research_dataset"]["files"]
+        del self.cr_test_data["research_dataset"]["total_files_byte_size"]

-        self._use_http_authorization(username=django_settings.API_EXT_USER['username'],
-            password=django_settings.API_EXT_USER['password'])
+        self._use_http_authorization(
+            username=django_settings.API_EXT_USER["username"],
+            password=django_settings.API_EXT_USER["password"],
+        )

     def test_external_service_can_not_read_all_metadata_in_other_catalog(self):
-        ''' An external service should get the same output from someone else's catalog as an anonymous user '''
+        """ An external service should get the same output from someone else's catalog as an anonymous user """
         # create a catalog that does not belong to our external service
         dc2 = DataCatalog.objects.get(pk=2)
-        dc2.catalog_json['identifier'] = 'Some other catalog'
-        dc2.catalog_record_services_read = 'metax'
+        dc2.catalog_json["identifier"] = "Some other catalog"
+        dc2.catalog_record_services_read = "metax"
         dc2.force_save()

         # Create a catalog record that belongs to some other user & our catalog nr2
         cr = CatalogRecord.objects.get(pk=12)
-        cr.user_created = '#### Some owner who is not you ####'
-        cr.metadata_provider_user = '#### Some owner who is not you ####'
+        cr.user_created = "#### Some owner who is not you ####"
+        cr.metadata_provider_user = "#### Some owner who is not you ####"
         cr.data_catalog = dc2
         cr.editor = None
-        cr.research_dataset['access_rights']['access_type']['identifier'] = ACCESS_TYPES['restricted']
+        cr.research_dataset["access_rights"]["access_type"]["identifier"] = ACCESS_TYPES[
+            "restricted"
+        ]
         cr.force_save()

         # Let's try to read the data with our external service's credentials
-        response_service_user = self.client.get('/rest/datasets/12')
-        self.assertEqual(response_service_user.status_code, status.HTTP_200_OK, response_service_user.data)
+        response_service_user = self.client.get("/rest/datasets/12")
+        self.assertEqual(
+            response_service_user.status_code,
+            status.HTTP_200_OK,
+            response_service_user.data,
+        )

         # Test access as unauthenticated user
         self.client._credentials = {}
-        response_anonymous = self.client.get('/rest/datasets/12')
-        self.assertEqual(response_anonymous.status_code, status.HTTP_200_OK, response_anonymous.data)
+        response_anonymous = self.client.get("/rest/datasets/12")
+        self.assertEqual(
+            response_anonymous.status_code, status.HTTP_200_OK, response_anonymous.data
+        )

-        self.assertEqual(response_anonymous.data, response_service_user.data,
-            "External service with no read-rights should not see any more metadata than an anonymous user from a catalog")
+        self.assertEqual(
+            response_anonymous.data,
+            response_service_user.data,
+            "External service with no read-rights should not see any more metadata than an anonymous user from a catalog",
+        )

     def assert_catalog_record_not_open_access(self, cr):
         from metax_api.models.catalog_record import ACCESS_TYPES
-        access_type = cr['research_dataset'].get('access_rights', {}).get('access_type', {}).get('identifier', '')
-        assert(access_type != ACCESS_TYPES['open'])
+
+        access_type = (
+            cr["research_dataset"]
+            .get("access_rights", {})
+            .get("access_type", {})
+            .get("identifier", "")
+        )
+        assert access_type != ACCESS_TYPES["open"]

     def test_external_service_can_add_catalog_record_to_own_catalog(self):
-        self.cr_test_data['research_dataset']['preferred_identifier'] = '123456'
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["research_dataset"]["preferred_identifier"] = "123456"
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual(response.data['research_dataset']['preferred_identifier'], '123456')
+        self.assertEqual(response.data["research_dataset"]["preferred_identifier"], "123456")

     def test_external_service_can_update_catalog_record_in_own_catalog(self):
-        self.cr_test_data['research_dataset']['preferred_identifier'] = '123456'
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["research_dataset"]["preferred_identifier"] = "123456"
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual(response.data['research_dataset']['preferred_identifier'], '123456')
+        self.assertEqual(response.data["research_dataset"]["preferred_identifier"], "123456")

-        cr_id = response.data['id']
-        self.cr_test_data['research_dataset']['preferred_identifier'] = '654321'
-        response = self.client.put('/rest/datasets/{}'.format(cr_id), self.cr_test_data, format="json")
+        cr_id = response.data["id"]
+        self.cr_test_data["research_dataset"]["preferred_identifier"] = "654321"
+        response = self.client.put(
+            "/rest/datasets/{}".format(cr_id), self.cr_test_data, format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(response.data['research_dataset']['preferred_identifier'], '654321')
+        self.assertEqual(response.data["research_dataset"]["preferred_identifier"], "654321")

     def test_external_service_can_delete_catalog_record_from_own_catalog(self):
-        self.cr_test_data['research_dataset']['preferred_identifier'] = '123456'
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["research_dataset"]["preferred_identifier"] = "123456"
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")

-        cr_id = response.data['id']
-        response = self.client.delete('/rest/datasets/{}'.format(cr_id))
+        cr_id = response.data["id"]
+        response = self.client.delete("/rest/datasets/{}".format(cr_id))
         self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)

-        response = self.client.get('/rest/datasets/{}'.format(cr_id), format="json")
-        self.assertEqual('not found' in response.json()['detail'].lower(), True)
+        response = self.client.get("/rest/datasets/{}".format(cr_id), format="json")
+        self.assertEqual("not found" in response.json()["detail"].lower(), True)

     def test_external_service_can_not_add_catalog_record_to_other_catalog(self):
-        dc = self._get_object_from_test_data('datacatalog', requested_index=1)
-        self.cr_test_data['data_catalog'] = dc['catalog_json']['identifier']
-        self.cr_test_data['research_dataset']['preferred_identifier'] = 'temp-pid'
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        dc = self._get_object_from_test_data("datacatalog", requested_index=1)
+        self.cr_test_data["data_catalog"] = dc["catalog_json"]["identifier"]
+        self.cr_test_data["research_dataset"]["preferred_identifier"] = "temp-pid"
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)

     def test_external_service_can_not_update_catalog_record_in_other_catalog(self):
-        response = self.client.put('/rest/datasets/1', {}, format="json")
+        response = self.client.put("/rest/datasets/1", {}, format="json")
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)

     def test_external_service_can_not_delete_catalog_record_from_other_catalog(self):
-        response = self.client.delete('/rest/datasets/1')
+        response = self.client.delete("/rest/datasets/1")
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)

     def test_harvested_catalogs_must_have_preferred_identifier_create(self):
         # create without preferred identifier
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
-        self.assertEqual('must have preferred identifier' in
-            response.data['research_dataset']['preferred_identifier'][0], True)
+        self.assertEqual(
+            "must have preferred identifier"
+            in response.data["research_dataset"]["preferred_identifier"][0],
+            True,
+        )

-        self.cr_test_data['research_dataset']['preferred_identifier'] = ''
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["research_dataset"]["preferred_identifier"] = ""
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
-        self.assertEqual('must have preferred identifier' in
-            response.data['research_dataset']['preferred_identifier'][0], True)
+        self.assertEqual(
+            "must have preferred identifier"
+            in response.data["research_dataset"]["preferred_identifier"][0],
+            True,
+        )


-@unittest.skipIf(django_settings.REMS['ENABLED'] is not True, 'Only run if REMS is enabled')
+@unittest.skipIf(django_settings.REMS["ENABLED"] is not True, "Only run if REMS is enabled")
 class CatalogRecordApiWriteREMS(CatalogRecordApiWriteCommon):
     cache = RedisClient()
     rf = RDM.get_reference_data(cache)
     # get by code to prevent failures if list ordering changes
-    access_permit = [type for type in rf['reference_data']['access_type'] if type['code'] == 'permit'][0]
-    access_open = [type for type in rf['reference_data']['access_type'] if type['code'] == 'open'][0]
+    access_permit = [
+        type for type in rf["reference_data"]["access_type"] if type["code"] == "permit"
+    ][0]
+    access_open = [type for type in rf["reference_data"]["access_type"] if type["code"] == "open"][
+        0
+    ]

     permit_rights = {
         # license type does not matter
         "license": [
             {
-                "title": rf['reference_data']['license'][0]['label'],
-                "identifier": rf['reference_data']['license'][0]['uri']
+                "title": rf["reference_data"]["license"][0]["label"],
+                "identifier": rf["reference_data"]["license"][0]["uri"],
             }
         ],
         "access_type": {
-            "in_scheme": access_permit['scheme'],
-            "identifier": access_permit['uri'],
-            "pref_label": access_permit['label']
-        }
+            "in_scheme": access_permit["scheme"],
+            "identifier": access_permit["uri"],
+            "pref_label": access_permit["label"],
+        },
     }

     open_rights = {
         "access_type": {
-            "in_scheme": access_open['scheme'],
-            "identifier": access_open['uri'],
-            "pref_label": access_open['label']
+            "in_scheme": access_open["scheme"],
+            "identifier": access_open["uri"],
+            "pref_label": access_open["label"],
         }
     }

     # any other than what is included in permit_rights is sufficient
-    other_license = rf['reference_data']['license'][1]
+    other_license = rf["reference_data"]["license"][1]

     def setUp(self):
         super().setUp()
         # Create ida data catalog
-        dc = self._get_object_from_test_data('datacatalog', requested_index=0)
+        dc = self._get_object_from_test_data("datacatalog", requested_index=0)
         dc_id = IDA_CATALOG
-        dc['catalog_json']['identifier'] = dc_id
-        self.client.post('/rest/datacatalogs', dc, format="json")
+        dc["catalog_json"]["identifier"] = dc_id
+        self.client.post("/rest/datacatalogs", dc, format="json")

         # token for end user access
         self.token = get_test_oidc_token(new_proxy=True)

         # mock successful rems access for creation, add fails later if needed.
         # Not using regex to allow individual access failures
-        for entity in ['user', 'workflow', 'license', 'resource', 'catalogue-item']:
-            self._mock_rems_write_access_succeeds('POST', entity, 'create')
+        for entity in ["user", "workflow", "license", "resource", "catalogue-item"]:
+            self._mock_rems_write_access_succeeds("POST", entity, "create")

-        self._mock_rems_read_access_succeeds('license')
+        self._mock_rems_read_access_succeeds("license")

         # mock successful rems access for deletion. Failures are added later if needed
-        for entity in ['catalogue-item', 'workflow', 'resource']:
-            self._mock_rems_write_access_succeeds(method='PUT', entity=entity, action='archived')
-            self._mock_rems_write_access_succeeds(method='PUT', entity=entity, action='enabled')
+        for entity in ["catalogue-item", "workflow", "resource"]:
+            self._mock_rems_write_access_succeeds(method="PUT", entity=entity, action="archived")
+            self._mock_rems_write_access_succeeds(method="PUT", entity=entity, action="enabled")

-        self._mock_rems_read_access_succeeds('catalogue-item')
-        self._mock_rems_read_access_succeeds('application')
-        self._mock_rems_write_access_succeeds(method='POST', entity='application', action='close')
+        self._mock_rems_read_access_succeeds("catalogue-item")
+        self._mock_rems_read_access_succeeds("application")
+        self._mock_rems_write_access_succeeds(method="POST", entity="application", action="close")

         responses.add(
             responses.GET,
             f"{django_settings.REMS['BASE_URL']}/health",
-            json={'healthy': True},
-            status=200
+            json={"healthy": True},
+            status=200,
         )

     def _get_access_granter(self, malformed=False):
@@ -3999,7 +4745,7 @@ def _get_access_granter(self, malformed=False):
         access_granter = {
             "userid": "testcaseuser" if not malformed else 1234,
             "name": "Test User",
-            "email": "testcase@user.com"
+            "email": "testcase@user.com",
         }

         return access_granter

@@ -4010,23 +4756,23 @@ def _mock_rems_write_access_succeeds(self, method, entity, action):
             entity: REMS entity [application, catalogue-item, license, resource, user, workflow]
             action: Action taken to entity [archived, close, create, edit, enabled]
         """
-        req_type = responses.POST if method == 'POST' else responses.PUT
+        req_type = responses.POST if method == "POST" else responses.PUT

         body = {"success": True}

-        if method == 'POST' and action != 'close':
+        if method == "POST" and action != "close":
             # action condition needed because applications are closed with POST method
-            body['id'] = 6
+            body["id"] = 6

         responses.add(
             req_type,
             f"{django_settings.REMS['BASE_URL']}/{entity}s/{action}",
             json=body,
-            status=200
+            status=200,
         )

     def _mock_rems_read_access_succeeds(self, entity):
-        if entity == 'license':
+        if entity == "license":
             resp = [
                 {
                     "id": 7,
@@ -4035,14 +4781,14 @@ def _mock_rems_read_access_succeeds(self, entity):
                     "archived": False,
                     "localizations": {
                         "fi": {
-                            "title": self.rf['reference_data']['license'][0]['label']['fi'],
-                            "textcontent": self.rf['reference_data']['license'][0]['uri']
+                            "title": self.rf["reference_data"]["license"][0]["label"]["fi"],
+                            "textcontent": self.rf["reference_data"]["license"][0]["uri"],
                         },
                         "und": {
-                            "title": self.rf['reference_data']['license'][0]['label']['und'],
-                            "textcontent": self.rf['reference_data']['license'][0]['uri']
-                        }
-                    }
+                            "title": self.rf["reference_data"]["license"][0]["label"]["und"],
+                            "textcontent": self.rf["reference_data"]["license"][0]["uri"],
+                        },
+                    },
                 },
                 {
                     "id": 8,
@@ -4051,14 +4797,14 @@ def _mock_rems_read_access_succeeds(self, entity):
                     "archived": False,
                     "localizations": {
                         "en": {
-                            "title": self.rf['reference_data']['license'][1]['label']['en'],
-                            "textcontent": self.rf['reference_data']['license'][1]['uri']
+                            "title": self.rf["reference_data"]["license"][1]["label"]["en"],
+                            "textcontent": self.rf["reference_data"]["license"][1]["uri"],
                         }
-                    }
-                }
+                    },
+                },
             ]

-        elif entity == 'catalogue-item':
+        elif entity == "catalogue-item":
             resp = [
                 {
                     "archived": False,
@@ -4067,7 +4813,7 @@ def _mock_rems_read_access_succeeds(self, entity):
                             "id": 18,
                             "langcode": "en",
                             "title": "Removal test",
-                            "infourl": "https://url.to.etsin.fi"
+                            "infourl": "https://url.to.etsin.fi",
                         }
                     },
                     "resource-id": 19,
@@ -4078,113 +4824,97 @@ def _mock_rems_read_access_succeeds(self, entity):
                     "id": 18,
                     "expired": False,
                     "end": None,
-                    "enabled": True
+                    "enabled": True,
                 }
             ]

-        elif entity == 'application':
+        elif entity == "application":
             # only mock relevant data
             resp = [
                 {
-                    'application/workflow': {
-                        'workflow.dynamic/handlers': [
-                            {
-                                'userid': 'somehandler'
-                            }
-                        ]
+                    "application/workflow": {
+                        "workflow.dynamic/handlers": [{"userid": "somehandler"}]
                     },
                     "application/id": 3,
-                    'application/applicant': {
-                        'userid': 'someapplicant'
-                    },
+                    "application/applicant": {"userid": "someapplicant"},
                     "application/resources": [
                         {
-                            "catalogue-item/title": {
-                                "en": "Removal test"
-                            },
+                            "catalogue-item/title": {"en": "Removal test"},
                             "resource/ext-id": "some:pref:id",
-                            "catalogue-item/id": 5
+                            "catalogue-item/id": 5,
                         }
                     ],
-                    "application/state": 'application.state/draft'
+                    "application/state": "application.state/draft",
                 },
                 {
-                    'application/workflow': {
-                        'workflow.dynamic/handlers': [
-                            {
-                                'userid': 'someid'
-                            }
-                        ]
-                    },
+                    "application/workflow": {"workflow.dynamic/handlers": [{"userid": "someid"}]},
                     "application/id": 2,
-                    'application/applicant': {
-                        'userid': 'someotherapplicant'
-                    },
+                    "application/applicant": {"userid": "someotherapplicant"},
                     "application/resources": [
                         {
-                            "catalogue-item/title": {
-                                "en": "Removal test"
-                            },
+                            "catalogue-item/title": {"en": "Removal test"},
                             "resource/ext-id": "some:pref:id",
-                            "catalogue-item/id": 5
+                            "catalogue-item/id": 5,
                         }
                     ],
-                    "application/state": 'application.state/approved'
+                    "application/state": "application.state/approved",
                 },
                 {
-                    'application/workflow': {
-                        'workflow.dynamic/handlers': [
-                            {
-                                'userid': 'remsuid'
-                            }
-                        ]
-                    },
+                    "application/workflow": {"workflow.dynamic/handlers": [{"userid": "remsuid"}]},
                     "application/id": 1,
-                    'application/applicant': {
-                        'userid': 'someapplicant'
-                    },
+                    "application/applicant": {"userid": "someapplicant"},
                     "application/resources": [
                         {
-                            "catalogue-item/title": {
-                                "en": "Removal test"
-                            },
-                            "resource/ext-id": 'Same:title:with:different:catalogue:item',
-                            "catalogue-item/id": 18
+                            "catalogue-item/title": {"en": "Removal test"},
+                            "resource/ext-id": "Same:title:with:different:catalogue:item",
+                            "catalogue-item/id": 18,
                         }
                     ],
-                    "application/state": 'application.state/draft'
-                }
+                    "application/state": "application.state/draft",
+                },
             ]

         responses.add(
             responses.GET,
             f"{django_settings.REMS['BASE_URL']}/{entity}s",
             json=resp,
-            status=200
+            status=200,
         )

-    def _mock_rems_access_return_403(self, method, entity, action=''):
+    def _mock_rems_access_return_403(self, method, entity, action=""):
         """
         Works also for GET method since failure responses from rems are identical for write and read operations
         """
-        req_type = responses.POST if method == 'POST' else responses.PUT if method == 'PUT' else responses.GET
+        req_type = (
+            responses.POST
+            if method == "POST"
+            else responses.PUT
+            if method == "PUT"
+            else responses.GET
+        )

         responses.replace(
             req_type,
             f"{django_settings.REMS['BASE_URL']}/{entity}s/{action}",
-            status=403 # anything else than 200 is a fail
+            status=403,  # anything else than 200 is a fail
         )

-    def _mock_rems_access_return_error(self, method, entity, action=''):
+    def _mock_rems_access_return_error(self, method, entity, action=""):
         """
         Operation status is defined in the body, so a 200 response can also be a failure.
         """
-        req_type = responses.POST if method == 'POST' else responses.PUT if method == 'PUT' else responses.GET
+        req_type = (
+            responses.POST
+            if method == "POST"
+            else responses.PUT
+            if method == "PUT"
+            else responses.GET
+        )

         errors = [
             {
                 "type": "some kind of identifier of this error",
-                "somedetail": "entity identifier the error is concerning"
+                "somedetail": "entity identifier the error is concerning",
             }
         ]

@@ -4192,30 +4922,36 @@ def _mock_rems_access_return_error(self, method, entity, action=''):
             req_type,
             f"{django_settings.REMS['BASE_URL']}/{entity}s/{action}",
             json={"success": False, "errors": errors},
-            status=200
+            status=200,
         )

-    def _mock_rems_access_crashes(self, method, entity, action=''):
+    def _mock_rems_access_crashes(self, method, entity, action=""):
         """
         A crash happens, for example, if there is a network error. Can be used for GET as well.
         """
-        req_type = responses.POST if method == 'POST' else responses.PUT if method == 'PUT' else responses.GET
+        req_type = (
+            responses.POST
+            if method == "POST"
+            else responses.PUT
+            if method == "PUT"
+            else responses.GET
+        )

         responses.replace(
             req_type,
             f"{django_settings.REMS['BASE_URL']}/{entity}s/{action}",
-            body=Exception('REMS_service should catch this one also')
+            body=Exception("REMS_service should catch this one also"),
         )

     def _create_new_rems_dataset(self):
         """
         Modifies the catalog record to be REMS managed and posts it to Metax
         """
-        self.cr_test_data['research_dataset']['access_rights'] = self.permit_rights
-        self.cr_test_data['data_catalog'] = IDA_CATALOG
-        self.cr_test_data['access_granter'] = self._get_access_granter()
+        self.cr_test_data["research_dataset"]["access_rights"] = self.permit_rights
+        self.cr_test_data["data_catalog"] = IDA_CATALOG
+        self.cr_test_data["access_granter"] = self._get_access_granter()

-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")

         return response

@@ -4226,26 +4962,32 @@ def test_creating_permit_dataset_creates_catalogue_item_service_succeeds(self):
         """
         response = self._create_new_rems_dataset()
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertTrue(response.data.get('rems_identifier') is not None, 'rems_identifier should be present')
-        self.assertTrue(response.data.get('access_granter') is not None, 'access_granter should be present')
+        self.assertTrue(
+            response.data.get("rems_identifier") is not None,
+            "rems_identifier should be present",
+        )
+        self.assertTrue(
+            response.data.get("access_granter") is not None,
+            "access_granter should be present",
+        )

     @responses.activate
     def test_creating_permit_dataset_creates_catalogue_item_service_fails_1(self):
         """
         Test unsuccessful rems access
         """
-        self._mock_rems_access_return_403('POST', 'workflow', 'create')
+        self._mock_rems_access_return_403("POST", "workflow", "create")

         response = self._create_new_rems_dataset()
         self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data)
-        self.assertTrue('failed to publish updates' in response.data['detail'][0], response.data)
+        self.assertTrue("failed to publish updates" in response.data["detail"][0], response.data)

     @responses.activate
     def test_creating_permit_dataset_creates_catalogue_item_service_fails_2(self):
         """
         Test unsuccessful rems access
         """
-        self._mock_rems_access_return_error('POST', 'catalogue-item', 'create')
+        self._mock_rems_access_return_error("POST", "catalogue-item", "create")

         response = self._create_new_rems_dataset()
         self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data)
@@ -4255,11 +4997,11 @@ def test_creating_permit_dataset_creates_catalogue_item_service_fails_3(self):
         """
         Test unsuccessful rems access
         """
-        self._mock_rems_access_crashes('POST', 'resource', 'create')
+        self._mock_rems_access_crashes("POST", "resource", "create")

         response = self._create_new_rems_dataset()
         self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data)
-        self.assertTrue('failed to publish updates' in response.data['detail'][0], response.data)
+        self.assertTrue("failed to publish updates" in response.data["detail"][0], response.data)

     @responses.activate
     def test_changing_dataset_to_permit_creates_new_catalogue_item_succeeds(self):
@@ -4268,40 +5010,46 @@ def test_changing_dataset_to_permit_creates_new_catalogue_item_succeeds(self):
         """
         # create dataset without rems managed access
-        self.cr_test_data['research_dataset']['access_rights'] = self.open_rights
-        self.cr_test_data['data_catalog'] = IDA_CATALOG
+        self.cr_test_data["research_dataset"]["access_rights"] = self.open_rights
+        self.cr_test_data["data_catalog"] = IDA_CATALOG

-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

         # change to rems managed
         cr = response.data
-        cr['research_dataset']['access_rights'] = self.permit_rights
-        cr['access_granter'] = self._get_access_granter()
+        cr["research_dataset"]["access_rights"] = self.permit_rights
+        cr["access_granter"] = self._get_access_granter()

         response = self.client.put(f'/rest/datasets/{cr["id"]}', cr, format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertTrue(response.data.get('rems_identifier') is not None, 'rems_identifier should be present')
-        self.assertTrue(response.data.get('access_granter') is not None, 'access_granter should be present')
+        self.assertTrue(
+            response.data.get("rems_identifier") is not None,
+            "rems_identifier should be present",
+        )
+        self.assertTrue(
+            response.data.get("access_granter") is not None,
+            "access_granter should be present",
+        )

     @responses.activate
     def test_changing_dataset_to_permit_creates_new_catalogue_item_fails(self):
         """
         Test error handling on metax update operation
         """
-        self._mock_rems_access_return_error('POST', 'user', 'create')
+        self._mock_rems_access_return_error("POST", "user", "create")

         # create dataset without rems managed access
-        self.cr_test_data['research_dataset']['access_rights'] = self.open_rights
-        self.cr_test_data['data_catalog'] = IDA_CATALOG
+        self.cr_test_data["research_dataset"]["access_rights"] = self.open_rights
+        self.cr_test_data["data_catalog"] = IDA_CATALOG

-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

         # change to rems managed
         cr = response.data
-        cr['research_dataset']['access_rights'] = self.permit_rights
-        cr['access_granter'] = self._get_access_granter()
+        cr["research_dataset"]["access_rights"] = self.permit_rights
+        cr["access_granter"] = self._get_access_granter()

         response = self.client.put(f'/rest/datasets/{cr["id"]}', cr, format="json")
         self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data)
@@ -4312,7 +5060,7 @@ def test_changing_access_type_to_other_closes_rems_entities_succeeds(self):
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

         cr = response.data
-        cr['research_dataset']['access_rights'] = self.open_rights
+        cr["research_dataset"]["access_rights"] = self.open_rights

         response = self.client.put(f'/rest/datasets/{cr["id"]}', cr, format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
@@ -4322,10 +5070,10 @@ def test_changing_access_type_to_other_closes_rems_entities_fails(self):
         response = self._create_new_rems_dataset()
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

-        self._mock_rems_access_return_error('POST', 'application', 'close')
+        self._mock_rems_access_return_error("POST", "application", "close")

         cr = response.data
-        cr['research_dataset']['access_rights'] = self.open_rights
+        cr["research_dataset"]["access_rights"] = self.open_rights

         response = self.client.put(f'/rest/datasets/{cr["id"]}', cr, format="json")
         self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data)
@@ -4341,11 +5089,11 @@ def test_changing_dataset_license_updates_rems(self):

         cr_before = response.data

-        rems_id_before = cr_before['rems_identifier']
-        cr_before['research_dataset']['access_rights']['license'] = [
+        rems_id_before = cr_before["rems_identifier"]
+        cr_before["research_dataset"]["access_rights"]["license"] = [
             {
-                "title": self.other_license['label'],
-                "identifier": self.other_license['uri']
+                "title": self.other_license["label"],
+                "identifier": self.other_license["uri"],
             }
         ]

@@ -4353,7 +5101,11 @@ def test_changing_dataset_license_updates_rems(self):
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

         cr_after = response.data
-        self.assertNotEqual(rems_id_before, cr_after['rems_identifier'], 'REMS identifier should have been changed')
+        self.assertNotEqual(
+            rems_id_before,
+            cr_after["rems_identifier"],
+            "REMS identifier should have been changed",
+        )

     @responses.activate
     def test_changing_license_dont_allow_access_granter_changes(self):
@@ -4366,18 +5118,20 @@ def test_changing_license_dont_allow_access_granter_changes(self):

         cr_before = response.data

-        cr_before['access_granter']['userid'] = 'newid'
-        cr_before['research_dataset']['access_rights']['license'] = [
-            {
-                "identifier": self.other_license['uri']
-            }
+        cr_before["access_granter"]["userid"] = "newid"
+        cr_before["research_dataset"]["access_rights"]["license"] = [
+            {"identifier": self.other_license["uri"]}
         ]

         response = self.client.put(f'/rest/datasets/{cr_before["id"]}', cr_before, format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

         cr_after = response.data
-        self.assertNotEqual('newid', cr_after['access_granter']['userid'], 'userid should not have been changed')
+        self.assertNotEqual(
+            "newid",
+            cr_after["access_granter"]["userid"],
+            "userid should not have been changed",
+        )

     @responses.activate
     def test_deleting_license_updates_rems(self):
@@ -4389,14 +5143,20 @@ def test_deleting_license_updates_rems(self):

         cr_before = response.data

-        cr_before['research_dataset']['access_rights'].pop('license')
+        cr_before["research_dataset"]["access_rights"].pop("license")

         response = self.client.put(f'/rest/datasets/{cr_before["id"]}', cr_before, format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

         cr_after = response.data
-        self.assertTrue(cr_after.get('rems_identifier') is None, 'REMS identifier should have been deleted')
-        self.assertTrue(cr_after.get('access_granter') is None, 'access_granter should have been deleted')
+        self.assertTrue(
+            cr_after.get("rems_identifier") is None,
+            "REMS identifier should have been deleted",
+        )
+        self.assertTrue(
+            cr_after.get("access_granter") is None,
+            "access_granter should have been deleted",
+        )

     @responses.activate
     def test_creating_permit_dataset_creates_catalogue_item_end_user(self):
@@ -4404,20 +5164,20 @@ def test_creating_permit_dataset_creates_catalogue_item_end_user(self):
         Tests that catalogue item in REMS is created correctly on permit dataset creation.
         User information is fetched from the token.
         """
-        self._set_http_authorization('owner')
+        self._set_http_authorization("owner")

         # modify catalog record
-        self.cr_test_data['user_created'] = self.token['CSCUserName']
-        self.cr_test_data['metadata_provider_user'] = self.token['CSCUserName']
-        self.cr_test_data['metadata_provider_org'] = self.token['schacHomeOrganization']
-        self.cr_test_data['metadata_owner_org'] = self.token['schacHomeOrganization']
-        self.cr_test_data['research_dataset']['access_rights'] = self.permit_rights
-        self.cr_test_data['data_catalog'] = IDA_CATALOG
+        self.cr_test_data["user_created"] = self.token["CSCUserName"]
+        self.cr_test_data["metadata_provider_user"] = self.token["CSCUserName"]
+        self.cr_test_data["metadata_provider_org"] = self.token["schacHomeOrganization"]
+        self.cr_test_data["metadata_owner_org"] = self.token["schacHomeOrganization"]
+        self.cr_test_data["research_dataset"]["access_rights"] = self.permit_rights
+        self.cr_test_data["data_catalog"] = IDA_CATALOG

         # end user doesn't have permissions to the files and they are also not needed in this test
-        del self.cr_test_data['research_dataset']['files']
+        del self.cr_test_data["research_dataset"]["files"]

-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

     @responses.activate
@@ -4425,15 +5185,15 @@ def test_deleting_permit_dataset_removes_catalogue_item_succeeds(self):
         response = self._create_new_rems_dataset()
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

-        cr_id = response.data['id']
+        cr_id = response.data["id"]

         # delete dataset
-        response = self.client.delete(f'/rest/datasets/{cr_id}')
+        response = self.client.delete(f"/rest/datasets/{cr_id}")
         self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)

-        cr = self.client.get(f'/rest/datasets/{cr_id}?removed').data
-        self.assertTrue(cr.get('rems_identifier') is None, 'rems_identifier should not be present')
-        self.assertTrue(cr.get('access_granter') is None, 'access_granter should not be present')
+        cr = self.client.get(f"/rest/datasets/{cr_id}?removed").data
+        self.assertTrue(cr.get("rems_identifier") is None, "rems_identifier should not be present")
+        self.assertTrue(cr.get("access_granter") is None, "access_granter should not be present")

     @responses.activate
     def test_deleting_permit_dataset_removes_catalogue_item_fails(self):
@@ -4441,7 +5201,7 @@ def test_deleting_permit_dataset_removes_catalogue_item_fails(self):
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

         # delete dataset
-        self._mock_rems_access_return_error('PUT', 'catalogue-item', 'enabled')
+        self._mock_rems_access_return_error("PUT", "catalogue-item", "enabled")

         response = self.client.delete(f'/rest/datasets/{response.data["id"]}')
         self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data)
@@ -4453,12 +5213,20 @@ def test_deprecating_permit_dataset_removes_catalogue_item_succeeds(self):
         cr_before = response.data

         # deprecate dataset
-        response = self.client.delete(f"/rest/files/{cr_before['research_dataset']['files'][0]['identifier']}")
+        response = self.client.delete(
+            f"/rest/files/{cr_before['research_dataset']['files'][0]['identifier']}"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

         cr_after = self.client.get(f'/rest/datasets/{cr_before["id"]}').data
-        self.assertTrue(cr_after.get('rems_identifier') is None, 'rems_identifier should not be present')
-        self.assertTrue(cr_after.get('access_granter') is None, 'access_granter should not be present')
+        self.assertTrue(
+            cr_after.get("rems_identifier") is None,
+            "rems_identifier should not be present",
+        )
+        self.assertTrue(
+            cr_after.get("access_granter") is None,
+            "access_granter should not be present",
+        )

     @responses.activate
     def test_deprecating_permit_dataset_removes_catalogue_item_fails(self):
@@ -4466,11 +5234,13 @@ def test_deprecating_permit_dataset_removes_catalogue_item_fails(self):
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

         # deprecate dataset
-        self._mock_rems_access_crashes('PUT', 'workflow', 'archived')
+        self._mock_rems_access_crashes("PUT", "workflow", "archived")

-        response = self.client.delete(f"/rest/files/{response.data['research_dataset']['files'][0]['identifier']}")
+        response = self.client.delete(
+            f"/rest/files/{response.data['research_dataset']['files'][0]['identifier']}"
+        )
         self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data)
-        self.assertTrue('failed to publish' in response.data['detail'][0], response.data)
+        self.assertTrue("failed to publish" in response.data["detail"][0], response.data)

     def test_missing_access_granter(self):
         """
@@ -4479,70 +5249,78 @@ def test_missing_access_granter(self):
         """

         # test on create
-        self.cr_test_data['research_dataset']['access_rights'] = self.permit_rights
-        self.cr_test_data['data_catalog'] = IDA_CATALOG
+        self.cr_test_data["research_dataset"]["access_rights"] = self.permit_rights
+        self.cr_test_data["data_catalog"] = IDA_CATALOG

-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
-        self.assertTrue('access_granter' in response.data['detail'][0], response.data)
+        self.assertTrue("access_granter" in response.data["detail"][0], response.data)

         # test on update
-        self.cr_test_data['research_dataset']['access_rights'] = self.open_rights
-        response = self.client.post('/rest/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["research_dataset"]["access_rights"] = self.open_rights
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

         cr = response.data
-        cr['research_dataset']['access_rights'] = self.permit_rights
+        cr["research_dataset"]["access_rights"] = self.permit_rights

         response = self.client.put(f'/rest/datasets/{cr["id"]}', cr, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
-        self.assertTrue('access_granter' in response.data['detail'][0], response.data)
+        self.assertTrue("access_granter" in response.data["detail"][0], response.data)

     def test_bad_access_granter_parameter(self):
         """
         Access_granter values must be strings
         """
-        self.cr_test_data['research_dataset']['access_rights'] = self.permit_rights
-        self.cr_test_data['data_catalog'] = IDA_CATALOG
-        self.cr_test_data['access_granter'] = self._get_access_granter(malformed=True)
+        self.cr_test_data["research_dataset"]["access_rights"] = self.permit_rights
+        self.cr_test_data["data_catalog"] = IDA_CATALOG
+        self.cr_test_data["access_granter"] = self._get_access_granter(malformed=True)

-        response = self.client.post(
-            '/rest/datasets',
-            self.cr_test_data,
-            format="json"
-        )
+        response = self.client.post("/rest/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
-        self.assertTrue('must be string' in response.data['detail'][0], response.data)
+        self.assertTrue("must be string" in response.data["detail"][0], response.data)

     def test_missing_license_in_dataset(self):
         """
         License is required when dataset is REMS managed
         """
-        self.cr_test_data['research_dataset']['access_rights'] = deepcopy(self.permit_rights)
-        del self.cr_test_data['research_dataset']['access_rights']['license']
-        self.cr_test_data['data_catalog'] = IDA_CATALOG
+        self.cr_test_data["research_dataset"]["access_rights"] = deepcopy(self.permit_rights)
+        del self.cr_test_data["research_dataset"]["access_rights"]["license"]
+        self.cr_test_data["data_catalog"] = IDA_CATALOG

         response = self.client.post(
-            f'/rest/datasets?access_granter={self._get_access_granter()}',
+            f"/rest/datasets?access_granter={self._get_access_granter()}",
             self.cr_test_data,
-            format="json"
+            format="json",
         )
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
-        self.assertTrue('must define license' in response.data['detail'][0], response.data)
+        self.assertTrue("must define license" in response.data["detail"][0], response.data)

     @responses.activate
     def test_only_return_rems_info_to_privileged(self):
-        self._set_http_authorization('service')
+        self._set_http_authorization("service")

         response = self._create_new_rems_dataset()
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertTrue(response.data.get('rems_identifier') is not None, 'rems_identifier should be returned to owner')
-        self.assertTrue(response.data.get('access_granter') is not None, 'access_granter should be returned to owner')
+        self.assertTrue(
+            response.data.get("rems_identifier") is not None,
+            "rems_identifier should be returned to owner",
+        )
+        self.assertTrue(
+            response.data.get("access_granter") is not None,
+            "access_granter should be returned to owner",
+        )

-        self._set_http_authorization('no')
+        self._set_http_authorization("no")
         response = self.client.get(f'/rest/datasets/{response.data["id"]}')
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertTrue(response.data.get('rems_identifier') is None, 'rems_identifier should not be returned to Anon')
-        self.assertTrue(response.data.get('access_granter') is None, 'access_granter should not be returned to Anon')
+        self.assertTrue(
+            response.data.get("rems_identifier") is None,
+            "rems_identifier should not be returned to Anon",
+        )
+        self.assertTrue(
+            response.data.get("access_granter") is None,
+            "access_granter should not be returned to Anon",
+        )

     @responses.activate
     def test_rems_info_cannot_be_changed(self):
@@ -4551,10 +5329,18 @@ def test_rems_info_cannot_be_changed(self):

         cr = response.data

-        cr['rems_identifier'] = 'some:new:identifier'
-        cr['access_granter']['name'] = 'New Name'
+        cr["rems_identifier"] = "some:new:identifier"
+        cr["access_granter"]["name"] = "New Name"

         response = self.client.put(f'/rest/datasets/{cr["id"]}', cr, format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertNotEqual(response.data['rems_identifier'], 'some:new:identifier', 'rems_id should not be changed')
-        self.assertNotEqual(response.data['access_granter'], 'New Name', 'access_granter should not be changed')
+        self.assertNotEqual(
+            response.data["rems_identifier"],
+            "some:new:identifier",
+            "rems_id should not be changed",
+        )
+        self.assertNotEqual(
+            response.data["access_granter"],
+            "New Name",
+            "access_granter should not be changed",
+        )
diff --git a/src/metax_api/tests/api/rest/base/views/directories/read.py b/src/metax_api/tests/api/rest/base/views/directories/read.py
index 9ebce164..0dff8efc 100755
--- a/src/metax_api/tests/api/rest/base/views/directories/read.py
+++ b/src/metax_api/tests/api/rest/base/views/directories/read.py
@@ -19,19 +19,18 @@
 class DirectoryApiReadCommon(APITestCase, TestClassUtils):
-
     @classmethod
     def setUpClass(cls):
         """
         Loaded only once for test cases inside this class.
         """
-        call_command('loaddata', test_data_file_path, verbosity=0)
+        call_command("loaddata", test_data_file_path, verbosity=0)
         super(DirectoryApiReadCommon, cls).setUpClass()

     def setUp(self):
-        dir_from_test_data = self._get_object_from_test_data('directory')
-        self.identifier = dir_from_test_data['identifier']
-        self.pk = dir_from_test_data['id']
+        dir_from_test_data = self._get_object_from_test_data("directory")
+        self.identifier = dir_from_test_data["identifier"]
+        self.pk = dir_from_test_data["id"]
         self._use_http_authorization()

     def _create_test_dirs(self, count):
@@ -39,39 +38,49 @@ def _create_test_dirs(self, count):
         with transaction.atomic():
             for n in range(1, count):
                 f = self._get_new_file_data(str(n))
-                self.client.post('/rest/files', f, format="json")
+                self.client.post("/rest/files", f, format="json")

     def _get_dirs_files_ids(self, url):
         file_data = self.client.get(url).data
         if isinstance(file_data, dict):
-            return {key: [f['id'] for f in file_data[key]] for key in file_data.keys()
-                if key in ['directories', 'files']}
+            return {
+                key: [f["id"] for f in file_data[key]]
+                for key in file_data.keys()
+                if key in ["directories", "files"]
+            }
         else:
-            return [f['id'] for f in file_data]
+            return [f["id"] for f in file_data]


 class DirectoryApiReadBasicTests(DirectoryApiReadCommon):
-
     def test_read_directory_list(self):
-        response = self.client.get('/rest/directories')
+        response = self.client.get("/rest/directories")
         self.assertEqual(response.status_code, 501)

     def test_read_directory_details_by_pk(self):
-        response = self.client.get('/rest/directories/%s' % self.pk)
+        response = self.client.get("/rest/directories/%s" % self.pk)
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(hasattr(response, 'data'), True, 'Request response object is missing attribute \'data\'')
-        self.assertEqual('directory_name' in response.data.keys(), True)
-        self.assertEqual(response.data['identifier'], self.identifier)
+        self.assertEqual(
+            hasattr(response, "data"),
+            True,
+            "Request response object is missing attribute 'data'",
+        )
+        self.assertEqual("directory_name" in response.data.keys(), True)
+        self.assertEqual(response.data["identifier"], self.identifier)

     def test_read_directory_details_by_identifier(self):
-        response = self.client.get('/rest/directories/%s' % self.identifier)
+        response = self.client.get("/rest/directories/%s" % self.identifier)
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(hasattr(response, 'data'), True, 'Request response object is missing attribute \'data\'')
-        self.assertEqual('directory_name' in response.data.keys(), True)
-        self.assertEqual(response.data['identifier'], self.identifier)
+        self.assertEqual(
+            hasattr(response, "data"),
+            True,
+            "Request response object is missing attribute 'data'",
+        )
+        self.assertEqual("directory_name" in response.data.keys(), True)
+        self.assertEqual(response.data["identifier"], self.identifier)

     def test_read_directory_details_not_found(self):
-        response = self.client.get('/rest/directories/shouldnotexist')
+        response = self.client.get("/rest/directories/shouldnotexist")
         self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

@@ -85,37 +94,37 @@ def test_read_directory_get_files(self):
         """
         Test browsing files
         """
-        response = self.client.get('/rest/directories/2/files')
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['directories']), 1)
-        self.assertEqual(response.data['directories'][0]['id'], 3)
-        self.assertEqual(response.data['directories'][0]['parent_directory']['id'], 2)
-        self.assertEqual(len(response.data['files']), 0)
-
-        response = self.client.get('/rest/directories/3/files')
-        self.assertEqual(len(response.data['directories']), 1)
-        self.assertEqual(response.data['directories'][0]['id'], 4)
-        self.assertEqual(response.data['directories'][0]['parent_directory']['id'], 3)
-        self.assertEqual(len(response.data['files']), 5)
-        self.assertEqual(response.data['files'][0]['parent_directory']['id'], 3)
-        self.assertEqual(response.data['files'][4]['parent_directory']['id'], 3)
-
-        response = self.client.get('/rest/directories/4/files')
-        self.assertEqual(len(response.data['directories']), 1)
-        self.assertEqual(response.data['directories'][0]['parent_directory']['id'], 4)
-        self.assertEqual(len(response.data['files']), 5)
-        self.assertEqual(response.data['files'][0]['parent_directory']['id'], 4)
-        self.assertEqual(response.data['files'][4]['parent_directory']['id'], 4)
-
-        response = self.client.get('/rest/directories/5/files')
-        self.assertEqual(len(response.data['directories']), 1)
-        self.assertEqual(len(response.data['files']), 0)
-
-        response = self.client.get('/rest/directories/6/files')
-        self.assertEqual(len(response.data['directories']), 0)
-        self.assertEqual(len(response.data['files']), 10)
-        self.assertEqual(response.data['files'][0]['parent_directory']['id'], 6)
-        self.assertEqual(response.data['files'][9]['parent_directory']['id'], 6)
+        response = self.client.get("/rest/directories/2/files")
+        self.assertEqual(response.status_code, status.HTTP_200_OK)
+        self.assertEqual(len(response.data["directories"]), 1)
+        self.assertEqual(response.data["directories"][0]["id"], 3)
+        self.assertEqual(response.data["directories"][0]["parent_directory"]["id"], 2)
+        self.assertEqual(len(response.data["files"]), 0)
+
+        response = self.client.get("/rest/directories/3/files")
+        self.assertEqual(len(response.data["directories"]), 1)
+        self.assertEqual(response.data["directories"][0]["id"], 4)
+        self.assertEqual(response.data["directories"][0]["parent_directory"]["id"], 3)
+        self.assertEqual(len(response.data["files"]), 5)
+        self.assertEqual(response.data["files"][0]["parent_directory"]["id"], 3)
+        self.assertEqual(response.data["files"][4]["parent_directory"]["id"], 3)
+
+        response = self.client.get("/rest/directories/4/files")
+        self.assertEqual(len(response.data["directories"]), 1)
+        self.assertEqual(response.data["directories"][0]["parent_directory"]["id"], 4)
+        self.assertEqual(len(response.data["files"]), 5)
+        self.assertEqual(response.data["files"][0]["parent_directory"]["id"], 4)
+        self.assertEqual(response.data["files"][4]["parent_directory"]["id"], 4)
+
+        response = self.client.get("/rest/directories/5/files")
+        self.assertEqual(len(response.data["directories"]), 1)
+        self.assertEqual(len(response.data["files"]), 0)
+
+        response = self.client.get("/rest/directories/6/files")
+        self.assertEqual(len(response.data["directories"]), 0)
+        self.assertEqual(len(response.data["files"]), 10)
+        self.assertEqual(response.data["files"][0]["parent_directory"]["id"], 6)
+        self.assertEqual(response.data["files"][9]["parent_directory"]["id"], 6)

     def test_read_directory_get_files_recursively(self):
         """
@@ -123,85 +132,88 @@ def test_read_directory_get_files_recursively(self):
         """

         # without depth, returns from depth=1, which should contain no files
-        response = self.client.get('/rest/directories/1/files?recursive')
+        response = self.client.get("/rest/directories/1/files?recursive")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         self.assertEqual(len(response.data), 0)

         # dir id 1 (the root) contains 0 files, but recursively 20
-        response = self.client.get('/rest/directories/1/files?recursive=true&depth=*')
+        response = self.client.get("/rest/directories/1/files?recursive=true&depth=*")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(len(response.data), 20)

         # dir id 3 contains 5 files, but recursively 20
-        response = self.client.get('/rest/directories/3/files?recursive=true&depth=*')
+        response = self.client.get("/rest/directories/3/files?recursive=true&depth=*")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(len(response.data), 20)

         # dir id 4 contains 5 files, but recursively 15
-        response = self.client.get('/rest/directories/4/files?recursive=true&depth=*')
+        response = self.client.get("/rest/directories/4/files?recursive=true&depth=*")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(len(response.data), 15)

         # dir id 5 contains 0 files
-        response = self.client.get('/rest/directories/5/files?recursive=true&depth=*')
+        response = self.client.get("/rest/directories/5/files?recursive=true&depth=*")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(len(response.data), 10)

         # dir id 6 contains 10 files
-        response = self.client.get('/rest/directories/6/files?recursive=true&depth=*')
+        response = self.client.get("/rest/directories/6/files?recursive=true&depth=*")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(len(response.data), 10)

     def test_read_directory_get_files_file_not_found(self):
-        response = self.client.get('/rest/directories/not_found/files')
+        response = self.client.get("/rest/directories/not_found/files")
         self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

     def test_read_directory_get_project_root_directory(self):
-        response = self.client.get('/rest/directories/root?project=project_x')
+        response = self.client.get("/rest/directories/root?project=project_x")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
-        self.assertEqual(response.data['id'], 1)
-        self.assertEqual('directories' in response.data, True)
-        self.assertEqual('files' in response.data, True)
-        self.assertEqual(len(response.data['directories']), 1)
-        self.assertEqual(response.data['directories'][0]['id'], 2)
+        self.assertEqual(response.data["id"], 1)
+        self.assertEqual("directories" in response.data, True)
+        self.assertEqual("files" in response.data, True)
+        self.assertEqual(len(response.data["directories"]), 1)
+        self.assertEqual(response.data["directories"][0]["id"], 2)

     def test_read_directory_get_project_root_directory_not_found(self):
-        response = self.client.get('/rest/directories/root?project=project_xyz')
+        response = self.client.get("/rest/directories/root?project=project_xyz")
         self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

     def test_read_directory_get_project_root_directory_parameter_missing(self):
-        response = self.client.get('/rest/directories/root')
+        response = self.client.get("/rest/directories/root")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertEqual('required' in response.data['detail'][0], True, response.data)
+        self.assertEqual("required" in response.data["detail"][0], True, response.data)

     def test_read_directory_get_files_by_path(self):
         dr = Directory.objects.get(pk=2)
-        response = self.client.get('/rest/directories/files?path=%s&project=%s' %
-            (dr.directory_path, dr.project_identifier))
+        response = self.client.get(
+            "/rest/directories/files?path=%s&project=%s"
+            % (dr.directory_path, dr.project_identifier)
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['directories']), 1)
-        self.assertEqual(response.data['directories'][0]['id'], 3)
-        self.assertEqual(response.data['directories'][0]['parent_directory']['id'], 2)
-        self.assertEqual(len(response.data['files']), 0)
+        self.assertEqual(len(response.data["directories"]), 1)
+        self.assertEqual(response.data["directories"][0]["id"], 3)
+        self.assertEqual(response.data["directories"][0]["parent_directory"]["id"], 2)
+        self.assertEqual(len(response.data["files"]), 0)

     def test_read_directory_get_files_by_path_not_found(self):
-        response = self.client.get('/rest/directories/files?path=%s&project=%s' %
-            ('doesnotexist', 'doesnotexist'))
+        response = self.client.get(
+            "/rest/directories/files?path=%s&project=%s" % ("doesnotexist", "doesnotexist")
+        )
         self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

     def test_read_directory_get_files_by_path_check_parameters(self):
-        response = self.client.get('/rest/directories/files')
+        response = self.client.get("/rest/directories/files")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        response = self.client.get('/rest/directories/files?path=something')
+        response = self.client.get("/rest/directories/files?path=something")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        response = self.client.get('/rest/directories/files?project=something')
+        response = self.client.get("/rest/directories/files?project=something")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

     def test_read_directory_recursively_with_max_depth(self):
         """
         Should return a flat list of files, three directories deep
         """
-        response = self.client.get('/rest/directories/2/files?recursive=true&depth=3')
+        response = self.client.get("/rest/directories/2/files?recursive=true&depth=3")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(len(response.data), 10)

@@ -209,11
+221,13 @@ def test_read_directory_recursively_with_dirs_only_and_max_depth(self): """ Should return a directory hierarchy, three directories deep, with no files at all. """ - response = self.client.get('/rest/directories/2/files?recursive=true&directories_only=true&depth=3') + response = self.client.get( + "/rest/directories/2/files?recursive=true&directories_only=true&depth=3" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('directories' in response.data, True) - self.assertEqual('directories' in response.data['directories'][0], True) - self.assertEqual('directories' in response.data['directories'][0]['directories'][0], True) + self.assertEqual("directories" in response.data, True) + self.assertEqual("directories" in response.data["directories"][0], True) + self.assertEqual("directories" in response.data["directories"][0]["directories"][0], True) def test_read_directory_recursively_with_no_depth(self): """ @@ -222,162 +236,228 @@ def test_read_directory_recursively_with_no_depth(self): Using parameter directories_only=true to easier count the depth. """ - response = self.client.get('/rest/directories/3/files?recursive=true&directories_only=true') + response = self.client.get("/rest/directories/3/files?recursive=true&directories_only=true") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('directories' in response.data, True) - self.assertEqual('directories' in response.data['directories'][0], True) + self.assertEqual("directories" in response.data, True) + self.assertEqual("directories" in response.data["directories"][0], True) def test_read_directory_return_directories_only(self): - response = self.client.get('/rest/directories/3/files?directories_only') + response = self.client.get("/rest/directories/3/files?directories_only") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual('files' in response.data, False) + self.assertEqual(len(response.data["directories"]), 1) + self.assertEqual("files" in response.data, False) def test_read_directory_with_include_parent(self): - response = self.client.get('/rest/directories/3/files?include_parent') + response = self.client.get("/rest/directories/3/files?include_parent") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual(len(response.data['files']), 5) - self.assertEqual(response.data.get('id', None), 3) + self.assertEqual(len(response.data["directories"]), 1) + self.assertEqual(len(response.data["files"]), 5) + self.assertEqual(response.data.get("id", None), 3) def test_read_directory_files_sorted_by_file_path(self): - response = self.client.get('/rest/directories/3/files') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['files'][0]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_1') - self.assertEqual(response.data['files'][1]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_2') - self.assertEqual(response.data['files'][2]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_3') - - response = self.client.get('/rest/directories/3/files?pagination&limit=2&offset=2') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['results']['files'][0]['file_path'], - '/project_x_FROZEN/Experiment_X/file_name_2') - self.assertEqual(response.data['results']['files'][1]['file_path'], - '/project_x_FROZEN/Experiment_X/file_name_3') 
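(For reference, the pagination contract exercised above can be consumed with a plain offset walk; a client-side sketch assuming the requests library and a placeholder host:)

    import requests

    def iter_directory_files(dir_id, limit=2, base="https://metax.example.org"):
        """Yield a directory's files in file_path order, one page at a time."""
        offset = 0
        while True:
            r = requests.get(
                "%s/rest/directories/%d/files" % (base, dir_id),
                params={"pagination": "true", "limit": limit, "offset": offset},
            )
            r.raise_for_status()
            files = r.json()["results"]["files"]
            if not files:
                break
            yield from files
            offset += limit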
- - response = self.client.get('/rest/directories/3/files?cr_identifier=2') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['files'][0]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_3') - self.assertEqual(response.data['files'][1]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_4') - - response = self.client.get('/rest/directories/3/files?recursive') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data[0]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_1') - self.assertEqual(response.data[1]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_2') - self.assertEqual(response.data[2]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_3') - - response = self.client.get('/rest/directories/3/files?recursive&cr_identifier=2') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data[0]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_3') - self.assertEqual(response.data[1]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_4') + response = self.client.get("/rest/directories/3/files") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual( + response.data["files"][0]["file_path"], + "/project_x_FROZEN/Experiment_X/file_name_1", + ) + self.assertEqual( + response.data["files"][1]["file_path"], + "/project_x_FROZEN/Experiment_X/file_name_2", + ) + self.assertEqual( + response.data["files"][2]["file_path"], + "/project_x_FROZEN/Experiment_X/file_name_3", + ) + + response = self.client.get("/rest/directories/3/files?pagination&limit=2&offset=2") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual( + response.data["results"]["files"][0]["file_path"], + "/project_x_FROZEN/Experiment_X/file_name_2", + ) + self.assertEqual( + response.data["results"]["files"][1]["file_path"], + "/project_x_FROZEN/Experiment_X/file_name_3", + ) + + response = self.client.get("/rest/directories/3/files?cr_identifier=2") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual( + response.data["files"][0]["file_path"], + "/project_x_FROZEN/Experiment_X/file_name_3", + ) + self.assertEqual( + response.data["files"][1]["file_path"], + "/project_x_FROZEN/Experiment_X/file_name_4", + ) + + response = self.client.get("/rest/directories/3/files?recursive") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual( + response.data[0]["file_path"], "/project_x_FROZEN/Experiment_X/file_name_1" + ) + self.assertEqual( + response.data[1]["file_path"], "/project_x_FROZEN/Experiment_X/file_name_2" + ) + self.assertEqual( + response.data[2]["file_path"], "/project_x_FROZEN/Experiment_X/file_name_3" + ) + + response = self.client.get("/rest/directories/3/files?recursive&cr_identifier=2") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual( + response.data[0]["file_path"], "/project_x_FROZEN/Experiment_X/file_name_3" + ) + self.assertEqual( + response.data[1]["file_path"], "/project_x_FROZEN/Experiment_X/file_name_4" + ) def test_read_directory_directories_sorted_by_directory_path(self): - response = self.client.get('/rest/directories/8/files') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['directories'][0]['directory_path'], - '/prj_112_root/other') - self.assertEqual(response.data['directories'][1]['directory_path'], - '/prj_112_root/random_folder', response.data) - 
self.assertEqual(response.data['directories'][2]['directory_path'], - '/prj_112_root/science_data_A') - self.assertEqual(response.data['directories'][3]['directory_path'], - '/prj_112_root/science_data_B') - self.assertEqual(response.data['directories'][4]['directory_path'], - '/prj_112_root/science_data_C') - - response = self.client.get('/rest/directories/8/files?pagination&limit=2&offset=2') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['results']['directories'][0]['directory_path'], - '/prj_112_root/science_data_A') - self.assertEqual(response.data['results']['directories'][1]['directory_path'], - '/prj_112_root/science_data_B') - - response = self.client.get('/rest/directories/8/files?directories_only') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['directories'][0]['directory_path'], - '/prj_112_root/other') - self.assertEqual(response.data['directories'][1]['directory_path'], - '/prj_112_root/random_folder', response.data) - self.assertEqual(response.data['directories'][2]['directory_path'], - '/prj_112_root/science_data_A') - self.assertEqual(response.data['directories'][3]['directory_path'], - '/prj_112_root/science_data_B') - self.assertEqual(response.data['directories'][4]['directory_path'], - '/prj_112_root/science_data_C') - - response = self.client.get('/rest/directories/8/files?cr_identifier=13') + response = self.client.get("/rest/directories/8/files") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["directories"][0]["directory_path"], "/prj_112_root/other") + self.assertEqual( + response.data["directories"][1]["directory_path"], + "/prj_112_root/random_folder", + response.data, + ) + self.assertEqual( + response.data["directories"][2]["directory_path"], + "/prj_112_root/science_data_A", + ) + self.assertEqual( + response.data["directories"][3]["directory_path"], + "/prj_112_root/science_data_B", + ) + self.assertEqual( + response.data["directories"][4]["directory_path"], + "/prj_112_root/science_data_C", + ) + + response = self.client.get("/rest/directories/8/files?pagination&limit=2&offset=2") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual( + response.data["results"]["directories"][0]["directory_path"], + "/prj_112_root/science_data_A", + ) + self.assertEqual( + response.data["results"]["directories"][1]["directory_path"], + "/prj_112_root/science_data_B", + ) + + response = self.client.get("/rest/directories/8/files?directories_only") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["directories"][0]["directory_path"], "/prj_112_root/other") + self.assertEqual( + response.data["directories"][1]["directory_path"], + "/prj_112_root/random_folder", + response.data, + ) + self.assertEqual( + response.data["directories"][2]["directory_path"], + "/prj_112_root/science_data_A", + ) + self.assertEqual( + response.data["directories"][3]["directory_path"], + "/prj_112_root/science_data_B", + ) + self.assertEqual( + response.data["directories"][4]["directory_path"], + "/prj_112_root/science_data_C", + ) + + response = self.client.get("/rest/directories/8/files?cr_identifier=13") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['directories'][0]['directory_path'], - '/prj_112_root/other') - self.assertEqual(response.data['directories'][1]['directory_path'], - '/prj_112_root/random_folder') - 
self.assertEqual(response.data['directories'][2]['directory_path'], - '/prj_112_root/science_data_A') - self.assertEqual(response.data['directories'][3]['directory_path'], - '/prj_112_root/science_data_B') + self.assertEqual(response.data["directories"][0]["directory_path"], "/prj_112_root/other") + self.assertEqual( + response.data["directories"][1]["directory_path"], + "/prj_112_root/random_folder", + ) + self.assertEqual( + response.data["directories"][2]["directory_path"], + "/prj_112_root/science_data_A", + ) + self.assertEqual( + response.data["directories"][3]["directory_path"], + "/prj_112_root/science_data_B", + ) class DirectoryApiReadFileBrowsingRetrieveSpecificFieldsTests(DirectoryApiReadCommon): - def test_retrieve_requested_directory_fields_only(self): - response = self.client.get('/rest/directories/3/files?directory_fields=identifier,directory_path') + response = self.client.get( + "/rest/directories/3/files?directory_fields=identifier,directory_path" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories'][0].keys()), 2) - self.assertEqual('identifier' in response.data['directories'][0], True) - self.assertEqual('directory_path' in response.data['directories'][0], True) + self.assertEqual(len(response.data["directories"][0].keys()), 2) + self.assertEqual("identifier" in response.data["directories"][0], True) + self.assertEqual("directory_path" in response.data["directories"][0], True) - response = self.client.get('/rest/directories/17/files? \ - cr_identifier=13&directory_fields=directory_name&directories_only&recursive') + response = self.client.get( + "/rest/directories/17/files? \ + cr_identifier=13&directory_fields=directory_name&directories_only&recursive" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories'][0].keys()), 2) - self.assertTrue('directory_name' in response.data['directories'][0]) - self.assertTrue('directories' in response.data['directories'][0]) - self.assertFalse('id' in response.data['directories'][0]) + self.assertEqual(len(response.data["directories"][0].keys()), 2) + self.assertTrue("directory_name" in response.data["directories"][0]) + self.assertTrue("directories" in response.data["directories"][0]) + self.assertFalse("id" in response.data["directories"][0]) def test_retrieve_directory_byte_size_and_file_count(self): """ There is some additional logic involved in retrieving byte_size and file_count, which warrants targeted tests for just those fields. 
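As a sketch of the call pattern these tests pin down (field names taken from the assertions below), a request such as

    response = self.client.get(
        "/rest/directories/3/files?directory_fields=identifier,byte_size"
    )

is expected to serialize each directory with exactly the requested keys, i.e. set(response.data["directories"][0]) == {"identifier", "byte_size"}.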
""" - response = self.client.get('/rest/directories/3/files?directory_fields=identifier,byte_size') + response = self.client.get( + "/rest/directories/3/files?directory_fields=identifier,byte_size" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['directories'][0].keys()), 2) - self.assertEqual('identifier' in response.data['directories'][0], True) - self.assertEqual('byte_size' in response.data['directories'][0], True) + self.assertEqual(len(response.data["directories"][0].keys()), 2) + self.assertEqual("identifier" in response.data["directories"][0], True) + self.assertEqual("byte_size" in response.data["directories"][0], True) - response = self.client.get('/rest/directories/3/files?directory_fields=identifier,file_count') + response = self.client.get( + "/rest/directories/3/files?directory_fields=identifier,file_count" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories'][0].keys()), 2) - self.assertEqual('identifier' in response.data['directories'][0], True) - self.assertEqual('file_count' in response.data['directories'][0], True) + self.assertEqual(len(response.data["directories"][0].keys()), 2) + self.assertEqual("identifier" in response.data["directories"][0], True) + self.assertEqual("file_count" in response.data["directories"][0], True) - response = self.client.get('/rest/directories/3/files?directory_fields=identifier,file_count&cr_identifier=3') + response = self.client.get( + "/rest/directories/3/files?directory_fields=identifier,file_count&cr_identifier=3" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories'][0].keys()), 2) - self.assertTrue('identifier' in response.data['directories'][0]) - self.assertTrue('file_count' in response.data['directories'][0]) + self.assertEqual(len(response.data["directories"][0].keys()), 2) + self.assertTrue("identifier" in response.data["directories"][0]) + self.assertTrue("file_count" in response.data["directories"][0]) response = self.client.get( - '/rest/directories/3/files?directory_fields=identifier,file_count¬_cr_identifier=2') + "/rest/directories/3/files?directory_fields=identifier,file_count¬_cr_identifier=2" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories'][0].keys()), 2) - self.assertTrue('identifier' in response.data['directories'][0]) - self.assertTrue('file_count' in response.data['directories'][0]) + self.assertEqual(len(response.data["directories"][0].keys()), 2) + self.assertTrue("identifier" in response.data["directories"][0]) + self.assertTrue("file_count" in response.data["directories"][0]) def test_retrieve_requested_file_fields_only(self): - response = self.client.get('/rest/directories/3/files?file_fields=identifier,file_path') + response = self.client.get("/rest/directories/3/files?file_fields=identifier,file_path") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['files'][0].keys()), 2) - self.assertEqual('identifier' in response.data['files'][0], True) - self.assertEqual('file_path' in response.data['files'][0], True) + self.assertEqual(len(response.data["files"][0].keys()), 2) + self.assertEqual("identifier" in response.data["files"][0], True) + self.assertEqual("file_path" in response.data["files"][0], True) def test_retrieve_requested_file_and_directory_fields_only(self): - response = 
self.client.get('/rest/directories/3/files?file_fields=identifier&directory_fields=id')
+ response = self.client.get(
+ "/rest/directories/3/files?file_fields=identifier&directory_fields=id"
+ )
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
- self.assertEqual(len(response.data['files'][0].keys()), 1)
- self.assertEqual('identifier' in response.data['files'][0], True)
- self.assertEqual(len(response.data['directories'][0].keys()), 1)
- self.assertEqual('id' in response.data['directories'][0], True)
+ self.assertEqual(len(response.data["files"][0].keys()), 1)
+ self.assertEqual("identifier" in response.data["files"][0], True)
+ self.assertEqual(len(response.data["directories"][0].keys()), 1)
+ self.assertEqual("id" in response.data["directories"][0], True)

def test_not_retrieving_not_allowed_directory_fields(self):
from metax_api.api.rest.base.serializers import DirectorySerializer, FileSerializer
@@ -385,19 +465,27 @@ def test_not_retrieving_not_allowed_directory_fields(self):
allowed_dir_fields = set(DirectorySerializer.Meta.fields)
allowed_file_fields = set(FileSerializer.Meta.fields)

- response = self.client.get('/rest/directories/3/files?file_fields=parent,id&directory_fields=;;drop db;,id')
+ response = self.client.get(
+ "/rest/directories/3/files?file_fields=parent,id&directory_fields=;;drop db;,id"
+ )
self.assertEqual(response.status_code, status.HTTP_200_OK)
- self.assertTrue(any(field in response.data['files'][0].keys() for field in allowed_file_fields))
- self.assertTrue(any(field in response.data['directories'][0].keys() for field in allowed_dir_fields))
+ self.assertTrue(
+ any(field in response.data["files"][0].keys() for field in allowed_file_fields)
+ )
+ self.assertTrue(
+ any(field in response.data["directories"][0].keys() for field in allowed_dir_fields)
+ )

- response = self.client.get('/rest/directories/3/files?file_fields=parent&directory_fields=or')
+ response = self.client.get(
+ "/rest/directories/3/files?file_fields=parent&directory_fields=or"
+ )
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

- response = self.client.get('/rest/directories/3/files?file_fields=parent')
+ response = self.client.get("/rest/directories/3/files?file_fields=parent")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

- response = self.client.get('/rest/directories/3/files?directory_fields=or')
+ response = self.client.get("/rest/directories/3/files?directory_fields=or")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

@@ -410,50 +498,55 @@ class DirectoryApiReadCatalogRecordFileBrowsingTests(DirectoryApiReadCommon):
"""

def setUp(self):
- self._set_http_authorization('service')
- self.client.get('/rest/directories/update_byte_sizes_and_file_counts')
- self.client.get('/rest/datasets/update_cr_directory_browsing_data')
+ self._set_http_authorization("service")
+ self.client.get("/rest/directories/update_byte_sizes_and_file_counts")
+ self.client.get("/rest/datasets/update_cr_directory_browsing_data")

def test_read_directory_catalog_record_and_not_catalog_record_not_ok(self):
"""
Test that the query parameters 'cr_identifier' and 'not_cr_identifier' cannot be used together.
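A minimal sketch of the guard the API is expected to apply (the helper name is illustrative; only the error text comes from the assertion below):

    from rest_framework.exceptions import ValidationError

    def _check_cr_filters(query_params):
        # the two filters are mutually exclusive
        if "cr_identifier" in query_params and "not_cr_identifier" in query_params:
            raise ValidationError({
                "detail": [
                    "only one query parameter of 'cr_identifier' and 'not_cr_identifier'"
                    " is allowed"
                ]
            })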
""" - response = self.client.get('/rest/directories/3/files?cr_identifier=1¬_cr_identifier=2') + response = self.client.get("/rest/directories/3/files?cr_identifier=1¬_cr_identifier=2") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue("one query parameter of 'cr_identifier' and 'not_cr_identifier'" in response.data['detail'][0]) + self.assertTrue( + "one query parameter of 'cr_identifier' and 'not_cr_identifier'" + in response.data["detail"][0] + ) def test_read_directory_for_catalog_record(self): """ Test query parameter 'cr_identifier'. """ - response = self.client.get('/rest/directories/3/files?cr_identifier=%s' - % CatalogRecord.objects.get(pk=1).identifier) + response = self.client.get( + "/rest/directories/3/files?cr_identifier=%s" + % CatalogRecord.objects.get(pk=1).identifier + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('directories' in response.data, True) - self.assertEqual('files' in response.data, True) - self.assertEqual(len(response.data['directories']), 0) - self.assertEqual(len(response.data['files']), 2) - for f in response.data['files']: - self.assertTrue(f['parent_directory']['id'], 1) + self.assertEqual("directories" in response.data, True) + self.assertEqual("files" in response.data, True) + self.assertEqual(len(response.data["directories"]), 0) + self.assertEqual(len(response.data["files"]), 2) + for f in response.data["files"]: + self.assertTrue(f["parent_directory"]["id"], 1) def test_read_directory_for_not_catalog_record(self): """ Test query parameter 'not_cr_identifier'. """ - response = self.client.get('/rest/directories/3/files?not_cr_identifier=2') + response = self.client.get("/rest/directories/3/files?not_cr_identifier=2") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['files']), 3, response.data) - for f in response.data['files']: - self.assertNotEqual(f['parent_directory']['id'], 2, response.data) - self.assertEqual(len(response.data['directories']), 1, response.data) - self.assertNotEqual(response.data['directories'][0]['parent_directory']['id'], 2) + self.assertEqual(len(response.data["files"]), 3, response.data) + for f in response.data["files"]: + self.assertNotEqual(f["parent_directory"]["id"], 2, response.data) + self.assertEqual(len(response.data["directories"]), 1, response.data) + self.assertNotEqual(response.data["directories"][0]["parent_directory"]["id"], 2) def test_read_directory_for_catalog_record_not_found(self): """ Not found cr_identifier should raise 400 instead of 404, which is raised when the directory itself is not found. the error contains details about the 400. """ - response = self.client.get('/rest/directories/3/files?cr_identifier=notexisting') + response = self.client.get("/rest/directories/3/files?cr_identifier=notexisting") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_read_directory_for_not_catalog_record_not_found(self): @@ -461,7 +554,7 @@ def test_read_directory_for_not_catalog_record_not_found(self): Not found cr_identifier should raise 400 instead of 404, which is raised when the directory itself is not found. the error contains details about the 400. 
""" - response = self.client.get('/rest/directories/3/files?not_cr_identifier=notexisting') + response = self.client.get("/rest/directories/3/files?not_cr_identifier=notexisting") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_read_directory_for_catalog_record_directory_does_not_exist(self): @@ -471,61 +564,69 @@ def test_read_directory_for_catalog_record_directory_does_not_exist(self): """ # should be OK... - response = self.client.get('/rest/directories/4/files') + response = self.client.get("/rest/directories/4/files") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual(len(response.data['files']), 5) + self.assertEqual(len(response.data["directories"]), 1) + self.assertEqual(len(response.data["files"]), 5) # ... but should not contain any files FOR THIS CR - response = self.client.get('/rest/directories/4/files?cr_identifier=1') + response = self.client.get("/rest/directories/4/files?cr_identifier=1") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) # ... and should contain files ALL BUT THIS CR - response = self.client.get('/rest/directories/4/files?not_cr_identifier=1') + response = self.client.get("/rest/directories/4/files?not_cr_identifier=1") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual(len(response.data['files']), 5) + self.assertEqual(len(response.data["directories"]), 1) + self.assertEqual(len(response.data["files"]), 5) def test_read_directory_for_catalog_record_recursively(self): """ Test query parameters 'cr_identifier' with 'recursive'. """ - response = self.client.get('/rest/directories/1/files?recursive&cr_identifier=%s&depth=*' - % CatalogRecord.objects.get(pk=1).identifier) + response = self.client.get( + "/rest/directories/1/files?recursive&cr_identifier=%s&depth=*" + % CatalogRecord.objects.get(pk=1).identifier + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - file_list = list(File.objects.filter(record__pk=1).values_list('id', flat=True)) + file_list = list(File.objects.filter(record__pk=1).values_list("id", flat=True)) self.assertEqual(len(response.data), len(file_list)) for f in response.data: - self.assertTrue(f['id'] in file_list) + self.assertTrue(f["id"] in file_list) - response = self.client.get('/rest/directories/1/files?recursive&cr_identifier=1&depth=*&directories_only') + response = self.client.get( + "/rest/directories/1/files?recursive&cr_identifier=1&depth=*&directories_only" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual(len(response.data['directories'][0]['directories']), 1) - self.assertEqual(len(response.data['directories'][0]['directories'][0]['directories']), 0) - self.assertFalse(response.data.get('files')) + self.assertEqual(len(response.data["directories"]), 1) + self.assertEqual(len(response.data["directories"][0]["directories"]), 1) + self.assertEqual(len(response.data["directories"][0]["directories"][0]["directories"]), 0) + self.assertFalse(response.data.get("files")) # not found cr_identifier should raise 400 instead of 404, which is raised when the # directory itself is not found. 
The error contains details about the 400.
- response = self.client.get('/rest/directories/1/files?recursive&cr_identifier=notexisting')
+ response = self.client.get("/rest/directories/1/files?recursive&cr_identifier=notexisting")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

def test_read_directory_for_not_catalog_record_recursively(self):
"""
Test query parameters 'not_cr_identifier' with 'recursive'.
"""
- file_recursive = self.client.get('/rest/directories/1/files?recursive&depth=*').data
- file_list = list(File.objects.filter(record__pk=1).values_list('id', flat=True))
- response = self.client.get('/rest/directories/1/files?recursive&depth=*&not_cr_identifier=%s'
- % CatalogRecord.objects.get(pk=1).identifier)
+ file_recursive = self.client.get("/rest/directories/1/files?recursive&depth=*").data
+ file_list = list(File.objects.filter(record__pk=1).values_list("id", flat=True))
+ response = self.client.get(
+ "/rest/directories/1/files?recursive&depth=*&not_cr_identifier=%s"
+ % CatalogRecord.objects.get(pk=1).identifier
+ )
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self.assertEqual(len(response.data), len(file_recursive) - len(file_list))
for f in response.data:
- self.assertTrue(f['id'] not in file_list)
+ self.assertTrue(f["id"] not in file_list)

# not found not_cr_identifier should raise 400 instead of 404, which is raised when the
# directory itself is not found. The error contains details about the 400.
- response = self.client.get('/rest/directories/1/files?recursive&not_cr_identifier=notexisting')
+ response = self.client.get(
+ "/rest/directories/1/files?recursive&not_cr_identifier=notexisting"
+ )
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

def test_directory_byte_size_and_file_count(self):
@@ -533,53 +634,62 @@
"""
Test byte size and file count are calculated correctly for directories when
browsing files in the context of a single record.
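The expected numbers reduce to one ORM aggregate over the record's files, scoped by path prefix (this mirrors the _assert_dir_calculations helper defined just below):

    from django.db.models import Sum

    scoped = cr.files.filter(file_path__startswith="%s/" % dr["directory_path"])
    byte_size = scoped.aggregate(Sum("byte_size"))["byte_size__sum"]
    file_count = scoped.count()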
""" + def _assert_dir_calculations(cr, dr): """ Assert directory numbers received from browsing-api matches what exists in the db when making a reasonably fool-proof query of files by directory path """ - self.assertEqual('byte_size' in dr, True) - self.assertEqual('file_count' in dr, True) + self.assertEqual("byte_size" in dr, True) + self.assertEqual("file_count" in dr, True) - byte_size = cr.files.filter(file_path__startswith='%s/' % dr['directory_path']) \ - .aggregate(Sum('byte_size'))['byte_size__sum'] + byte_size = cr.files.filter( + file_path__startswith="%s/" % dr["directory_path"] + ).aggregate(Sum("byte_size"))["byte_size__sum"] - file_count = cr.files.filter(file_path__startswith='%s/' % dr['directory_path']).count() + file_count = cr.files.filter(file_path__startswith="%s/" % dr["directory_path"]).count() - self.assertEqual(dr['byte_size'], byte_size, 'path: %s' % dr['directory_path']) - self.assertEqual(dr['file_count'], file_count, 'path: %s' % dr['directory_path']) + self.assertEqual(dr["byte_size"], byte_size, "path: %s" % dr["directory_path"]) + self.assertEqual(dr["file_count"], file_count, "path: %s" % dr["directory_path"]) # prepare a new test dataset which contains a directory from testdata, which contains a decent # qty of files and complexity - dr = Directory.objects.get(directory_path='/prj_112_root') + dr = Directory.objects.get(directory_path="/prj_112_root") cr_with_dirs = CatalogRecord.objects.get(pk=13) - cr_data = response = self.client.get('/rest/datasets/1').data - cr_data.pop('id') - cr_data.pop('identifier') - cr_data['research_dataset'].pop('preferred_identifier') - cr_data['research_dataset']['directories'] = [{ - 'identifier': dr.identifier, - 'title': 'test dir', - 'use_category': { - 'identifier': cr_with_dirs.research_dataset['directories'][0]['use_category']['identifier'] + cr_data = response = self.client.get("/rest/datasets/1").data + cr_data.pop("id") + cr_data.pop("identifier") + cr_data["research_dataset"].pop("preferred_identifier") + cr_data["research_dataset"]["directories"] = [ + { + "identifier": dr.identifier, + "title": "test dir", + "use_category": { + "identifier": cr_with_dirs.research_dataset["directories"][0]["use_category"][ + "identifier" + ] + }, } - }] - self._use_http_authorization(username='metax') - cr_data = response = self.client.post('/rest/datasets', cr_data, format='json').data - cr = CatalogRecord.objects.get(pk=cr_data['id']) + ] + self._use_http_authorization(username="metax") + cr_data = response = self.client.post("/rest/datasets", cr_data, format="json").data + cr = CatalogRecord.objects.get(pk=cr_data["id"]) # begin tests # test: browse the file api, and receive a list of sub-directories - response = self.client.get('/rest/directories/%d/files?cr_identifier=%s' % (dr.id, cr.identifier)) + response = self.client.get( + "/rest/directories/%d/files?cr_identifier=%s" % (dr.id, cr.identifier) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - for directory in response.data['directories']: + for directory in response.data["directories"]: _assert_dir_calculations(cr, directory) # test: browse with ?include_parent=true to get the dir directly that was added to the dataset - response = self.client.get('/rest/directories/%d/files?cr_identifier=%s&include_parent' - % (dr.id, cr.identifier)) + response = self.client.get( + "/rest/directories/%d/files?cr_identifier=%s&include_parent" % (dr.id, cr.identifier) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) 
_assert_dir_calculations(cr, response.data) @@ -587,40 +697,54 @@ def test_directory_byte_size_and_file_count_in_parent_directories(self): cr_id = 13 def _assertDirectoryData(id, parent_data): - response = self.client.get('/rest/directories/%d/files?cr_identifier=%d&include_parent' % (id, cr_id)) + response = self.client.get( + "/rest/directories/%d/files?cr_identifier=%d&include_parent" % (id, cr_id) + ) - self.assertEqual(response.data['byte_size'], parent_data[id][0], response.data['id']) - self.assertEqual(response.data['file_count'], parent_data[id][1]) + self.assertEqual(response.data["byte_size"], parent_data[id][0], response.data["id"]) + self.assertEqual(response.data["file_count"], parent_data[id][1]) - if response.data.get('parent_directory'): - return _assertDirectoryData(response.data['parent_directory']['id'], parent_data) + if response.data.get("parent_directory"): + return _assertDirectoryData(response.data["parent_directory"]["id"], parent_data) def _assertDirectoryData_not_cr_id(id, parent_data): - total_dir_res = self.client.get('/rest/directories/%d' % id) - total_dir_size = (total_dir_res.data.get('byte_size', None), total_dir_res.data.get('file_count', None)) - - response = self.client.get('/rest/directories/%d/files?not_cr_identifier=%d&include_parent' % (id, cr_id)) - if response.data.get('id'): - self.assertEqual(response.data['byte_size'], total_dir_size[0] - parent_data[id][0]) - self.assertEqual(response.data['file_count'], total_dir_size[1] - parent_data[id][1]) - - if response.data.get('parent_directory'): - return _assertDirectoryData_not_cr_id(response.data['parent_directory']['id'], parent_data) + total_dir_res = self.client.get("/rest/directories/%d" % id) + total_dir_size = ( + total_dir_res.data.get("byte_size", None), + total_dir_res.data.get("file_count", None), + ) + + response = self.client.get( + "/rest/directories/%d/files?not_cr_identifier=%d&include_parent" % (id, cr_id) + ) + if response.data.get("id"): + self.assertEqual(response.data["byte_size"], total_dir_size[0] - parent_data[id][0]) + self.assertEqual( + response.data["file_count"], total_dir_size[1] - parent_data[id][1] + ) + + if response.data.get("parent_directory"): + return _assertDirectoryData_not_cr_id( + response.data["parent_directory"]["id"], parent_data + ) def _get_parent_dir_data_for_cr(id): - def _get_parents(pk): pk = Directory.objects.get(pk=pk).parent_directory_id if pk: pks.append(pk) _get_parents(pk) - cr_data = CatalogRecord.objects.get(pk=id).files.order_by('parent_directory_id').values_list( - 'parent_directory_id').annotate(Sum('byte_size'), Count('id')) + cr_data = ( + CatalogRecord.objects.get(pk=id) + .files.order_by("parent_directory_id") + .values_list("parent_directory_id") + .annotate(Sum("byte_size"), Count("id")) + ) grouped_cr_data = {} for i in cr_data: - grouped_cr_data[i[0]] = [ int(i[1]), i[2] ] + grouped_cr_data[i[0]] = [int(i[1]), i[2]] drs = {} for dir in grouped_cr_data.keys(): @@ -639,12 +763,18 @@ def _get_parents(pk): # begin tests - cr = self.client.get('/rest/datasets/%d?fields=research_dataset' % cr_id) + cr = self.client.get("/rest/datasets/%d?fields=research_dataset" % cr_id) - dirs = set([Directory.objects.get(identifier=dr['identifier']).id - for dr in cr.data['research_dataset'].get('directories', [])] + - [File.objects.get(identifier=file['identifier']).parent_directory.id - for file in cr.data['research_dataset'].get('files', [])]) + dirs = set( + [ + Directory.objects.get(identifier=dr["identifier"]).id + for dr in 
cr.data["research_dataset"].get("directories", []) + ] + + [ + File.objects.get(identifier=file["identifier"]).parent_directory.id + for file in cr.data["research_dataset"].get("files", []) + ] + ) parent_data = _get_parent_dir_data_for_cr(cr_id) @@ -666,28 +796,32 @@ def test_returns_ok_for_open_catalog_record_if_no_authorization(self): # Verify /rest/directories//files?cr_identifier=cr_id returns dir files even without authorization for # open catalog record - self._assert_ok(open_cr_json, 'no') + self._assert_ok(open_cr_json, "no") def test_returns_ok_for_login_catalog_record_if_no_authorization(self): - login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(use_login_access_type=True) + login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details( + use_login_access_type=True + ) # Verify /rest/directories//files?cr_identifier=cr_id returns dir files even without authorization for # login catalog record - self._assert_ok(login_cr_json, 'no') + self._assert_ok(login_cr_json, "no") def test_returns_ok_for_open_catalog_record_if_service_authorization(self): open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() # Verify /rest/directories//files?cr_identifier=cr_id returns dir files with service authorization for # open catalog record - self._assert_ok(open_cr_json, 'service') + self._assert_ok(open_cr_json, "service") def test_returns_ok_for_login_catalog_record_if_service_authorization(self): - login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(use_login_access_type=True) + login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details( + use_login_access_type=True + ) # Verify /rest/directories//files?cr_identifier=cr_id returns dir files with service authorization for # login catalog record - self._assert_ok(login_cr_json, 'service') + self._assert_ok(login_cr_json, "service") @responses.activate def test_returns_ok_for_open_catalog_record_if_owner_authorization(self): @@ -696,7 +830,7 @@ def test_returns_ok_for_open_catalog_record_if_owner_authorization(self): # Verify /rest/directories//files?cr_identifier=cr_id returns dir files with owner authorization for # owner-owned open catalog record - self._assert_ok(open_cr_json, 'owner') + self._assert_ok(open_cr_json, "owner") @responses.activate def test_returns_ok_for_login_catalog_record_if_owner_authorization(self): @@ -705,30 +839,36 @@ def test_returns_ok_for_login_catalog_record_if_owner_authorization(self): # Verify /rest/directories//files?cr_identifier=cr_id returns dir files with owner authorization for # owner-owned login_cr_json catalog record - self._assert_ok(login_cr_json, 'owner') + self._assert_ok(login_cr_json, "owner") def test_returns_ok_for_restricted_catalog_record_if_service_authorization(self): restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() # Verify /rest/directories//files?cr_identifier=cr_id returns dir files with service authorization for # restricted catalog record - self._assert_ok(restricted_cr_json, 'service') + self._assert_ok(restricted_cr_json, "service") @responses.activate def test_returns_ok_for_restricted_catalog_record_if_owner_authorization(self): self.create_end_user_data_catalogs() - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details(True) + restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details( + True + ) # Verify /rest/directories//files?cr_identifier=cr_id 
returns dir files with owner authorization for # owner-owned restricted catalog record - self._assert_ok(restricted_cr_json, 'owner') + self._assert_ok(restricted_cr_json, "owner") - def test_returns_ok_for_embargoed_catalog_record_if_available_reached_and_no_authorization(self): - available_embargoed_cr_json = self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) + def test_returns_ok_for_embargoed_catalog_record_if_available_reached_and_no_authorization( + self, + ): + available_embargoed_cr_json = ( + self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) + ) # Verify /rest/directories//files?cr_identifier=cr_id returns dir files without authorization # for embargoed catalog record whose embargo date has been reached - self._assert_ok(available_embargoed_cr_json, 'no') + self._assert_ok(available_embargoed_cr_json, "no") # THE FORBIDDEN TESTS @@ -737,39 +877,50 @@ def test_returns_forbidden_for_restricted_catalog_record_if_no_authorization(sel # Verify /rest/directories//files?cr_identifier=cr_id returns forbidden without authorization # for restricted catalog record - self._assert_forbidden(restricted_cr_json, 'no') + self._assert_forbidden(restricted_cr_json, "no") - def test_returns_forbidden_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization(self): - not_available_embargoed_cr_json = self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details( - False) + def test_returns_forbidden_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization( + self, + ): + not_available_embargoed_cr_json = ( + self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(False) + ) # Verify /rest/directories//files?cr_identifier=cr_id returns forbidden without authorization # for embargoed catalog record whose embargo date has not been reached # Deactivate credentials - self._assert_forbidden(not_available_embargoed_cr_json, 'no') + self._assert_forbidden(not_available_embargoed_cr_json, "no") def _assert_forbidden(self, cr_json, credentials_type): - dir_id = cr_json['research_dataset']['directories'][0]['identifier'] - cr_id = cr_json['identifier'] + dir_id = cr_json["research_dataset"]["directories"][0]["identifier"] + cr_id = cr_json["identifier"] self._set_http_authorization(credentials_type) - response = self.client.get('/rest/directories/{0}/files?cr_identifier={1}'.format(dir_id, cr_id)) + response = self.client.get( + "/rest/directories/{0}/files?cr_identifier={1}".format(dir_id, cr_id) + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - response = self.client.get('/rest/directories/{0}/files?not_cr_identifier={1}'.format(dir_id, cr_id)) + response = self.client.get( + "/rest/directories/{0}/files?not_cr_identifier={1}".format(dir_id, cr_id) + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def _assert_ok(self, cr_json, credentials_type): - dir_file_amt = cr_json['research_dataset']['directories'][0]['details']['file_count'] - dir_id = cr_json['research_dataset']['directories'][0]['identifier'] - cr_id = cr_json['identifier'] + dir_file_amt = cr_json["research_dataset"]["directories"][0]["details"]["file_count"] + dir_id = cr_json["research_dataset"]["directories"][0]["identifier"] + cr_id = cr_json["identifier"] self._set_http_authorization(credentials_type) - response = self.client.get('/rest/directories/{0}/files?cr_identifier={1}&recursive&depth=*' - .format(dir_id, cr_id)) + response = self.client.get( + 
"/rest/directories/{0}/files?cr_identifier={1}&recursive&depth=*".format(dir_id, cr_id) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual(len(response.data), dir_file_amt) - response = self.client.get('/rest/directories/{0}/files?not_cr_identifier={1}&recursive&depth=*' - .format(dir_id, cr_id)) + response = self.client.get( + "/rest/directories/{0}/files?not_cr_identifier={1}&recursive&depth=*".format( + dir_id, cr_id + ) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual(len(response.data), 0) @@ -798,155 +949,237 @@ class DirectoryApiReadQueryFiltersTogetherTests(DirectoryApiReadCommon): def test_browsing_directories_with_filters(self): # directory filters including directories_only - response = self.client.get('/rest/directories/17/files? \ - directories_only&include_parent&directory_fields=id&directory_name=phase') + response = self.client.get( + "/rest/directories/17/files? \ + directories_only&include_parent&directory_fields=id&directory_name=phase" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # file filters are not suppose to break anything with directories_only - response = self.client.get('/rest/directories/17/files? \ + response = self.client.get( + "/rest/directories/17/files? \ directories_only&include_parent&directory_fields=id&directory_name=phase& \ - file_fields=id&file_name=2') + file_fields=id&file_name=2" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # adds cr_identifier - response = self.client.get('/rest/directories/17/files? \ + response = self.client.get( + "/rest/directories/17/files? \ directories_only&include_parent&cr_identifier=13&directory_fields=id&directory_name=phase& \ - file_fields=id&file_name=2') + file_fields=id&file_name=2" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # adds not_cr_identifier - response = self.client.get('/rest/directories/17/files? \ + response = self.client.get( + "/rest/directories/17/files? \ directories_only&include_parent¬_cr_identifier=13&directory_fields=id&directory_name=phase& \ - file_fields=id&file_name=2') + file_fields=id&file_name=2" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # cr_identifier and not_cr_identifier are NOT suppose to work together - response = self.client.get('/rest/directories/17/files? \ + response = self.client.get( + "/rest/directories/17/files? \ directories_only&include_parent&cr_identifier=11¬_cr_identifier=13& \ - directory_fields=id&directory_name=phase&file_fields=id&file_name=2') + directory_fields=id&directory_name=phase&file_fields=id&file_name=2" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) # adds pagination - response = self.client.get('/rest/directories/17/files? \ + response = self.client.get( + "/rest/directories/17/files? \ directories_only&include_parent&cr_identifier=13&directory_fields=id&directory_name=phase& \ - file_fields=id&file_name=2&pagination') + file_fields=id&file_name=2&pagination" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # adds recursive - response = self.client.get('/rest/directories/17/files? \ + response = self.client.get( + "/rest/directories/17/files? 
directories_only&include_parent&cr_identifier=13&directory_fields=id&directory_name=phase& \
- file_fields=id&file_name=2&recursive&depth=*')
+ file_fields=id&file_name=2&recursive&depth=*"
+ )
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

# adds recursive and pagination
- response = self.client.get('/rest/directories/17/files? \
+ response = self.client.get(
+ "/rest/directories/17/files? \
directories_only&include_parent&cr_identifier=13&directory_fields=id&directory_name=phase& \
- file_fields=id&file_name=2&recursive&depth=*')
+ file_fields=id&file_name=2&recursive&depth=*"
+ )
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

def test_browsing_files_and_directories_with_filters(self):
# file filters
- response = self.client.get('/rest/directories/17/files? \
- include_parent&file_fields=id&file_name=file')
+ response = self.client.get(
+ "/rest/directories/17/files? \
+ include_parent&file_fields=id&file_name=file"
+ )
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

# adds directory filters
- response = self.client.get('/rest/directories/17/files? \
+ response = self.client.get(
+ "/rest/directories/17/files? \
include_parent&file_fields=id&file_name=file& \
- directory_fields=id&directory_name=phase')
+ directory_fields=id&directory_name=phase"
+ )
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

# adds cr_identifier
- response = self.client.get('/rest/directories/17/files? \
+ response = self.client.get(
+ "/rest/directories/17/files? \
include_parent&cr_identifier=13&file_fields=id&file_name=file& \
- directory_fields=id&directory_name=phase')
+ directory_fields=id&directory_name=phase"
+ )
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

# adds not_cr_identifier
- response = self.client.get('/rest/directories/17/files? \
+ response = self.client.get(
+ "/rest/directories/17/files? \
include_parent&not_cr_identifier=13&file_fields=id&file_name=file& \
- directory_fields=id&directory_name=phase')
+ directory_fields=id&directory_name=phase"
+ )
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

# cr_identifier and not_cr_identifier are NOT supposed to work together
- response = self.client.get('/rest/directories/17/files? \
+ response = self.client.get(
+ "/rest/directories/17/files? \
include_parent&cr_identifier=13&not_cr_identifier=11&file_fields=id&file_name=file& \
- directory_fields=id&directory_name=phase')
+ directory_fields=id&directory_name=phase"
+ )
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)

# adds recursive, directory filters are not supposed to break anything
- response = self.client.get('/rest/directories/17/files? \
+ response = self.client.get(
+ "/rest/directories/17/files? \
include_parent&cr_identifier=13&file_fields=id&file_name=file& \
- directory_fields=id&directory_name=phase&recursive&depth=*')
+ directory_fields=id&directory_name=phase&recursive&depth=*"
+ )
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

# adds pagination
- response = self.client.get('/rest/directories/17/files?
\ include_parent&cr_identifier=13&file_fields=id&file_name=file& \ - directory_fields=id&directory_name=phase&pagination') + directory_fields=id&directory_name=phase&pagination" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # adds recursive and pagination - response = self.client.get('/rest/directories/17/files? \ + response = self.client.get( + "/rest/directories/17/files? \ include_parent&cr_identifier=13&file_fields=id&file_name=file& \ - directory_fields=id&directory_name=phase&recursive&depth=*&pagination') + directory_fields=id&directory_name=phase&recursive&depth=*&pagination" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) class DirectoryApiReadCatalogRecordFileBrowsingRetrieveSpecificFieldsTests(DirectoryApiReadCommon): - def setUp(self): super().setUp() CatalogRecord.objects.get(pk=12).calculate_directory_byte_sizes_and_file_counts() def test_retrieve_requested_directory_fields_only(self): - response = self.client.get('/rest/datasets/12?file_details&directory_fields=identifier,directory_path') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['directories'][0]['details'].keys()), 2) - self.assertEqual('identifier' in response.data['research_dataset']['directories'][0]['details'], True) - self.assertEqual('directory_path' in response.data['research_dataset']['directories'][0]['details'], True) + response = self.client.get( + "/rest/datasets/12?file_details&directory_fields=identifier,directory_path" + ) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual( + len(response.data["research_dataset"]["directories"][0]["details"].keys()), + 2, + ) + self.assertEqual( + "identifier" in response.data["research_dataset"]["directories"][0]["details"], + True, + ) + self.assertEqual( + "directory_path" in response.data["research_dataset"]["directories"][0]["details"], + True, + ) def test_retrieve_directory_byte_size_and_file_count(self): """ There is some additional logic involved in retrieving byte_size and file_count, which warrants targeted tests for just those fields. 
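The same field trimming applies when directory details are expanded through the datasets API; a sketch of the shape asserted below:

    details = self.client.get(
        "/rest/datasets/12?file_details&directory_fields=identifier,byte_size"
    ).data["research_dataset"]["directories"][0]["details"]
    # only the requested keys survive serialization
    assert set(details) == {"identifier", "byte_size"}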
""" - response = self.client.get('/rest/datasets/12?file_details&directory_fields=identifier,byte_size') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['directories'][0]['details'].keys()), 2) - self.assertEqual('identifier' in response.data['research_dataset']['directories'][0]['details'], True) - self.assertEqual('byte_size' in response.data['research_dataset']['directories'][0]['details'], True) + response = self.client.get( + "/rest/datasets/12?file_details&directory_fields=identifier,byte_size" + ) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual( + len(response.data["research_dataset"]["directories"][0]["details"].keys()), + 2, + ) + self.assertEqual( + "identifier" in response.data["research_dataset"]["directories"][0]["details"], + True, + ) + self.assertEqual( + "byte_size" in response.data["research_dataset"]["directories"][0]["details"], + True, + ) - response = self.client.get('/rest/datasets/12?file_details&directory_fields=identifier,file_count') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['directories'][0]['details'].keys()), 2) - self.assertEqual('identifier' in response.data['research_dataset']['directories'][0]['details'], True) - self.assertEqual('file_count' in response.data['research_dataset']['directories'][0]['details'], True) + response = self.client.get( + "/rest/datasets/12?file_details&directory_fields=identifier,file_count" + ) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual( + len(response.data["research_dataset"]["directories"][0]["details"].keys()), + 2, + ) + self.assertEqual( + "identifier" in response.data["research_dataset"]["directories"][0]["details"], + True, + ) + self.assertEqual( + "file_count" in response.data["research_dataset"]["directories"][0]["details"], + True, + ) def test_retrieve_requested_file_fields_only(self): - response = self.client.get('/rest/datasets/12?file_details&file_fields=identifier,file_path') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['files'][0]['details'].keys()), 2) - self.assertEqual('identifier' in response.data['research_dataset']['files'][0]['details'], True) - self.assertEqual('file_path' in response.data['research_dataset']['files'][0]['details'], True) + response = self.client.get( + "/rest/datasets/12?file_details&file_fields=identifier,file_path" + ) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data["research_dataset"]["files"][0]["details"].keys()), 2) + self.assertEqual( + "identifier" in response.data["research_dataset"]["files"][0]["details"], + True, + ) + self.assertEqual( + "file_path" in response.data["research_dataset"]["files"][0]["details"], + True, + ) def test_retrieve_requested_file_and_directory_fields_only(self): - response = self.client.get('/rest/datasets/12?file_details&file_fields=identifier&directory_fields=id') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['files'][0]['details'].keys()), 1) - self.assertEqual('identifier' in response.data['research_dataset']['files'][0]['details'], True) - self.assertEqual(len(response.data['research_dataset']['directories'][0]['details'].keys()), 1) - self.assertEqual('id' in response.data['research_dataset']['directories'][0]['details'], True) + response = 
self.client.get( + "/rest/datasets/12?file_details&file_fields=identifier&directory_fields=id" + ) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data["research_dataset"]["files"][0]["details"].keys()), 1) + self.assertEqual( + "identifier" in response.data["research_dataset"]["files"][0]["details"], + True, + ) + self.assertEqual( + len(response.data["research_dataset"]["directories"][0]["details"].keys()), + 1, + ) + self.assertEqual( + "id" in response.data["research_dataset"]["directories"][0]["details"], True + ) class DirectoryApiReadEndUserAccess(DirectoryApiReadCommon): - ''' + """ Test End User Access permissions when browsing files using /rest/directories api. Note: In these tests, the token by default does not have correct project groups. Token project groups are only made valid by calling _update_token_with_project_of_directory(). - ''' + """ def setUp(self): super().setUp() @@ -955,76 +1188,92 @@ def setUp(self): def _update_token_with_project_of_directory(self, dir_id): proj = Directory.objects.get(pk=dir_id).project_identifier - self.token['group_names'].append('IDA01:%s' % proj) - self._use_http_authorization(method='bearer', token=self.token) + self.token["group_names"].append("IDA01:%s" % proj) + self._use_http_authorization(method="bearer", token=self.token) @responses.activate def test_user_can_browse_files_from_their_projects(self): - ''' + """ Ensure users can only read files from /rest/directories owned by them. - ''' - self._use_http_authorization(method='bearer', token=self.token) + """ + self._use_http_authorization(method="bearer", token=self.token) # first read files without project access - should fail - response = self.client.get('/rest/directories/1') + response = self.client.get("/rest/directories/1") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - response = self.client.get('/rest/directories/1/files') + response = self.client.get("/rest/directories/1/files") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) # set user to same project as previous files and try again. 
should now succeed self._update_token_with_project_of_directory(1) - response = self.client.get('/rest/directories/1') + response = self.client.get("/rest/directories/1") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get('/rest/directories/1/files') + response = self.client.get("/rest/directories/1/files") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) @responses.activate def test_browsing_by_project_and_file_path_is_protected(self): - self._use_http_authorization(method='bearer', token=self.token) + self._use_http_authorization(method="bearer", token=self.token) dr = Directory.objects.get(pk=2) - response = self.client.get('/rest/directories/files?path=%s&project=%s' % - (dr.directory_path, dr.project_identifier)) + response = self.client.get( + "/rest/directories/files?path=%s&project=%s" + % (dr.directory_path, dr.project_identifier) + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) self._update_token_with_project_of_directory(2) - response = self.client.get('/rest/directories/files?path=%s&project=%s' % - (dr.directory_path, dr.project_identifier)) + response = self.client.get( + "/rest/directories/files?path=%s&project=%s" + % (dr.directory_path, dr.project_identifier) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) @responses.activate def test_browsing_in_cr_context(self): - ''' + """ Cr with open access type should be available for any end-user api user. Browsing files for a cr with restricted access type should be forbidden for non-owner (or service) user. - ''' + """ cr = CatalogRecord.objects.get(pk=1) - self._use_http_authorization(method='bearer', token=self.token) - response = self.client.get('/rest/directories/3/files?cr_identifier={0}'.format(cr.identifier)) + self._use_http_authorization(method="bearer", token=self.token) + response = self.client.get( + "/rest/directories/3/files?cr_identifier={0}".format(cr.identifier) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - cr.research_dataset['access_rights']['access_type']['identifier'] = ACCESS_TYPES['restricted'] + cr.research_dataset["access_rights"]["access_type"]["identifier"] = ACCESS_TYPES[ + "restricted" + ] cr.force_save() - response = self.client.get('/rest/directories/3/files?cr_identifier={0}'.format(cr.identifier)) + response = self.client.get( + "/rest/directories/3/files?cr_identifier={0}".format(cr.identifier) + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) @responses.activate def test_browsing_in_not_cr_context(self): - ''' + """ Cr with open access type should be available for any end-user api user. Browsing files for a cr with restricted access type should be forbidden for non-owner (or service) user. 
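+ With the not_cr_identifier parameter the same access rules apply, but the listing contains only the files of the directory that do not belong to the given cr.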
- ''' + """ cr = CatalogRecord.objects.get(pk=1) - self._use_http_authorization(method='bearer', token=self.token) - response = self.client.get('/rest/directories/3/files?not_cr_identifier={0}'.format(cr.identifier)) + self._use_http_authorization(method="bearer", token=self.token) + response = self.client.get( + "/rest/directories/3/files?not_cr_identifier={0}".format(cr.identifier) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - cr.research_dataset['access_rights']['access_type']['identifier'] = ACCESS_TYPES['restricted'] + cr.research_dataset["access_rights"]["access_type"]["identifier"] = ACCESS_TYPES[ + "restricted" + ] cr.force_save() - response = self.client.get('/rest/directories/3/files?not_cr_identifier={0}'.format(cr.identifier)) + response = self.client.get( + "/rest/directories/3/files?not_cr_identifier={0}".format(cr.identifier) + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) @@ -1043,149 +1292,193 @@ def test_read_directory_with_default_limit_pagination(self): """ Test browsing files with pagination """ - file_dict = self._get_dirs_files_ids('/rest/directories/24/files') - - response = self.client.get('/rest/directories/24/files?pagination') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 10) - self.assertEqual(len(response.data['results']['files']), 0) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict['directories'][0]) - self.assertEqual(response.data['results']['directories'][9]['id'], file_dict['directories'][9]) - - next_link = response.data['next'].split('http://testserver')[1] + file_dict = self._get_dirs_files_ids("/rest/directories/24/files") + + response = self.client.get("/rest/directories/24/files?pagination") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data["results"]["directories"]), 10) + self.assertEqual(len(response.data["results"]["files"]), 0) + self.assertEqual( + response.data["results"]["directories"][0]["id"], + file_dict["directories"][0], + ) + self.assertEqual( + response.data["results"]["directories"][9]["id"], + file_dict["directories"][9], + ) + + next_link = response.data["next"].split("http://testserver")[1] response = self.client.get(next_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 4) - self.assertEqual(len(response.data['results']['files']), 6) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict['directories'][10]) - self.assertEqual(response.data['results']['directories'][3]['id'], file_dict['directories'][13]) - self.assertEqual(response.data['results']['files'][0]['id'], file_dict['files'][0]) - self.assertEqual(response.data['results']['files'][5]['id'], file_dict['files'][5]) - - next_link = response.data['next'].split('http://testserver')[1] + self.assertEqual(len(response.data["results"]["directories"]), 4) + self.assertEqual(len(response.data["results"]["files"]), 6) + self.assertEqual( + response.data["results"]["directories"][0]["id"], + file_dict["directories"][10], + ) + self.assertEqual( + response.data["results"]["directories"][3]["id"], + file_dict["directories"][13], + ) + self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][0]) + self.assertEqual(response.data["results"]["files"][5]["id"], file_dict["files"][5]) + + next_link = 
response.data["next"].split("http://testserver")[1] response = self.client.get(next_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 0) - self.assertEqual(len(response.data['results']['files']), 10) - self.assertEqual(response.data['results']['files'][0]['id'], file_dict['files'][6]) - self.assertEqual(response.data['results']['files'][9]['id'], file_dict['files'][15]) + self.assertEqual(len(response.data["results"]["directories"]), 0) + self.assertEqual(len(response.data["results"]["files"]), 10) + self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][6]) + self.assertEqual(response.data["results"]["files"][9]["id"], file_dict["files"][15]) - prev_link = response.data['previous'].split('http://testserver')[1] + prev_link = response.data["previous"].split("http://testserver")[1] response = self.client.get(prev_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 4) - self.assertEqual(len(response.data['results']['files']), 6) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict['directories'][10]) - self.assertEqual(response.data['results']['directories'][3]['id'], file_dict['directories'][13]) - self.assertEqual(response.data['results']['files'][0]['id'], file_dict['files'][0]) - self.assertEqual(response.data['results']['files'][5]['id'], file_dict['files'][5]) + self.assertEqual(len(response.data["results"]["directories"]), 4) + self.assertEqual(len(response.data["results"]["files"]), 6) + self.assertEqual( + response.data["results"]["directories"][0]["id"], + file_dict["directories"][10], + ) + self.assertEqual( + response.data["results"]["directories"][3]["id"], + file_dict["directories"][13], + ) + self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][0]) + self.assertEqual(response.data["results"]["files"][5]["id"], file_dict["files"][5]) def test_read_directory_with_custom_limit_pagination(self): - file_dict = self._get_dirs_files_ids('/rest/directories/24/files') - - response = self.client.get('/rest/directories/24/files?limit=4&offset=12&pagination') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 2) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict['directories'][12]) - self.assertEqual(response.data['results']['directories'][1]['id'], file_dict['directories'][13]) - self.assertEqual(len(response.data['results']['files']), 2) - self.assertEqual(response.data['results']['files'][0]['id'], file_dict['files'][0]) - self.assertEqual(response.data['results']['files'][1]['id'], file_dict['files'][1]) - - next_link = response.data['next'].split('http://testserver')[1] - prev_link = response.data['previous'].split('http://testserver')[1] + file_dict = self._get_dirs_files_ids("/rest/directories/24/files") + + response = self.client.get("/rest/directories/24/files?limit=4&offset=12&pagination") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data["results"]["directories"]), 2) + self.assertEqual( + response.data["results"]["directories"][0]["id"], + file_dict["directories"][12], + ) + self.assertEqual( + response.data["results"]["directories"][1]["id"], + file_dict["directories"][13], + ) + self.assertEqual(len(response.data["results"]["files"]), 2) + 
self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][0]) + self.assertEqual(response.data["results"]["files"][1]["id"], file_dict["files"][1]) + + next_link = response.data["next"].split("http://testserver")[1] + prev_link = response.data["previous"].split("http://testserver")[1] response = self.client.get(next_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 0) - self.assertEqual(len(response.data['results']['files']), 4) - self.assertEqual(response.data['results']['files'][0]['id'], file_dict['files'][2]) - self.assertEqual(response.data['results']['files'][3]['id'], file_dict['files'][5]) + self.assertEqual(len(response.data["results"]["directories"]), 0) + self.assertEqual(len(response.data["results"]["files"]), 4) + self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][2]) + self.assertEqual(response.data["results"]["files"][3]["id"], file_dict["files"][5]) response = self.client.get(prev_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 4) - self.assertEqual(len(response.data['results']['files']), 0) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict['directories'][8]) - self.assertEqual(response.data['results']['directories'][3]['id'], file_dict['directories'][11]) + self.assertEqual(len(response.data["results"]["directories"]), 4) + self.assertEqual(len(response.data["results"]["files"]), 0) + self.assertEqual( + response.data["results"]["directories"][0]["id"], + file_dict["directories"][8], + ) + self.assertEqual( + response.data["results"]["directories"][3]["id"], + file_dict["directories"][11], + ) def test_read_directory_with_recursive_and_pagination(self): - ''' + """ Query with recursive flag must return only files as a list - ''' - file_list = self._get_dirs_files_ids('/rest/directories/24/files?recursive') + """ + file_list = self._get_dirs_files_ids("/rest/directories/24/files?recursive") - response = self.client.get('/rest/directories/24/files?recursive&pagination') + response = self.client.get("/rest/directories/24/files?recursive&pagination") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 10) - self.assertEqual(response.data['results'][0]['id'], file_list[0]) - self.assertEqual(response.data['results'][9]['id'], file_list[9]) + self.assertEqual(len(response.data["results"]), 10) + self.assertEqual(response.data["results"][0]["id"], file_list[0]) + self.assertEqual(response.data["results"][9]["id"], file_list[9]) - next_link = response.data['next'].split('http://testserver')[1] + next_link = response.data["next"].split("http://testserver")[1] response = self.client.get(next_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 10) - self.assertEqual(response.data['results'][0]['id'], file_list[10]) - self.assertEqual(response.data['results'][9]['id'], file_list[19]) + self.assertEqual(len(response.data["results"]), 10) + self.assertEqual(response.data["results"][0]["id"], file_list[10]) + self.assertEqual(response.data["results"][9]["id"], file_list[19]) - prev_link = response.data['previous'].split('http://testserver')[1] + prev_link = response.data["previous"].split("http://testserver")[1] response = self.client.get(prev_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - 
self.assertEqual(len(response.data['results']), 10) - self.assertEqual(response.data['results'][0]['id'], file_list[0]) - self.assertEqual(response.data['results'][9]['id'], file_list[9]) + self.assertEqual(len(response.data["results"]), 10) + self.assertEqual(response.data["results"][0]["id"], file_list[0]) + self.assertEqual(response.data["results"][9]["id"], file_list[9]) def test_read_directory_with_dirs_only_and_pagination(self): - ''' + """ Query with directories_only flag must return only directories - ''' - file_dict = self._get_dirs_files_ids('/rest/directories/24/files?directories_only')['directories'] + """ + file_dict = self._get_dirs_files_ids("/rest/directories/24/files?directories_only")[ + "directories" + ] - response = self.client.get('/rest/directories/24/files?directories_only&pagination=true') + response = self.client.get("/rest/directories/24/files?directories_only&pagination=true") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['results']['directories']), 10) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict[0]) - self.assertEqual(response.data['results']['directories'][9]['id'], file_dict[9]) + self.assertEqual(len(response.data["results"]["directories"]), 10) + self.assertEqual(response.data["results"]["directories"][0]["id"], file_dict[0]) + self.assertEqual(response.data["results"]["directories"][9]["id"], file_dict[9]) - next_link = response.data['next'].split('http://testserver')[1] + next_link = response.data["next"].split("http://testserver")[1] response = self.client.get(next_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 4) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict[10]) - self.assertEqual(response.data['results']['directories'][3]['id'], file_dict[13]) + self.assertEqual(len(response.data["results"]["directories"]), 4) + self.assertEqual(response.data["results"]["directories"][0]["id"], file_dict[10]) + self.assertEqual(response.data["results"]["directories"][3]["id"], file_dict[13]) - prev_link = response.data['previous'].split('http://testserver')[1] + prev_link = response.data["previous"].split("http://testserver")[1] response = self.client.get(prev_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 10) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict[0]) - self.assertEqual(response.data['results']['directories'][9]['id'], file_dict[9]) + self.assertEqual(len(response.data["results"]["directories"]), 10) + self.assertEqual(response.data["results"]["directories"][0]["id"], file_dict[0]) + self.assertEqual(response.data["results"]["directories"][9]["id"], file_dict[9]) def test_read_directory_with_parent_and_pagination(self): - ''' + """ Query with include_parent flag must also return the parent directory's own fields - ''' - file_dict = self._get_dirs_files_ids('/rest/directories/24/files?include_parent') - - response = self.client.get('/rest/directories/24/files?include_parent&pagination=true') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 10) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict['directories'][0]) - self.assertEqual(response.data['results']['directories'][9]['id'], file_dict['directories'][9]) - 
self.assertEqual(response.data['results']['id'], 24) - self.assertEqual(response.data['results']['directory_name'], "10") - - next_link = response.data['next'].split('http://testserver')[1] + """ + file_dict = self._get_dirs_files_ids("/rest/directories/24/files?include_parent") + + response = self.client.get("/rest/directories/24/files?include_parent&pagination=true") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data["results"]["directories"]), 10) + self.assertEqual( + response.data["results"]["directories"][0]["id"], + file_dict["directories"][0], + ) + self.assertEqual( + response.data["results"]["directories"][9]["id"], + file_dict["directories"][9], + ) + self.assertEqual(response.data["results"]["id"], 24) + self.assertEqual(response.data["results"]["directory_name"], "10") + + next_link = response.data["next"].split("http://testserver")[1] response = self.client.get(next_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 4) - self.assertEqual(len(response.data['results']['files']), 6) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict['directories'][10]) - self.assertEqual(response.data['results']['directories'][3]['id'], file_dict['directories'][13]) - self.assertEqual(response.data['results']['files'][0]['id'], file_dict['files'][0]) - self.assertEqual(response.data['results']['files'][5]['id'], file_dict['files'][5]) - self.assertEqual(response.data['results']['id'], 24) - self.assertEqual(response.data['results']['directory_name'], "10") + self.assertEqual(len(response.data["results"]["directories"]), 4) + self.assertEqual(len(response.data["results"]["files"]), 6) + self.assertEqual( + response.data["results"]["directories"][0]["id"], + file_dict["directories"][10], + ) + self.assertEqual( + response.data["results"]["directories"][3]["id"], + file_dict["directories"][13], + ) + self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][0]) + self.assertEqual(response.data["results"]["files"][5]["id"], file_dict["files"][5]) + self.assertEqual(response.data["results"]["id"], 24) + self.assertEqual(response.data["results"]["directory_name"], "10") class DirectoryApiReadFileNameDirectoryNameTests(DirectoryApiReadCommon): @@ -1200,144 +1493,172 @@ def setUp(self): def test_browsing_directory_with_file_name(self): - response = self.client.get('/rest/directories/24/files?file_name=') + response = self.client.get("/rest/directories/24/files?file_name=") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['files']), 51) + self.assertEqual(len(response.data["files"]), 51) - response = self.client.get('/rest/directories/24/files?file_name=_name_1') + response = self.client.get("/rest/directories/24/files?file_name=_name_1") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['files']), 21) + self.assertEqual(len(response.data["files"]), 21) - response = self.client.get('/rest/directories/24/files?file_name=0') + response = self.client.get("/rest/directories/24/files?file_name=0") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['files']), 15) + self.assertEqual(len(response.data["files"]), 15) - response = self.client.get('/rest/directories/24/files?file_name=_name_118') + response = self.client.get("/rest/directories/24/files?file_name=_name_118") 
self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['files']), 1) + self.assertEqual(len(response.data["files"]), 1) def test_browsing_directory_with_directory_name(self): - response = self.client.get('/rest/directories/24/files?directory_name=') + response = self.client.get("/rest/directories/24/files?directory_name=") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 5) + self.assertEqual(len(response.data["directories"]), 5) - response = self.client.get('/rest/directories/24/files?directory_name=dir_1') + response = self.client.get("/rest/directories/24/files?directory_name=dir_1") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 1) + self.assertEqual(len(response.data["directories"]), 1) - response = self.client.get('/rest/directories/24/files?directory_name=dir') + response = self.client.get("/rest/directories/24/files?directory_name=dir") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 5) + self.assertEqual(len(response.data["directories"]), 5) def test_browsing_directory_with_directory_and_file_name(self): - response = self.client.get('/rest/directories/24/files?directory_name=&file_name=') + response = self.client.get("/rest/directories/24/files?directory_name=&file_name=") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 5) - self.assertEqual(len(response.data['files']), 51) + self.assertEqual(len(response.data["directories"]), 5) + self.assertEqual(len(response.data["files"]), 51) - response = self.client.get('/rest/directories/24/files?directory_name=dir_1&file_name=file_name_120') + response = self.client.get( + "/rest/directories/24/files?directory_name=dir_1&file_name=file_name_120" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual(len(response.data['files']), 1) + self.assertEqual(len(response.data["directories"]), 1) + self.assertEqual(len(response.data["files"]), 1) - response = self.client.get('/rest/directories/24/files?directory_name=dir&file_name=not_existing') + response = self.client.get( + "/rest/directories/24/files?directory_name=dir&file_name=not_existing" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 5) - self.assertEqual(len(response.data['files']), 0) + self.assertEqual(len(response.data["directories"]), 5) + self.assertEqual(len(response.data["files"]), 0) def test_browsing_directory_with_file_and_dir_name_and_pagination(self): # second page should return last filtered files - response = self.client.get('/rest/directories/24/files?file_name=0&pagination&limit=10&offset=10') + response = self.client.get( + "/rest/directories/24/files?file_name=0&pagination&limit=10&offset=10" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 0) - self.assertEqual(len(response.data['results']['files']), 10) + self.assertEqual(len(response.data["results"]["directories"]), 0) + self.assertEqual(len(response.data["results"]["files"]), 10) # first page with limit of 3 should return first filtered directories - response = self.client.get('/rest/directories/24/files?directory_name=dir_&pagination&limit=3') + response = self.client.get( + 
"/rest/directories/24/files?directory_name=dir_&pagination&limit=3" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 3) - self.assertEqual(len(response.data['results']['files']), 0) + self.assertEqual(len(response.data["results"]["directories"]), 3) + self.assertEqual(len(response.data["results"]["files"]), 0) # combined directory and file name filters with default pagination should return matches from both - response = self.client.get('/rest/directories/24/files?directory_name=dir_1&file_name=0&pagination') + response = self.client.get( + "/rest/directories/24/files?directory_name=dir_1&file_name=0&pagination" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 1) - self.assertEqual(len(response.data['results']['files']), 9) + self.assertEqual(len(response.data["results"]["directories"]), 1) + self.assertEqual(len(response.data["results"]["files"]), 9) def test_browsing_directory_with_directory_and_file_name_and_dirs_only(self): - response = self.client.get('/rest/directories/24/files?file_name=_name_11&directories_only') + response = self.client.get("/rest/directories/24/files?file_name=_name_11&directories_only") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['directories']), 5) - self.assertEqual(response.data.get('files'), None) + self.assertEqual(len(response.data["directories"]), 5) + self.assertEqual(response.data.get("files"), None) - response = self.client.get('/rest/directories/24/files?directory_name=dir_5&directories_only') + response = self.client.get( + "/rest/directories/24/files?directory_name=dir_5&directories_only" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual(response.data.get('files'), None) + self.assertEqual(len(response.data["directories"]), 1) + self.assertEqual(response.data.get("files"), None) def test_browsing_directory_with_directory_and_file_name_and_recursive(self): - response = self.client.get('/rest/directories/24/files?file_name=_name_11&recursive') + response = self.client.get("/rest/directories/24/files?file_name=_name_11&recursive") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data), 10) # should have one file from directory with the rest of filtered files - response = self.client.get('/rest/directories/24/files?directory_name=dir_5&file_name=5&recursive&depth=*') + response = self.client.get( + "/rest/directories/24/files?directory_name=dir_5&file_name=5&recursive&depth=*" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data), 6) - def test_browsing_directory_with_directory_and_file_name_and_cr_identifier_and_not_cr_identifier(self): + def test_browsing_directory_with_directory_and_file_name_and_cr_identifier_and_not_cr_identifier( + self, + ): # tests for directory_name and cr_identifier - response = self.client.get('/rest/directories/17/files?directory_name=2&cr_identifier=13') + response = self.client.get("/rest/directories/17/files?directory_name=2&cr_identifier=13") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['directories']), 1) + self.assertEqual(len(response.data["directories"]), 1) - response = self.client.get('/rest/directories/17/files?directory_name=phase&cr_identifier=13') + response = self.client.get( + 
"/rest/directories/17/files?directory_name=phase&cr_identifier=13" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['directories']), 2) + self.assertEqual(len(response.data["directories"]), 2) - response = self.client.get('/rest/directories/17/files?directory_name=&cr_identifier=13') + response = self.client.get("/rest/directories/17/files?directory_name=&cr_identifier=13") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['directories']), 2) + self.assertEqual(len(response.data["directories"]), 2) # tests for directory_name and not_cr_identifier - response = self.client.get('/rest/directories/17/files?directory_name=phase&not_cr_identifier=13') + response = self.client.get( + "/rest/directories/17/files?directory_name=phase&not_cr_identifier=13" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['directories']), 0) + self.assertEqual(len(response.data["directories"]), 0) - response = self.client.get('/rest/directories/17/files?directory_name=2&not_cr_identifier=13') + response = self.client.get( + "/rest/directories/17/files?directory_name=2&not_cr_identifier=13" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['directories']), 0) + self.assertEqual(len(response.data["directories"]), 0) # tests for file_name and cr_identifier - response = self.client.get('/rest/directories/12/files?file_name=22&cr_identifier=13') + response = self.client.get("/rest/directories/12/files?file_name=22&cr_identifier=13") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['files']), 1) + self.assertEqual(len(response.data["files"]), 1) - response = self.client.get('/rest/directories/12/files?file_name=name_&cr_identifier=13') + response = self.client.get("/rest/directories/12/files?file_name=name_&cr_identifier=13") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['files']), 3) + self.assertEqual(len(response.data["files"]), 3) - response = self.client.get('/rest/directories/12/files?file_name=&cr_identifier=13') + response = self.client.get("/rest/directories/12/files?file_name=&cr_identifier=13") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['files']), 3) + self.assertEqual(len(response.data["files"]), 3) - response = self.client.get('/rest/directories/17/files?file_name=&cr_identifier=13&directories_only') + response = self.client.get( + "/rest/directories/17/files?file_name=&cr_identifier=13&directories_only" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('files'), None) + self.assertEqual(response.data.get("files"), None) # tests for file_name and not_cr_identifier - response = self.client.get('/rest/directories/16/files?file_name=name&not_cr_identifier=13') + response = self.client.get("/rest/directories/16/files?file_name=name&not_cr_identifier=13") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['files']), 1) + self.assertEqual(len(response.data["files"]), 1) - response = self.client.get('/rest/directories/16/files?file_name=name_2&not_cr_identifier=13') + response = self.client.get( + "/rest/directories/16/files?file_name=name_2&not_cr_identifier=13" + ) 
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['files']), 1) + self.assertEqual(len(response.data["files"]), 1) - response = self.client.get('/rest/directories/16/files?file_name=name_1&not_cr_identifier=13') + response = self.client.get( + "/rest/directories/16/files?file_name=name_1&not_cr_identifier=13" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['files']), 0) + self.assertEqual(len(response.data["files"]), 0) diff --git a/src/metax_api/tests/api/rest/base/views/directories/write.py b/src/metax_api/tests/api/rest/base/views/directories/write.py index f363a6d2..c9268578 100755 --- a/src/metax_api/tests/api/rest/base/views/directories/write.py +++ b/src/metax_api/tests/api/rest/base/views/directories/write.py @@ -18,17 +18,17 @@ def setUpClass(cls): """ Loaded only once for test cases inside this class. """ - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(DirectoryApiWriteCommon, cls).setUpClass() def setUp(self): """ Reloaded for every test case """ - call_command('loaddata', test_data_file_path, verbosity=0) - dir_from_test_data = self._get_object_from_test_data('directory') - self.identifier = dir_from_test_data['identifier'] - self.directory_name = dir_from_test_data['directory_name'] + call_command("loaddata", test_data_file_path, verbosity=0) + dir_from_test_data = self._get_object_from_test_data("directory") + self.identifier = dir_from_test_data["identifier"] + self.directory_name = dir_from_test_data["directory_name"] """ New data that is sent to the server for POST, PUT, PATCH requests. Modified @@ -39,162 +39,219 @@ def setUp(self): self._use_http_authorization() def _get_new_test_data(self): - from_test_data = self._get_object_from_test_data('directory', requested_index=0) - from_test_data.update({ - "identifier": "urn:nbn:fi:csc-ida201401200000000001", - }) + from_test_data = self._get_object_from_test_data("directory", requested_index=0) + from_test_data.update( + { + "identifier": "urn:nbn:fi:csc-ida201401200000000001", + } + ) return from_test_data def _get_second_new_test_data(self): from_test_data = self._get_new_test_data() - from_test_data.update({ - "identifier": "urn:nbn:fi:csc-ida201401200000000002", - }) + from_test_data.update( + { + "identifier": "urn:nbn:fi:csc-ida201401200000000002", + } + ) return from_test_data class DirectoryApiWriteTests(DirectoryApiWriteCommon): - def test_create_files_for_catalog_record(self): """ Tests the flow of creating files and assigning them to a dataset. 
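+ The flow: files are first created via POST /rest/files, a dataset is created via POST /rest/datasets, the files are then attached to it through research_dataset.files, and the end result is verified by browsing /rest/directories with the cr_identifier and not_cr_identifier parameters.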
""" - project = 'project-test-files' - directory_path = '/dir/' + project = "project-test-files" + directory_path = "/dir/" files = [] for n in range(1, 4): - file_path = directory_path + 'file' + str(n) - f = self._get_new_file_data(str(n), project=project, file_path=file_path, - directory_path=directory_path, open_access=True) - f.pop('parent_directory', None) + file_path = directory_path + "file" + str(n) + f = self._get_new_file_data( + str(n), + project=project, + file_path=file_path, + directory_path=directory_path, + open_access=True, + ) + f.pop("parent_directory", None) files.append(f) cr = self._get_ida_dataset_without_files() - cr['cumulative_state'] = 1 + cr["cumulative_state"] = 1 - fields = 'file_fields=id,identifier,file_path&directory_fields=id,identifier,directory_path,file_count' + fields = "file_fields=id,identifier,file_path&directory_fields=id,identifier,directory_path,file_count" # start test # - self._set_http_authorization('service') + self._set_http_authorization("service") # adding file1 to /dir/ - response = self.client.post('/rest/files', files[0], format='json') + response = self.client.post("/rest/files", files[0], format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - file1_id = response.data['identifier'] - file1_title = response.data['file_name'] + file1_id = response.data["identifier"] + file1_title = response.data["file_name"] # adding file2 to /dir/ - response = self.client.post('/rest/files', files[1], format='json') + response = self.client.post("/rest/files", files[1], format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - file2_id = response.data['identifier'] - file2_title = response.data['file_name'] + file2_id = response.data["identifier"] + file2_title = response.data["file_name"] # adding file3 to /dir/ - response = self.client.post('/rest/files', files[2], format='json') + response = self.client.post("/rest/files", files[2], format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - file3_id = response.data['identifier'] - file3_title = response.data['file_name'] + file3_id = response.data["identifier"] + file3_title = response.data["file_name"] # creating dataset - response = self.client.post('/rest/datasets', cr, format='json') + response = self.client.post("/rest/datasets", cr, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['id'] - cr_rd_identifier = response.data['research_dataset']['preferred_identifier'] + cr_id = response.data["id"] + cr_rd_identifier = response.data["research_dataset"]["preferred_identifier"] # getting dataset root directory identifier (%2F=='/') - root_dir = self.client.get('/rest/directories/files?project={}&path=%2F&include_parent'.format(project)) - root_id = root_dir.data['id'] + root_dir = self.client.get( + "/rest/directories/files?project={}&path=%2F&include_parent".format(project) + ) + root_id = root_dir.data["id"] # getting dataset files from / - response = self.client.get('/rest/directories/files?cr_identifier={}&project={}&path=%2F&{}' - .format(cr_id, project, fields)) - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, 'Directory must be empty') + response = self.client.get( + "/rest/directories/files?cr_identifier={}&project={}&path=%2F&{}".format( + cr_id, project, fields + ) + ) + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, "Directory must be empty") # adding file_1 to dataset - 
cr["research_dataset"]["files"] = [ { "identifier": file1_id, "title": file1_title, "use_category": { "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/configuration" - } + }, } ] - cr['research_dataset']["preferred_identifier"] = cr_rd_identifier - response = self.client.put('/rest/datasets/{}'.format(cr_id), cr, format='json') + cr["research_dataset"]["preferred_identifier"] = cr_rd_identifier + response = self.client.put("/rest/datasets/{}".format(cr_id), cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # getting dataset files from / - response = self.client.get('/rest/directories/{}/files?cr_identifier={}&fields'.format(root_id, cr_id)) + response = self.client.get( + "/rest/directories/{}/files?cr_identifier={}&fields".format(root_id, cr_id) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - dirs = response.data['directories'] - self.assertEqual(len(dirs), 1, 'Expected 1 directory') - self.assertEqual(dirs[0]['file_count'], 1, 'Expected 1 file in directory %s' % dirs[0]['directory_path']) + dirs = response.data["directories"] + self.assertEqual(len(dirs), 1, "Expected 1 directory") + self.assertEqual( + dirs[0]["file_count"], + 1, + "Expected 1 file in directory %s" % dirs[0]["directory_path"], + ) # getting dataset files from /dir/ - response = self.client.get('/rest/directories/{}/files?cr_identifier={}&include_parent&{}' - .format(dirs[0]['id'], cr_id, fields)) - self.assertEqual(len(response.data['files']), 1, 'Expected 1 file in directory {}' - .format(dirs[0]['directory_path'])) - self.assertEqual(response.data['file_count'], len(response.data['files']), - 'Expected 1 file in parent file_count') + response = self.client.get( + "/rest/directories/{}/files?cr_identifier={}&include_parent&{}".format( + dirs[0]["id"], cr_id, fields + ) + ) + self.assertEqual( + len(response.data["files"]), + 1, + "Expected 1 file in directory {}".format(dirs[0]["directory_path"]), + ) + self.assertEqual( + response.data["file_count"], + len(response.data["files"]), + "Expected 1 file in parent file_count", + ) # getting non-dataset files from /dir/ - response = self.client.get('/rest/directories/{}/files?not_cr_identifier={}&include_parent&{}' - .format(dirs[0]['id'], cr_id, fields)) - self.assertEqual(len(response.data['files']), 2, 'Expected 2 files in directory {}' - .format(dirs[0]['directory_path'])) - self.assertEqual(response.data['file_count'], len(response.data['files']), - 'Expected 2 file in parent file_count') + response = self.client.get( + "/rest/directories/{}/files?not_cr_identifier={}&include_parent&{}".format( + dirs[0]["id"], cr_id, fields + ) + ) + self.assertEqual( + len(response.data["files"]), + 2, + "Expected 2 files in directory {}".format(dirs[0]["directory_path"]), + ) + self.assertEqual( + response.data["file_count"], + len(response.data["files"]), + "Expected 2 files in parent file_count", + ) # adding file2 and file3 to dataset - cr['research_dataset']['files'] = cr['research_dataset']['files'] + [ + cr["research_dataset"]["files"] = cr["research_dataset"]["files"] + [ { "identifier": file2_id, "title": file2_title, "use_category": { "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/configuration" - } + }, }, { "identifier": file3_id, "title": file3_title, "use_category": { "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/configuration" - } - } + }, + }, ] - 
cr['research_dataset']['preferred_identifier'] = cr_rd_identifier + cr["research_dataset"]["preferred_identifier"] = cr_rd_identifier - response = self.client.patch('/rest/datasets/{}'.format(cr_id), cr, format='json') + response = self.client.patch("/rest/datasets/{}".format(cr_id), cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # getting dataset files from /dir/ - response = self.client.get('/rest/directories/{}/files?cr_identifier={}&include_parent&{}' - .format(dirs[0]['id'], cr_id, fields)) - self.assertEqual(len(response.data['files']), 3, 'Expected 3 files in directory {}' - .format(dirs[0]['directory_path'])) - self.assertEqual(response.data['file_count'], len(response.data['files']), - 'Expected 3 file in parent file_count') + response = self.client.get( + "/rest/directories/{}/files?cr_identifier={}&include_parent&{}".format( + dirs[0]["id"], cr_id, fields + ) + ) + self.assertEqual( + len(response.data["files"]), + 3, + "Expected 3 files in directory {}".format(dirs[0]["directory_path"]), + ) + self.assertEqual( + response.data["file_count"], + len(response.data["files"]), + "Expected 3 files in parent file_count", + ) # getting non-dataset files from /dir/ - response = self.client.get('/rest/directories/{}/files?not_cr_identifier={}&include_parent&{}' - .format(dirs[0]['id'], cr_id, fields)) - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, 'Directory must be empty') + response = self.client.get( + "/rest/directories/{}/files?not_cr_identifier={}&include_parent&{}".format( + dirs[0]["id"], cr_id, fields + ) + ) + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, "Directory must be empty") # getting dataset files from / - response = self.client.get('/rest/directories/{}/files?cr_identifier={}&fields'.format(root_id, cr_id)) + response = self.client.get( + "/rest/directories/{}/files?cr_identifier={}&fields".format(root_id, cr_id) + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - dirs = response.data['directories'] - self.assertEqual(len(dirs), 1, 'Expected 1 directory') - self.assertEqual(dirs[0]['file_count'], 3, 'Expected 3 files in directory %s' % dirs[0]['directory_path']) + dirs = response.data["directories"] + self.assertEqual(len(dirs), 1, "Expected 1 directory") + self.assertEqual( + dirs[0]["file_count"], + 3, + "Expected 3 files in directory %s" % dirs[0]["directory_path"], + ) # getting non-dataset files from / - response = self.client.get('/rest/directories/{}/files?not_cr_identifier={}&fields'.format(root_id, cr_id)) + response = self.client.get( + "/rest/directories/{}/files?not_cr_identifier={}&fields".format(root_id, cr_id) + ) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, 'Directory must be empty') diff --git a/src/metax_api/tests/api/rest/base/views/files/read.py b/src/metax_api/tests/api/rest/base/views/files/read.py index 1e00dc6f..011ede0a 100755 --- a/src/metax_api/tests/api/rest/base/views/files/read.py +++ b/src/metax_api/tests/api/rest/base/views/files/read.py @@ -21,80 +21,94 @@ def setUpClass(cls): """ Loaded only once for test cases inside this class. 
""" - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(FileApiReadCommon, cls).setUpClass() def setUp(self): - file_from_test_data = self._get_object_from_test_data('file') - self.identifier = file_from_test_data['identifier'] - self.pk = file_from_test_data['id'] + file_from_test_data = self._get_object_from_test_data("file") + self.identifier = file_from_test_data["identifier"] + self.pk = file_from_test_data["id"] self._use_http_authorization() class FileApiReadBasicTests(FileApiReadCommon): - def test_read_file_list(self): - response = self.client.get('/rest/files') + response = self.client.get("/rest/files") self.assertEqual(response.status_code, status.HTTP_200_OK) def test_read_file_list_filter_by_project(self): proj = File.objects.get(pk=1).project_identifier file_count = File.objects.filter(project_identifier=proj).count() - response = self.client.get('/rest/files?project_identifier=%s' % proj) + response = self.client.get("/rest/files?project_identifier=%s" % proj) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['count'], file_count) + self.assertEqual(response.data["count"], file_count) def test_read_file_list_filter_by_project_and_path(self): proj = File.objects.get(pk=1).project_identifier path = "/project_x_FROZEN/Experiment_X/Phase_1/2017/01" file_count = File.objects.filter(project_identifier=proj, file_path__contains=path).count() - response = self.client.get('/rest/files?project_identifier=%s&file_path=%s' % (proj, path)) + response = self.client.get("/rest/files?project_identifier=%s&file_path=%s" % (proj, path)) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['count'], file_count) + self.assertEqual(response.data["count"], file_count) # missing project_identifier - response = self.client.get('/rest/files?file_path=%s' % path) + response = self.client.get("/rest/files?file_path=%s" % path) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_read_file_details_by_pk(self): - response = self.client.get('/rest/files/%s' % self.pk) + response = self.client.get("/rest/files/%s" % self.pk) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(hasattr(response, 'data'), True, 'Request response object is missing attribute \'data\'') - self.assertEqual('file_name' in response.data, True) - self.assertEqual(response.data['identifier'], self.identifier) - self.assertEqual('identifier' in response.data['file_storage'], True) + self.assertEqual( + hasattr(response, "data"), + True, + "Request response object is missing attribute 'data'", + ) + self.assertEqual("file_name" in response.data, True) + self.assertEqual(response.data["identifier"], self.identifier) + self.assertEqual("identifier" in response.data["file_storage"], True) def test_read_file_details_by_identifier(self): - response = self.client.get('/rest/files/%s' % self.identifier) + response = self.client.get("/rest/files/%s" % self.identifier) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(hasattr(response, 'data'), True, 'Request response object is missing attribute \'data\'') - self.assertEqual('file_name' in response.data.keys(), True) - self.assertEqual(response.data['identifier'], self.identifier) + self.assertEqual( + hasattr(response, "data"), + True, + "Request response object is missing attribute 'data'", + ) + self.assertEqual("file_name" in response.data.keys(), True) + 
self.assertEqual(response.data["identifier"], self.identifier) def test_read_file_details_not_found(self): - response = self.client.get('/rest/files/shouldnotexist') + response = self.client.get("/rest/files/shouldnotexist") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_read_file_details_checksum_relation(self): - response = self.client.get('/rest/files/%s' % self.pk) + response = self.client.get("/rest/files/%s" % self.pk) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('checksum' in response.data, True) - self.assertEqual('value' in response.data['checksum'], True) + self.assertEqual("checksum" in response.data, True) + self.assertEqual("value" in response.data["checksum"], True) def test_expand_relations(self): - response = self.client.get('/rest/files/1?expand_relation=file_storage,parent_directory') + response = self.client.get("/rest/files/1?expand_relation=file_storage,parent_directory") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('file_storage_json' in response.data['file_storage'], True, response.data['file_storage']) - self.assertEqual('date_created' in response.data['parent_directory'], True, response.data['parent_directory']) + self.assertEqual( + "file_storage_json" in response.data["file_storage"], + True, + response.data["file_storage"], + ) + self.assertEqual( + "date_created" in response.data["parent_directory"], + True, + response.data["parent_directory"], + ) class FileApiReadGetRelatedDatasets(FileApiReadCommon): - def test_get_related_datasets_ok_1(self): """ File pk 1 should belong to only 3 datasets """ - response = self.client.post('/rest/files/datasets', [1], format='json') + response = self.client.post("/rest/files/datasets", [1], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_results_length(response, 3) @@ -102,8 +116,10 @@ def test_get_related_datasets_ok_2(self): """ File identifiers listed below should belong to 5 datasets """ - file_identifiers = File.objects.filter(id__in=[1, 2, 3, 4, 5]).values_list('identifier', flat=True) - response = self.client.post('/rest/files/datasets', file_identifiers, format='json') + file_identifiers = File.objects.filter(id__in=[1, 2, 3, 4, 5]).values_list( + "identifier", flat=True + ) + response = self.client.post("/rest/files/datasets", file_identifiers, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_results_length(response, 5) @@ -111,31 +127,37 @@ def test_keysonly(self): """ Parameter ?keysonly should return just values """ - response = self.client.post('/rest/files/datasets?keys=files&keysonly', [1, 2, 121], format='json') + response = self.client.post( + "/rest/files/datasets?keys=files&keysonly", [1, 2, 121], format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_results_length(response, 2) # pid:urn:121 does not belong to any dataset - self.assertEqual(type(response.data), list, type(response.data)) # no dict keys + self._assert_results_length(response, 2) # pid:urn:121 does not belong to any dataset + self.assertEqual(type(response.data), list, type(response.data)) # no dict keys - response = self.client.post('/rest/files/datasets?keys=files&keysonly=false', [1, 2], format='json') + response = self.client.post( + "/rest/files/datasets?keys=files&keysonly=false", [1, 2], format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, 
response.data) - self.assertEqual(type(response.data), dict, response.data) # Return by keys + self.assertEqual(type(response.data), dict, response.data) # Return by keys - response = self.client.post('/rest/files/datasets?keys=datasets&keysonly', [1, 2, 14], format='json') + response = self.client.post( + "/rest/files/datasets?keys=datasets&keysonly", [1, 2, 14], format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_results_length(response, 2) # Only datasets 1 and 2 have files - self.assertEqual(type(response.data), list, type(response.data)) # no dict keys + self._assert_results_length(response, 2) # Only datasets 1 and 2 have files + self.assertEqual(type(response.data), list, type(response.data)) # no dict keys def test_get_detailed_related_datasets_ok_1(self): """ File identifiers listed below should belong to 3 datasets """ - response = self.client.post('/rest/files/datasets?keys=files', [1], format='json') + response = self.client.post("/rest/files/datasets?keys=files", [1], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_results_length(response, 1) self.assertEqual(len(list(response.data.values())[0]), 3, response.data) # Support for ?detailed - response = self.client.post('/rest/files/datasets?detailed', [1], format='json') + response = self.client.post("/rest/files/datasets?detailed", [1], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_results_length(response, 1) self.assertEqual(len(list(response.data.values())[0]), 3, response.data) @@ -146,7 +168,9 @@ def test_get_detailed_related_datasets_ok_2(self): """ file_identifiers = [1, 2, 3, 4, 5] - response = self.client.post('/rest/files/datasets?keys=files', file_identifiers, format='json') + response = self.client.post( + "/rest/files/datasets?keys=files", file_identifiers, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_results_length(response, 5) @@ -154,9 +178,17 @@ def test_get_detailed_related_datasets_ok_2(self): self.assertEqual(len(set(sum(response.data.values(), []))), 5, response.data) # check if identifiers work - file_identifiers = ['pid:urn:1', 'pid:urn:2', 'pid:urn:3', 'pid:urn:4', 'pid:urn:5'] - - response = self.client.post('/rest/files/datasets?keys=files', file_identifiers, format='json') + file_identifiers = [ + "pid:urn:1", + "pid:urn:2", + "pid:urn:3", + "pid:urn:4", + "pid:urn:5", + ] + + response = self.client.post( + "/rest/files/datasets?keys=files", file_identifiers, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_results_length(response, 5) @@ -167,7 +199,7 @@ def test_get_detailed_related_files_ok_1(self): """ Dataset identifiers listed below should have 2 files """ - response = self.client.post('/rest/files/datasets?keys=datasets', [1], format='json') + response = self.client.post("/rest/files/datasets?keys=datasets", [1], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_results_length(response, 1) self.assertEqual(len(list(response.data.values())[0]), 2, response.data) @@ -178,7 +210,9 @@ def test_get_detailed_related_files_ok_2(self): """ dataset_identifiers = [1, 2, 3, 4, 5] - response = self.client.post('/rest/files/datasets?keys=datasets', dataset_identifiers, format='json') + response = self.client.post( + "/rest/files/datasets?keys=datasets", dataset_identifiers, 
format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_results_length(response, 5) @@ -186,13 +220,17 @@ def test_get_detailed_related_files_ok_2(self): self.assertEqual(len(set(sum(response.data.values(), []))), 10, response.data) # check if identifiers work - dataset_identifiers = ["cr955e904-e3dd-4d7e-99f1-3fed446f96d1", - "cr955e904-e3dd-4d7e-99f1-3fed446f96d2", - "cr955e904-e3dd-4d7e-99f1-3fed446f96d3", - "cr955e904-e3dd-4d7e-99f1-3fed446f96d4", - "cr955e904-e3dd-4d7e-99f1-3fed446f96d5"] - - response = self.client.post('/rest/files/datasets?keys=datasets', dataset_identifiers, format='json') + dataset_identifiers = [ + "cr955e904-e3dd-4d7e-99f1-3fed446f96d1", + "cr955e904-e3dd-4d7e-99f1-3fed446f96d2", + "cr955e904-e3dd-4d7e-99f1-3fed446f96d3", + "cr955e904-e3dd-4d7e-99f1-3fed446f96d4", + "cr955e904-e3dd-4d7e-99f1-3fed446f96d5", + ] + + response = self.client.post( + "/rest/files/datasets?keys=datasets", dataset_identifiers, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_results_length(response, 5) @@ -203,61 +241,82 @@ def test_get_right_files_and_datasets(self): """ Check that returned files and datasets are the right ones """ - testfile = self._get_object_from_test_data('file') + testfile = self._get_object_from_test_data("file") - cr = self.client.get('/rest/datasets/10', format='json') + cr = self.client.get("/rest/datasets/10", format="json") self.assertEqual(cr.status_code, status.HTTP_200_OK, cr.data) - response = self.client.post('/rest/files/datasets?keys=datasets', [cr.data['identifier']], format='json') + response = self.client.post( + "/rest/files/datasets?keys=datasets", [cr.data["identifier"]], format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # cr 10 has 2 default files for keys, values in response.data.items(): - self.assertEqual(keys == 'cr955e904-e3dd-4d7e-99f1-3fed446f9610', True, response.data) - self.assertEqual('pid:urn:19' and 'pid:urn:20' in values, True, response.data) + self.assertEqual(keys == "cr955e904-e3dd-4d7e-99f1-3fed446f9610", True, response.data) + self.assertEqual("pid:urn:19" and "pid:urn:20" in values, True, response.data) - response = self.client.post('/rest/files/datasets?keys=files', [testfile['identifier']], format='json') + response = self.client.post( + "/rest/files/datasets?keys=files", [testfile["identifier"]], format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # file 1 belongs to 3 datasets for keys, values in response.data.items(): - self.assertEqual(keys == 'pid:urn:1', True, response.data) - self.assertEqual('cr955e904-e3dd-4d7e-99f1-3fed446f96d1' and 'cr955e904-e3dd-4d7e-99f1-3fed446f9612' - and 'cr955e904-e3dd-4d7e-99f1-3fed446f9611' in values, True, response.data) + self.assertEqual(keys == "pid:urn:1", True, response.data) + self.assertEqual( + "cr955e904-e3dd-4d7e-99f1-3fed446f96d1" + and "cr955e904-e3dd-4d7e-99f1-3fed446f9612" + and "cr955e904-e3dd-4d7e-99f1-3fed446f9611" in values, + True, + response.data, + ) # Dataset 11 has 20 files in a directory - cr = self.client.get('/rest/datasets/11', format='json') + cr = self.client.get("/rest/datasets/11", format="json") self.assertEqual(cr.status_code, status.HTTP_200_OK, cr.data) # Compare using return from different api - files_in_cr11 = self.client.get('/rest/datasets/11/files', format='json') + files_in_cr11 = self.client.get("/rest/datasets/11/files", format="json") 
self.assertEqual(files_in_cr11.status_code, status.HTTP_200_OK, files_in_cr11.data)
         identifiers = []
-        [identifiers.append(i['identifier']) for i in files_in_cr11.data]
+        identifiers = [i["identifier"] for i in files_in_cr11.data]

-        response = self.client.post('/rest/files/datasets?keys=datasets', [11], format='json')
+        response = self.client.post("/rest/files/datasets?keys=datasets", [11], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

         # This should have the same file id's as the return from /rest/v2/datasets/11/files
-        self.assertEqual(sorted(response.data['cr955e904-e3dd-4d7e-99f1-3fed446f9611']), sorted(identifiers),
-                         response.data)
-
-        response = self.client.post('/rest/files/datasets?keys=files', ['pid:urn:20'], format='json')
+        self.assertEqual(
+            sorted(response.data["cr955e904-e3dd-4d7e-99f1-3fed446f9611"]),
+            sorted(identifiers),
+            response.data,
+        )
+
+        response = self.client.post(
+            "/rest/files/datasets?keys=files", ["pid:urn:20"], format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

         # Dataset 11 should be found from results
-        self.assertTrue('cr955e904-e3dd-4d7e-99f1-3fed446f9611' in response.data['pid:urn:20'], response.data)
+        self.assertTrue(
+            "cr955e904-e3dd-4d7e-99f1-3fed446f9611" in response.data["pid:urn:20"],
+            response.data,
+        )

     def test_get_related_datasets_files_not_found(self):
         """
         When the files themselves are not found, an empty result should be returned (200 OK)
         """
-        response = self.client.post('/rest/files/datasets', ['doesnotexist'], format='json')
+        response = self.client.post("/rest/files/datasets", ["doesnotexist"], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         self._assert_results_length(response, 0)

-        response = self.client.post('/rest/files/datasets?keys=files', ['doesnotexist'], format='json')
+        response = self.client.post(
+            "/rest/files/datasets?keys=files", ["doesnotexist"], format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         self._assert_results_length(response, 0)

         # Support for ?detailed
-        response = self.client.post('/rest/files/datasets?detailed', ['doesnotexist'], format='json')
+        response = self.client.post(
+            "/rest/files/datasets?detailed", ["doesnotexist"], format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         self._assert_results_length(response, 0)
@@ -267,27 +326,30 @@ def test_get_related_datasets_records_not_found(self):
         """
         with connection.cursor() as cr:
             # detach file pk 1 from any datasets
-            cr.execute('delete from metax_api_catalogrecord_files where file_id = 1')
+            cr.execute("delete from metax_api_catalogrecord_files where file_id = 1")

-        response = self.client.post('/rest/files/datasets', [1], format='json')
+        response = self.client.post("/rest/files/datasets", [1], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         self._assert_results_length(response, 0)

-        response = self.client.post('/rest/files/datasets?keys=files', [1], format='json')
+        response = self.client.post("/rest/files/datasets?keys=files", [1], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         self._assert_results_length(response, 0)

         # Support for ?detailed
-        response = self.client.post('/rest/files/datasets?detailed', [1], format='json')
+        response = self.client.post("/rest/files/datasets?detailed", [1], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self._assert_results_length(response, 0)

     def _assert_results_length(self, response, length):
-        self.assertTrue(isinstance(response.data, dict) or isinstance(response.data, list), response.data)
+        self.assertTrue(
+            isinstance(response.data, dict) or isinstance(response.data, list),
+            response.data,
+        )
         self.assertEqual(len(response.data), length)


-class FileApiReadEndUserAccess(FileApiReadCommon):
+class FileApiReadEndUserAccess(FileApiReadCommon):
     def setUp(self):
         super().setUp()
         self.token = get_test_oidc_token()
@@ -295,32 +357,36 @@ def setUp(self):
     @responses.activate
     def test_user_can_read_owned_files(self):
-        '''
+        """
         Ensure users can only read files owned by them from /rest/files api.
-        '''
+        """
         # first read files without project access - should fail
-        self._use_http_authorization(method='bearer', token=self.token)
+        self._use_http_authorization(method="bearer", token=self.token)
         proj = File.objects.get(pk=1).project_identifier

-        response = self.client.get('/rest/files/1')
+        response = self.client.get("/rest/files/1")
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)

-        response = self.client.get('/rest/files?project_identifier=%s' % proj)
+        response = self.client.get("/rest/files?project_identifier=%s" % proj)
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)

-        response = self.client.get('/rest/files?no_pagination')
+        response = self.client.get("/rest/files?no_pagination")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data), 0, 'should return 200 OK, but user projects has no files')
+        self.assertEqual(
+            len(response.data),
+            0,
+            "should return 200 OK, but the user's projects have no files",
+        )

         # set user to same project as previous files and try again. should now succeed
-        self.token['group_names'].append('IDA01:%s' % proj)
-        self._use_http_authorization(method='bearer', token=self.token)
+        self.token["group_names"].append("IDA01:%s" % proj)
+        self._use_http_authorization(method="bearer", token=self.token)

-        response = self.client.get('/rest/files')
+        response = self.client.get("/rest/files")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data) > 0, True, 'user should only see their own files')
+        self.assertEqual(len(response.data) > 0, True, "user should only see their own files")

-        response = self.client.get('/rest/files/1')
+        response = self.client.get("/rest/files/1")
         self.assertEqual(response.status_code, status.HTTP_200_OK)

-        response = self.client.get('/rest/files?project_identifier=%s' % proj)
+        response = self.client.get("/rest/files?project_identifier=%s" % proj)
         self.assertEqual(response.status_code, status.HTTP_200_OK)
diff --git a/src/metax_api/tests/api/rest/base/views/files/write.py b/src/metax_api/tests/api/rest/base/views/files/write.py
index 211bac5c..b93816eb 100755
--- a/src/metax_api/tests/api/rest/base/views/files/write.py
+++ b/src/metax_api/tests/api/rest/base/views/files/write.py
@@ -25,18 +25,18 @@ def setUpClass(cls):
         """
         Loaded only once for test cases inside this class.
""" - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(FileApiWriteCommon, cls).setUpClass() def setUp(self): """ Reloaded for every test case """ - call_command('loaddata', test_data_file_path, verbosity=0) - file_from_test_data = self._get_object_from_test_data('file') - self.identifier = file_from_test_data['identifier'] - self.pidentifier = file_from_test_data['project_identifier'] - self.file_name = file_from_test_data['file_name'] + call_command("loaddata", test_data_file_path, verbosity=0) + file_from_test_data = self._get_object_from_test_data("file") + self.identifier = file_from_test_data["identifier"] + self.pidentifier = file_from_test_data["project_identifier"] + self.file_name = file_from_test_data["file_name"] """ New data that is sent to the server for POST, PUT, PATCH requests. Modified @@ -47,21 +47,25 @@ def setUp(self): self._use_http_authorization() def _get_new_test_data(self): - from_test_data = self._get_object_from_test_data('file', requested_index=0) - from_test_data.update({ - "checksum": { - "value": "habeebit", - "algorithm": "SHA-256", - "checked": "2017-05-23T10:07:22.559656Z", - }, - "file_name": "file_name_1", - "file_path": from_test_data['file_path'].replace('/some/path', '/some/other_path'), - "identifier": "urn:nbn:fi:csc-ida201401200000000001", - "file_storage": self._get_object_from_test_data('filestorage', requested_index=0) - }) - from_test_data['file_path'] = from_test_data['file_path'].replace('/Experiment_X/', '/test/path/') - from_test_data['project_identifier'] = 'test_project_identifier' - del from_test_data['id'] + from_test_data = self._get_object_from_test_data("file", requested_index=0) + from_test_data.update( + { + "checksum": { + "value": "habeebit", + "algorithm": "SHA-256", + "checked": "2017-05-23T10:07:22.559656Z", + }, + "file_name": "file_name_1", + "file_path": from_test_data["file_path"].replace("/some/path", "/some/other_path"), + "identifier": "urn:nbn:fi:csc-ida201401200000000001", + "file_storage": self._get_object_from_test_data("filestorage", requested_index=0), + } + ) + from_test_data["file_path"] = from_test_data["file_path"].replace( + "/Experiment_X/", "/test/path/" + ) + from_test_data["project_identifier"] = "test_project_identifier" + del from_test_data["id"] return from_test_data def _get_second_new_test_data(self): @@ -73,7 +77,7 @@ def _get_second_new_test_data(self): def _count_dirs_from_path(self, file_path): expected_dirs_count = 1 dir_name = dirname(file_path) - while dir_name != '/': + while dir_name != "/": dir_name = dirname(dir_name) expected_dirs_count += 1 return expected_dirs_count @@ -83,34 +87,34 @@ def _check_project_root_byte_size_and_file_count(self, project_identifier): A rather simple test to fetch the root directory of a project, and verify that the root's calculated total byte size and file count match what exists in the db. 
""" - byte_size = File.objects.filter(project_identifier=project_identifier) \ - .aggregate(Sum('byte_size'))['byte_size__sum'] + byte_size = File.objects.filter(project_identifier=project_identifier).aggregate( + Sum("byte_size") + )["byte_size__sum"] file_count = File.objects.filter(project_identifier=project_identifier).count() - response = self.client.get('/rest/directories/root?project=%s' % project_identifier) + response = self.client.get("/rest/directories/root?project=%s" % project_identifier) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['byte_size'], byte_size) - self.assertEqual(response.data['file_count'], file_count) + self.assertEqual(response.data["byte_size"], byte_size) + self.assertEqual(response.data["file_count"], file_count) def _change_file_path(self, file, new_name): - file['file_path'] = file['file_path'].replace(file['file_name'], new_name) - file['file_name'] = new_name + file["file_path"] = file["file_path"].replace(file["file_name"], new_name) + file["file_name"] = new_name class FileApiWriteReferenceDataValidationTests(FileApiWriteCommon): - @classmethod def setUpClass(cls): """ Loaded only once for test cases inside this class. """ - call_command('updatereferencedata', verbosity=0) + call_command("updatereferencedata", verbosity=0) super(FileApiWriteReferenceDataValidationTests, cls).setUpClass() def setUp(self): super().setUp() cache = RedisClient() - ffv_refdata = cache.get('reference_data')['reference_data']['file_format_version'] + ffv_refdata = cache.get("reference_data")["reference_data"]["file_format_version"] # File format version entry in reference data that has some output_format_version self.ff_with_version = None @@ -122,73 +126,116 @@ def setUp(self): for ffv_obj in ffv_refdata: if self.ff_with_different_version is None and self.ff_with_version is not None: - if ffv_obj['input_file_format'] == self.ff_with_version['input_file_format']: + if ffv_obj["input_file_format"] == self.ff_with_version["input_file_format"]: self.ff_with_different_version = ffv_obj - if self.ff_with_version is None and ffv_obj['output_format_version']: + if self.ff_with_version is None and ffv_obj["output_format_version"]: self.ff_with_version = ffv_obj - if self.ff_without_version is None and not ffv_obj['output_format_version']: + if self.ff_without_version is None and not ffv_obj["output_format_version"]: self.ff_without_version = ffv_obj - self.assertTrue(self.ff_with_version['output_format_version'] != '') - self.assertTrue(self.ff_with_different_version['output_format_version'] != '') - self.assertTrue(self.ff_with_version['input_file_format'] == - self.ff_with_different_version['input_file_format']) - self.assertTrue(self.ff_with_version['output_format_version'] != - self.ff_with_different_version['output_format_version']) - self.assertTrue(self.ff_without_version['output_format_version'] == '') - - def test_file_format_version_with_invalid_file_format_when_format_version_given_1(self): - self.test_new_data['file_characteristics']['format_version'] = 'any' - response = self.client.post('/rest/files', self.test_new_data, format="json") + self.assertTrue(self.ff_with_version["output_format_version"] != "") + self.assertTrue(self.ff_with_different_version["output_format_version"] != "") + self.assertTrue( + self.ff_with_version["input_file_format"] + == self.ff_with_different_version["input_file_format"] + ) + self.assertTrue( + self.ff_with_version["output_format_version"] + != 
self.ff_with_different_version["output_format_version"] + ) + self.assertTrue(self.ff_without_version["output_format_version"] == "") + + def test_file_format_version_with_invalid_file_format_when_format_version_given_1( + self, + ): + self.test_new_data["file_characteristics"]["format_version"] = "any" + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('file_characteristics' in response.data.keys(), True) - self.assertEqual('file_characteristics.file_format' in response.data['file_characteristics'], True) - - def test_file_format_version_with_invalid_file_format_when_format_version_given_2(self): - self.test_new_data['file_characteristics']['file_format'] = 'nonexisting' - self.test_new_data['file_characteristics']['format_version'] = 'any' - response = self.client.post('/rest/files', self.test_new_data, format="json") + self.assertEqual("file_characteristics" in response.data.keys(), True) + self.assertEqual( + "file_characteristics.file_format" in response.data["file_characteristics"], + True, + ) + + def test_file_format_version_with_invalid_file_format_when_format_version_given_2( + self, + ): + self.test_new_data["file_characteristics"]["file_format"] = "nonexisting" + self.test_new_data["file_characteristics"]["format_version"] = "any" + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('file_characteristics' in response.data.keys(), True) - self.assertEqual('file_characteristics.file_format' in response.data['file_characteristics'], True) - - def test_file_format_version_with_invalid_format_version_when_file_format_has_versions_1(self): - self.test_new_data['file_characteristics']['file_format'] = self.ff_with_version['input_file_format'] - response = self.client.post('/rest/files', self.test_new_data, format="json") + self.assertEqual("file_characteristics" in response.data.keys(), True) + self.assertEqual( + "file_characteristics.file_format" in response.data["file_characteristics"], + True, + ) + + def test_file_format_version_with_invalid_format_version_when_file_format_has_versions_1( + self, + ): + self.test_new_data["file_characteristics"]["file_format"] = self.ff_with_version[ + "input_file_format" + ] + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('file_characteristics' in response.data.keys(), True) - self.assertEqual('file_characteristics.format_version' in response.data['file_characteristics'], True) - - def test_file_format_version_with_invalid_format_version_when_file_format_has_versions_2(self): - self.test_new_data['file_characteristics']['file_format'] = self.ff_with_version['input_file_format'] - self.test_new_data['file_characteristics']['format_version'] = 'nonexisting' - response = self.client.post('/rest/files', self.test_new_data, format="json") + self.assertEqual("file_characteristics" in response.data.keys(), True) + self.assertEqual( + "file_characteristics.format_version" in response.data["file_characteristics"], + True, + ) + + def test_file_format_version_with_invalid_format_version_when_file_format_has_versions_2( + self, + ): + self.test_new_data["file_characteristics"]["file_format"] = self.ff_with_version[ + "input_file_format" + ] + self.test_new_data["file_characteristics"]["format_version"] = 
"nonexisting" + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('file_characteristics' in response.data.keys(), True) - self.assertEqual('file_characteristics.format_version' in response.data['file_characteristics'], True) - - def test_file_format_version_with_empty_format_version_when_file_format_has_no_version_1(self): - self.test_new_data['file_characteristics']['file_format'] = self.ff_without_version['input_file_format'] - response = self.client.post('/rest/files', self.test_new_data, format="json") + self.assertEqual("file_characteristics" in response.data.keys(), True) + self.assertEqual( + "file_characteristics.format_version" in response.data["file_characteristics"], + True, + ) + + def test_file_format_version_with_empty_format_version_when_file_format_has_no_version_1( + self, + ): + self.test_new_data["file_characteristics"]["file_format"] = self.ff_without_version[ + "input_file_format" + ] + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) - def test_file_format_version_with_empty_format_version_when_file_format_has_no_version_2(self): - self.test_new_data['file_characteristics']['file_format'] = self.ff_without_version['input_file_format'] - self.test_new_data['file_characteristics']['format_version'] = '' - response = self.client.post('/rest/files', self.test_new_data, format="json") + def test_file_format_version_with_empty_format_version_when_file_format_has_no_version_2( + self, + ): + self.test_new_data["file_characteristics"]["file_format"] = self.ff_without_version[ + "input_file_format" + ] + self.test_new_data["file_characteristics"]["format_version"] = "" + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def test_file_format_version_with_valid_file_format_and_valid_file_version_1(self): - self.test_new_data['file_characteristics']['file_format'] = self.ff_with_version['input_file_format'] - self.test_new_data['file_characteristics']['format_version'] = self.ff_with_version['output_format_version'] - response = self.client.post('/rest/files', self.test_new_data, format="json") + self.test_new_data["file_characteristics"]["file_format"] = self.ff_with_version[ + "input_file_format" + ] + self.test_new_data["file_characteristics"]["format_version"] = self.ff_with_version[ + "output_format_version" + ] + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_file_format_version_with_valid_file_format_and_valid_file_version_2(self): - self.test_new_data['file_characteristics']['file_format'] = self.ff_with_version['input_file_format'] - self.test_new_data['file_characteristics']['format_version'] = \ - self.ff_with_different_version['output_format_version'] - response = self.client.post('/rest/files', self.test_new_data, format="json") + self.test_new_data["file_characteristics"]["file_format"] = self.ff_with_version[ + "input_file_format" + ] + self.test_new_data["file_characteristics"][ + "format_version" + ] = self.ff_with_different_version["output_format_version"] + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) # update tests @@ -197,11 +244,15 @@ def 
test_file_characteristics_is_validated_on_update(self): """ Ensure validation also works when updating existing files. """ - self.test_new_data['file_characteristics']['file_format'] = self.ff_without_version['input_file_format'] - response = self.client.post('/rest/files', self.test_new_data, format="json") + self.test_new_data["file_characteristics"]["file_format"] = self.ff_without_version[ + "input_file_format" + ] + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - response = self.client.put('/rest/files/%s' % response.data['identifier'], response.data, format="json") + response = self.client.put( + "/rest/files/%s" % response.data["identifier"], response.data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) @@ -216,81 +267,92 @@ class FileApiWriteCreateTests(FileApiWriteCommon): def test_create_file(self): # note: leading and trailing whitespace must be preserved. - newly_created_file_name = " MX .201015_Suomessa_tavattavat_ruokasammakot_ovat_vƤritykseltƤƤn_vaihtelevia_" \ - "osa_on_ruskeita,_osa_kirkkaankin_vihreitƤ._Vihersammakoiden_silmƤt_ovat_kohtalaisen_korkealla_pƤƤlae" \ + newly_created_file_name = ( + " MX .201015_Suomessa_tavattavat_ruokasammakot_ovat_vƤritykseltƤƤn_vaihtelevia_" + "osa_on_ruskeita,_osa_kirkkaankin_vihreitƤ._Vihersammakoiden_silmƤt_ovat_kohtalaisen_korkealla_pƤƤlae" "lla._Sammakkolampi.fi_CC-BY-NC-4.0_thumb.jpg.meta " - self.test_new_data['file_name'] = newly_created_file_name - self.test_new_data['identifier'] = 'urn:nbn:fi:csc-thisisanewurn' + ) + self.test_new_data["file_name"] = newly_created_file_name + self.test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" - response = self.client.post('/rest/files', self.test_new_data, format="json") + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('file_name' in response.data.keys(), True) - self.assertEqual(response.data['file_name'], newly_created_file_name) - self._check_project_root_byte_size_and_file_count(response.data['project_identifier']) + self.assertEqual("file_name" in response.data.keys(), True) + self.assertEqual(response.data["file_name"], newly_created_file_name) + self._check_project_root_byte_size_and_file_count(response.data["project_identifier"]) def test_create_file_error_identifier_exists(self): # first ok - response = self.client.post('/rest/files', self.test_new_data, format="json") + response = self.client.post("/rest/files", self.test_new_data, format="json") # second should give error - response = self.client.post('/rest/files', self.test_new_data, format="json") + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('identifier' in response.data.keys(), True) - self.assertEqual('already exists' in response.data['identifier'][0], True) + self.assertEqual("identifier" in response.data.keys(), True) + self.assertEqual("already exists" in response.data["identifier"][0], True) def test_allow_creating_previously_deleted_file(self): """ It should be possible to delete a file, and then create the exact same file again without letting the removed file conflict. 
""" - response = self.client.post('/rest/files', self.test_new_data, format="json") - response = self.client.delete('/rest/files/%d' % response.data['id'], format="json") + response = self.client.post("/rest/files", self.test_new_data, format="json") + response = self.client.delete("/rest/files/%d" % response.data["id"], format="json") - response = self.client.post('/rest/files', self.test_new_data, format="json") + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def test_create_file_error_json_validation(self): - self.test_new_data['identifier'] = 'urn:nbn:fi:csc-thisisanewurn' - self.test_new_data['file_characteristics'] = { + self.test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" + self.test_new_data["file_characteristics"] = { "application_name": "Application Name", "description": "A nice description 0000000010", "metadata_modified": 12345, "file_created": "2014-01-17T08:19:31Z", "encoding": "utf-8", - "title": "A title 0000000010" + "title": "A title 0000000010", } - response = self.client.post('/rest/files', self.test_new_data, format="json") + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('file_characteristics' in response.data.keys(), True, - 'The error should concern the field file_characteristics') - self.assertEqual('metadata_modified' in response.data['file_characteristics'][0], True, - 'The error should contain the name of the erroneous field') - self.assertEqual('Json path:' in response.data['file_characteristics'][0], True, - 'The error should contain the json path') + self.assertEqual( + "file_characteristics" in response.data.keys(), + True, + "The error should concern the field file_characteristics", + ) + self.assertEqual( + "metadata_modified" in response.data["file_characteristics"][0], + True, + "The error should contain the name of the erroneous field", + ) + self.assertEqual( + "Json path:" in response.data["file_characteristics"][0], + True, + "The error should contain the json path", + ) def test_create_file_allowed_checksum_algorithm(self): - self.test_new_data['checksum']['algorithm'] = 'SHA-512' + self.test_new_data["checksum"]["algorithm"] = "SHA-512" - response = self.client.post('/rest/files', self.test_new_data, format="json") + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data['checksum']['algorithm'], 'SHA-512') + self.assertEqual(response.data["checksum"]["algorithm"], "SHA-512") - self.test_new_data['identifier'] = 'urn:nbn:fi:csc-md5' - self.test_new_data['file_path'] = '/md5/filepath/md5-filename' - self.test_new_data['file_name'] = 'md5-filename' - self.test_new_data['checksum']['algorithm'] = 'MD5' + self.test_new_data["identifier"] = "urn:nbn:fi:csc-md5" + self.test_new_data["file_path"] = "/md5/filepath/md5-filename" + self.test_new_data["file_name"] = "md5-filename" + self.test_new_data["checksum"]["algorithm"] = "MD5" - response = self.client.post('/rest/files', self.test_new_data, format="json") + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data['checksum']['algorithm'], 'MD5') + 
self.assertEqual(response.data["checksum"]["algorithm"], "MD5") def test_create_file_not_allowed_checksum_algorithm(self): from django.db import transaction - for algo in ['sha2', 'sha256', 'sha-256']: + for algo in ["sha2", "sha256", "sha-256"]: # run POST requests inside db transaction to ensure django testcase transactions # work correctly. https://stackoverflow.com/a/23326971/1201945 this probably has # somethind to do with the fact that POST requests to /rest/files do not normally @@ -300,45 +362,60 @@ def test_create_file_not_allowed_checksum_algorithm(self): # alternative for below would be to use optional query param ?dryrun=true, which # causes the request to be executed inside a transaction too. with transaction.atomic(): - self.test_new_data['checksum']['algorithm'] = algo - response = self.client.post('/rest/files', self.test_new_data, format="json") + self.test_new_data["checksum"]["algorithm"] = algo + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual('checksum_algorithm' in response.data, True) + self.assertEqual("checksum_algorithm" in response.data, True) # # create list operations # def test_create_file_list(self): - self.test_new_data['identifier'] = 'urn:nbn:fi:csc-thisisanewurn' - self._change_file_path(self.test_new_data, 'one_file.txt') + self.test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" + self._change_file_path(self.test_new_data, "one_file.txt") - self.second_test_new_data['identifier'] = 'urn:nbn:fi:csc-thisisanewurnalso' - self._change_file_path(self.second_test_new_data, 'two_file.txt') + self.second_test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurnalso" + self._change_file_path(self.second_test_new_data, "two_file.txt") - response = self.client.post('/rest/files', [self.test_new_data, self.second_test_new_data], format="json") + response = self.client.post( + "/rest/files", + [self.test_new_data, self.second_test_new_data], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('object' in response.data['success'][0].keys(), True) - self.assertEqual(len(response.data['failed']), 0, response.data['failed']) - self.assertEqual(len(response.data['success']), 2) - self._check_project_root_byte_size_and_file_count(response.data['success'][0]['object']['project_identifier']) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("object" in response.data["success"][0].keys(), True) + self.assertEqual(len(response.data["failed"]), 0, response.data["failed"]) + self.assertEqual(len(response.data["success"]), 2) + self._check_project_root_byte_size_and_file_count( + response.data["success"][0]["object"]["project_identifier"] + ) # ensure structure of some specific fields is the same as when single files are created - self.assertEqual('identifier' in response.data['success'][0]['object']['file_storage'], True) - self.assertEqual('identifier' in response.data['success'][0]['object']['parent_directory'], True) - self.assertEqual('checksum' in response.data['success'][0]['object'], True) - self.assertEqual('value' in response.data['success'][0]['object']['checksum'], True) + self.assertEqual( + "identifier" in 
response.data["success"][0]["object"]["file_storage"], True + ) + self.assertEqual( + "identifier" in response.data["success"][0]["object"]["parent_directory"], + True, + ) + self.assertEqual("checksum" in response.data["success"][0]["object"], True) + self.assertEqual("value" in response.data["success"][0]["object"]["checksum"], True) def test_create_file_list_error_one_fails(self): - newly_created_file_name = 'newly_created_file_name' - self.test_new_data['file_name'] = newly_created_file_name - self.test_new_data['identifier'] = 'urn:nbn:fi:csc-thisisanewurn' + newly_created_file_name = "newly_created_file_name" + self.test_new_data["file_name"] = newly_created_file_name + self.test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" # same as above - should fail - self.second_test_new_data['identifier'] = 'urn:nbn:fi:csc-thisisanewurn' + self.second_test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" - response = self.client.post('/rest/files', [self.test_new_data, self.second_test_new_data], format="json") + response = self.client.post( + "/rest/files", + [self.test_new_data, self.second_test_new_data], + format="json", + ) """ List response looks like @@ -357,40 +434,50 @@ def test_create_file_list_error_one_fails(self): } """ self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual('object' in response.data['failed'][0].keys(), True) - self.assertEqual('file_name' in response.data['failed'][0]['object'].keys(), True) - self.assertEqual('identifier' in response.data['failed'][0]['errors'], True, - 'The error should have been about an already existing identifier') + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual("object" in response.data["failed"][0].keys(), True) + self.assertEqual("file_name" in response.data["failed"][0]["object"].keys(), True) + self.assertEqual( + "identifier" in response.data["failed"][0]["errors"], + True, + "The error should have been about an already existing identifier", + ) def test_parameter_ignore_already_exists_errors(self): - newly_created_file_name = 'newly_created_file_name' - self.test_new_data['file_name'] = newly_created_file_name - self.test_new_data['identifier'] = 'urn:nbn:fi:csc-thisisanewurn' + newly_created_file_name = "newly_created_file_name" + self.test_new_data["file_name"] = newly_created_file_name + self.test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" # same as above - should cause an error. 
- self.second_test_new_data['identifier'] = 'urn:nbn:fi:csc-thisisanewurn' + self.second_test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" - response = self.client.post('/rest/files?ignore_already_exists_errors', - [self.test_new_data, self.second_test_new_data], format="json") + response = self.client.post( + "/rest/files?ignore_already_exists_errors", + [self.test_new_data, self.second_test_new_data], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assertEqual(len(response.data), 2) - self.assertEqual('already exists' in response.data['success'][1]['object']['detail'], True) + self.assertEqual("already exists" in response.data["success"][1]["object"]["detail"], True) def test_create_file_list_error_all_fail(self): - newly_created_file_name = 'newly_created_file_name' - self.test_new_data['file_name'] = newly_created_file_name + newly_created_file_name = "newly_created_file_name" + self.test_new_data["file_name"] = newly_created_file_name # identifier is a required field, should fail - self.test_new_data['identifier'] = None - self.second_test_new_data['identifier'] = None - - response = self.client.post('/rest/files', [self.test_new_data, self.second_test_new_data], format="json") + self.test_new_data["identifier"] = None + self.second_test_new_data["identifier"] = None + + response = self.client.post( + "/rest/files", + [self.test_new_data, self.second_test_new_data], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual('object' in response.data['failed'][0].keys(), True) - self.assertEqual(len(response.data['success']), 0) - self.assertEqual(len(response.data['failed']), 2) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual("object" in response.data["failed"][0].keys(), True) + self.assertEqual(len(response.data["success"]), 0) + self.assertEqual(len(response.data["failed"]), 2) class FileApiWriteCreateDirectoriesTests(FileApiWriteCommon): @@ -406,19 +493,21 @@ def test_create_file_hierarchy_from_single_file(self): """ f = self._form_complex_list_from_test_file()[0] - file_path = '/project_y_FROZEN/Experiment_1/path/of/lonely/file_and_this_also_has_to_support' \ - 'veryverylooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo' \ - 'ooooooooooooooooooooooooooooooooooooooongdirectorynames/%s' - f['file_path'] = file_path % f['file_name'] - f['identifier'] = 'abc123111' - - response = self.client.post('/rest/files', f, format="json") + file_path = ( + "/project_y_FROZEN/Experiment_1/path/of/lonely/file_and_this_also_has_to_support" + "veryverylooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "ooooooooooooooooooooooooooooooooooooooongdirectorynames/%s" + ) + f["file_path"] = file_path % f["file_name"] + f["identifier"] = "abc123111" + + response = self.client.post("/rest/files", f, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('date_created' in response.data, True) - self.assertEqual('parent_directory' in response.data, True) + self.assertEqual("date_created" in response.data, True) + self.assertEqual("parent_directory" in response.data, True) - dirs_count = Directory.objects.filter(project_identifier='project_y').count() - 
dirs_created_count = self._count_dirs_from_path(f['file_path']) + dirs_count = Directory.objects.filter(project_identifier="project_y").count() + dirs_created_count = self._count_dirs_from_path(f["file_path"]) self.assertEqual(dirs_count, dirs_created_count) def test_create_file_append_to_existing_directory(self): @@ -428,24 +517,38 @@ def test_create_file_append_to_existing_directory(self): Note: Targeting project_x, which exists in pre-generated test data. """ - project_identifier = 'project_x' + project_identifier = "project_x" dir_count_before = Directory.objects.filter(project_identifier=project_identifier).count() - file_count_before = Directory.objects.filter(project_identifier=project_identifier, - directory_path='/project_x_FROZEN/Experiment_X/Phase_1').first().files.all().count() + file_count_before = ( + Directory.objects.filter( + project_identifier=project_identifier, + directory_path="/project_x_FROZEN/Experiment_X/Phase_1", + ) + .first() + .files.all() + .count() + ) f = self._form_complex_list_from_test_file()[0] - f['file_path'] = '/project_x_FROZEN/Experiment_X/Phase_1/%s' % f['file_name'] - f['identifier'] = '%s-111' % f['file_path'] - f['project_identifier'] = project_identifier + f["file_path"] = "/project_x_FROZEN/Experiment_X/Phase_1/%s" % f["file_name"] + f["identifier"] = "%s-111" % f["file_path"] + f["project_identifier"] = project_identifier - response = self.client.post('/rest/files', f, format="json") + response = self.client.post("/rest/files", f, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('date_created' in response.data, True) - self.assertEqual('parent_directory' in response.data, True) + self.assertEqual("date_created" in response.data, True) + self.assertEqual("parent_directory" in response.data, True) dir_count_after = Directory.objects.filter(project_identifier=project_identifier).count() - file_count_after = Directory.objects.filter(project_identifier=project_identifier, - directory_path='/project_x_FROZEN/Experiment_X/Phase_1').first().files.all().count() + file_count_after = ( + Directory.objects.filter( + project_identifier=project_identifier, + directory_path="/project_x_FROZEN/Experiment_X/Phase_1", + ) + .first() + .files.all() + .count() + ) self.assertEqual(dir_count_before, dir_count_after) self.assertEqual(file_count_after - file_count_before, 1) @@ -457,13 +560,13 @@ def test_create_file_hierarchy_from_file_list_with_no_existing_files(self): """ experiment_1_file_list = self._form_complex_list_from_test_file() - response = self.client.post('/rest/files', experiment_1_file_list, format="json") + response = self.client.post("/rest/files", experiment_1_file_list, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual(len(response.data['success']), 12) - self.assertEqual(len(response.data['failed']), 0) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual(len(response.data["success"]), 12) + self.assertEqual(len(response.data["failed"]), 0) - dirs_dict = self._assert_directory_parent_dirs('project_y') + dirs_dict = self._assert_directory_parent_dirs("project_y") self._assert_file_parent_dirs(dirs_dict, response) def test_create_file_hierarchy_from_file_list_with_existing_files(self): @@ -480,23 +583,25 @@ def test_create_file_hierarchy_from_file_list_with_existing_files(self): # setup db to have pre-existing dirs experiment_1_file_list 
= self._form_complex_list_from_test_file() - response = self.client.post('/rest/files', experiment_1_file_list, format="json") + response = self.client.post("/rest/files", experiment_1_file_list, format="json") # form new test data experiment_2_file_list = self._form_complex_list_from_test_file() for i, f in enumerate(experiment_2_file_list): - f['file_path'] = f['file_path'].replace('/project_y_FROZEN/Experiment_1', - '/project_y_FROZEN/Experiment_2/Phase_1/Data') - f['identifier'] = '%s-%d' % (f['file_path'], i) + f["file_path"] = f["file_path"].replace( + "/project_y_FROZEN/Experiment_1", + "/project_y_FROZEN/Experiment_2/Phase_1/Data", + ) + f["identifier"] = "%s-%d" % (f["file_path"], i) - response = self.client.post('/rest/files', experiment_2_file_list, format="json") + response = self.client.post("/rest/files", experiment_2_file_list, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual(len(response.data['success']), 12) - self.assertEqual(len(response.data['failed']), 0) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual(len(response.data["success"]), 12) + self.assertEqual(len(response.data["failed"]), 0) - dirs_dict = self._assert_directory_parent_dirs('project_y') + dirs_dict = self._assert_directory_parent_dirs("project_y") self._assert_file_parent_dirs(dirs_dict, response) def test_append_files_to_existing_directory(self): @@ -508,7 +613,7 @@ def test_append_files_to_existing_directory(self): # setup db to have pre-existing dirs experiment_1_file_list = self._form_complex_list_from_test_file() - response = self.client.post('/rest/files', experiment_1_file_list, format="json") + response = self.client.post("/rest/files", experiment_1_file_list, format="json") # form new test data, and trim it down a bit experiment_2_file_list = self._form_complex_list_from_test_file() @@ -516,16 +621,16 @@ def test_append_files_to_existing_directory(self): experiment_2_file_list.pop() for i, f in enumerate(experiment_2_file_list): - f['file_path'] = '/project_y_FROZEN/Experiment_2/%s' % f['file_name'] - f['identifier'] = '%s-%d' % (f['file_path'], i) + f["file_path"] = "/project_y_FROZEN/Experiment_2/%s" % f["file_name"] + f["identifier"] = "%s-%d" % (f["file_path"], i) - response = self.client.post('/rest/files', experiment_2_file_list, format="json") + response = self.client.post("/rest/files", experiment_2_file_list, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual(len(response.data['failed']), 0, response.data['failed']) - self.assertEqual(len(response.data['success']), 5) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual(len(response.data["failed"]), 0, response.data["failed"]) + self.assertEqual(len(response.data["success"]), 5) - dirs_dict = self._assert_directory_parent_dirs('project_y') + dirs_dict = self._assert_directory_parent_dirs("project_y") self._assert_file_parent_dirs(dirs_dict, response) def test_append_one_file_to_existing_directory(self): @@ -537,22 +642,22 @@ def test_append_one_file_to_existing_directory(self): # setup db to have pre-existing dirs experiment_1_file_list = self._form_complex_list_from_test_file() - response = self.client.post('/rest/files', experiment_1_file_list, format="json") + response = self.client.post("/rest/files", experiment_1_file_list, 
format="json") # form new test data, but use just the first item experiment_2_file_list = self._form_complex_list_from_test_file()[0:1] for i, f in enumerate(experiment_2_file_list): - f['file_path'] = '/project_y_FROZEN/Experiment_2/%s' % f['file_name'] - f['identifier'] = '%s-%d' % (f['file_path'], i) + f["file_path"] = "/project_y_FROZEN/Experiment_2/%s" % f["file_name"] + f["identifier"] = "%s-%d" % (f["file_path"], i) - response = self.client.post('/rest/files', experiment_2_file_list, format="json") + response = self.client.post("/rest/files", experiment_2_file_list, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual(len(response.data['success']), 1) - self.assertEqual(len(response.data['failed']), 0) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual(len(response.data["success"]), 1) + self.assertEqual(len(response.data["failed"]), 0) - dirs_dict = self._assert_directory_parent_dirs('project_y') + dirs_dict = self._assert_directory_parent_dirs("project_y") self._assert_file_parent_dirs(dirs_dict, response) def test_create_file_hierarchy_error_file_list_has_invalid_data(self): @@ -562,33 +667,35 @@ def test_create_file_hierarchy_error_file_list_has_invalid_data(self): in a single request is also not permitted. """ experiment_1_file_list = self._form_complex_list_from_test_file() - experiment_1_file_list[0].pop('file_path') - response = self.client.post('/rest/files', experiment_1_file_list, format="json") + experiment_1_file_list[0].pop("file_path") + response = self.client.post("/rest/files", experiment_1_file_list, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('file_path' in response.data, True) - self.assertEqual('required parameter' in response.data['file_path'][0], True) + self.assertEqual("file_path" in response.data, True) + self.assertEqual("required parameter" in response.data["file_path"][0], True) experiment_1_file_list = self._form_complex_list_from_test_file() - experiment_1_file_list[0].pop('project_identifier') - response = self.client.post('/rest/files', experiment_1_file_list, format="json") + experiment_1_file_list[0].pop("project_identifier") + response = self.client.post("/rest/files", experiment_1_file_list, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('project_identifier' in response.data, True) - self.assertEqual('required parameter' in response.data['project_identifier'][0], True) + self.assertEqual("project_identifier" in response.data, True) + self.assertEqual("required parameter" in response.data["project_identifier"][0], True) experiment_1_file_list = self._form_complex_list_from_test_file() - experiment_1_file_list[0]['project_identifier'] = 'second_project' - response = self.client.post('/rest/files', experiment_1_file_list, format="json") + experiment_1_file_list[0]["project_identifier"] = "second_project" + response = self.client.post("/rest/files", experiment_1_file_list, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('project_identifier' in response.data, True) - self.assertEqual('multiple projects' in response.data['project_identifier'][0], True) + self.assertEqual("project_identifier" in response.data, True) + self.assertEqual("multiple projects" in response.data["project_identifier"][0], True) def test_filepath_starts_with_slash(self): file = 
self._get_new_test_data()
-        file['file_path'] = file['file_path'][1:]
+        file["file_path"] = file["file_path"][1:]

-        response = self.client.post('/rest/files', file, format="json")
+        response = self.client.post("/rest/files", file, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertTrue("file path should start with '/' to point to the root" in response.data['file_path'][0])
+        self.assertTrue(
+            "file path should start with '/' to point to the root" in response.data["file_path"][0]
+        )

     def _assert_directory_parent_dirs(self, project_identifier):
         """
@@ -598,17 +705,24 @@ def _assert_directory_parent_dirs(self, project_identifier):
         for d in Directory.objects.filter(project_identifier=project_identifier):
             dirs_dict[d.directory_path] = {
-                'dir_id': d.id,
-                'parent_dir_id': d.parent_directory and d.parent_directory.id or None
+                "dir_id": d.id,
+                "parent_dir_id": d.parent_directory and d.parent_directory.id or None,
             }

         for dir_path, ids in dirs_dict.items():
-            if dir_path == '/':
-                self.assertEqual(ids['parent_dir_id'], None, 'root dir \'/\' should not have a parent directory')
+            if dir_path == "/":
+                self.assertEqual(
+                    ids["parent_dir_id"],
+                    None,
+                    "root dir '/' should not have a parent directory",
+                )
                 continue
             expected_parent_dir_path = dirname(dir_path)
-            self.assertEqual(ids['parent_dir_id'], dirs_dict[expected_parent_dir_path]['dir_id'],
-                             'parent dir not as expected.')
+            self.assertEqual(
+                ids["parent_dir_id"],
+                dirs_dict[expected_parent_dir_path]["dir_id"],
+                "parent dir not as expected.",
+            )

         return dirs_dict
@@ -616,11 +730,14 @@ def _assert_file_parent_dirs(self, dirs_dict, response):
         """
         Check files have parent dirs as expected.
         """
-        for entry in response.data['success']:
-            f = entry['object']
-            excpected_parent_dir_path = dirname(f['file_path'])
-            self.assertEqual(f['parent_directory']['id'], dirs_dict[excpected_parent_dir_path]['dir_id'],
-                             'parent dir not as expected.')
+        for entry in response.data["success"]:
+            f = entry["object"]
+            expected_parent_dir_path = dirname(f["file_path"])
+            self.assertEqual(
+                f["parent_directory"]["id"],
+                dirs_dict[expected_parent_dir_path]["dir_id"],
+                "parent dir not as expected.",
+            )

     def _form_complex_list_from_test_file(self):
         """
@@ -678,18 +795,20 @@ def _form_complex_list_from_test_file(self):
         ]

         template = self.test_new_data
-        template.pop('id', None)
-        template.pop('identifier', None)
-        template.pop('project_identifier', None)
-        template.pop('parent_directory', None)
-        template.pop('date_created', None)
-        template.pop('date_modified', None)
-        template.pop('service_created', None)
+        template.pop("id", None)
+        template.pop("identifier", None)
+        template.pop("project_identifier", None)
+        template.pop("parent_directory", None)
+        template.pop("date_created", None)
+        template.pop("date_modified", None)
+        template.pop("service_created", None)

         files = []
         for i, d in enumerate(dir_data):
             files.append(deepcopy(template))
-            files[-1].update(d, identifier='pid:urn:test:file:%d' % i, project_identifier='project_y')
+            files[-1].update(
+                d, identifier="pid:urn:test:file:%d" % i, project_identifier="project_y"
+            )

         return files
@@ -700,15 +819,15 @@ class FileApiWriteUpdateTests(FileApiWriteCommon):
     """

     def test_update_file(self):
-        f = self.client.get('/rest/files/1').data
-        f['file_format'] = 'csv'
-        response = self.client.put('/rest/files/%s' % 
f["identifier"], f, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) def test_prevent_file_path_update_after_create(self): - f = self.client.get('/rest/files/1').data - f['file_path'] = '%s_bak' % f['file_path'] - response = self.client.put('/rest/files/%s' % f['identifier'], f, format="json") + f = self.client.get("/rest/files/1").data + f["file_path"] = "%s_bak" % f["file_path"] + response = self.client.put("/rest/files/%s" % f["identifier"], f, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_update_file_error_required_fields(self): @@ -716,120 +835,151 @@ def test_update_file_error_required_fields(self): Field 'project_identifier' is missing, which should result in an error, since PUT replaces an object and requires all 'required' fields to be present. """ - self.test_new_data.pop('project_identifier') - response = self.client.put('/rest/files/%s' % self.identifier, self.test_new_data, format="json") + self.test_new_data.pop("project_identifier") + response = self.client.put( + "/rest/files/%s" % self.identifier, self.test_new_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('project_identifier' in response.data.keys(), True, - 'Error for field \'project_identifier\' is missing from response.data') + self.assertEqual( + "project_identifier" in response.data.keys(), + True, + "Error for field 'project_identifier' is missing from response.data", + ) def test_update_file_not_found(self): - response = self.client.put('/rest/files/doesnotexist', self.test_new_data, format="json") + response = self.client.put("/rest/files/doesnotexist", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_update_file_allowed_projects_ok(self): - f = self.client.get('/rest/files/1').data - response = self.client.put('/rest/files/%s?allowed_projects=%s' % (f['identifier'], f['project_identifier']), - f, format="json") + f = self.client.get("/rest/files/1").data + response = self.client.put( + "/rest/files/%s?allowed_projects=%s" % (f["identifier"], f["project_identifier"]), + f, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_file_allowed_projects_fail(self): - f = self.client.get('/rest/files/1').data - response = self.client.put('/rest/files/%s?allowed_projects=nopermission' % f['identifier'], f, format="json") + f = self.client.get("/rest/files/1").data + response = self.client.put( + "/rest/files/%s?allowed_projects=nopermission" % f["identifier"], + f, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_update_file_allowed_projects_not_dict(self): - f = self.client.get('/rest/files/1').data - response = self.client.put('/rest/files/%s?allowed_projects=%s' % (f['identifier'], f['project_identifier']), - [f], format="json") + f = self.client.get("/rest/files/1").data + response = self.client.put( + "/rest/files/%s?allowed_projects=%s" % (f["identifier"], f["project_identifier"]), + [f], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('json' in response.data['detail'][0], True, 'Error regarding datatype') + self.assertEqual("json" in response.data["detail"][0], True, "Error regarding datatype") # # update list operations PUT # def test_file_update_list(self): - f1 = self.client.get('/rest/files/1').data - f2 = self.client.get('/rest/files/2').data - new_file_format = 
'changed-format'
-        new_file_format_2 = 'changed-format-2'
-        f1['file_format'] = new_file_format
-        f2['file_format'] = new_file_format_2
-
-        response = self.client.put('/rest/files', [f1, f2], format="json")
+        f1 = self.client.get("/rest/files/1").data
+        f2 = self.client.get("/rest/files/2").data
+        new_file_format = "changed-format"
+        new_file_format_2 = "changed-format-2"
+        f1["file_format"] = new_file_format
+        f2["file_format"] = new_file_format_2
+
+        response = self.client.put("/rest/files", [f1, f2], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

         updated_file = File.objects.get(pk=1)
         self.assertEqual(updated_file.file_format, new_file_format)

     def test_file_update_list_error_one_fails(self):
-        f1 = self.client.get('/rest/files/1').data
-        f2 = self.client.get('/rest/files/2').data
-        new_file_format = 'changed-format'
-        f1['file_format'] = new_file_format
+        f1 = self.client.get("/rest/files/1").data
+        f2 = self.client.get("/rest/files/2").data
+        new_file_format = "changed-format"
+        f1["file_format"] = new_file_format

         # cant be null - should fail
-        f2['file_frozen'] = None
+        f2["file_frozen"] = None

-        response = self.client.put('/rest/files', [f1, f2], format="json")
+        response = self.client.put("/rest/files", [f1, f2], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['success']), 1, 'success list should be empty')
-        self.assertEqual(len(response.data['failed']), 1, 'there should have been one failed element')
-        self.assertEqual('file_frozen' in response.data['failed'][0]['errors'], True,
-            'error should be about file_characteristics missing')
+        self.assertEqual(len(response.data["success"]), 1, "one row should succeed")
+        self.assertEqual(
+            len(response.data["failed"]), 1, "there should have been one failed element"
+        )
+        self.assertEqual(
+            "file_frozen" in response.data["failed"][0]["errors"],
+            True,
+            "error should be about file_frozen being null",
+        )

         updated_file = File.objects.get(pk=1)
         self.assertEqual(updated_file.file_format, new_file_format)

     def test_file_update_list_error_key_not_found(self):
-        f1 = self.client.get('/rest/files/1').data
-        f2 = self.client.get('/rest/files/2').data
-        new_file_format = 'changed-format'
-        new_file_format_2 = 'changed-format-2'
-        f1['file_format'] = new_file_format
-        f2['file_format'] = new_file_format_2
+        f1 = self.client.get("/rest/files/1").data
+        f2 = self.client.get("/rest/files/2").data
+        new_file_format = "changed-format"
+        new_file_format_2 = "changed-format-2"
+        f1["file_format"] = new_file_format
+        f2["file_format"] = new_file_format_2

         # has no lookup key - should fail
-        f2.pop('id')
-        f2.pop('identifier')
+        f2.pop("id")
+        f2.pop("identifier")

-        response = self.client.put('/rest/files', [f1, f2], format="json")
+        response = self.client.put("/rest/files", [f1, f2], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['success']), 1, 'success list should be empty')
-        self.assertEqual(len(response.data['failed']), 1, 'there should have been one failed element')
-        error_msg_of_failed_row = response.data['failed'][0]['errors']['detail'][0]
-        self.assertEqual('identifying keys' in error_msg_of_failed_row, True,
-            'error should be about identifying keys missing')
+        self.assertEqual(len(response.data["success"]), 1, "one row should succeed")
+        self.assertEqual(
+            len(response.data["failed"]), 1, "there should have been one failed element"
+ ) + error_msg_of_failed_row = response.data["failed"][0]["errors"]["detail"][0] + self.assertEqual( + "identifying keys" in error_msg_of_failed_row, + True, + "error should be about identifying keys missing", + ) updated_file = File.objects.get(pk=1) self.assertEqual(updated_file.file_format, new_file_format) def test_file_update_list_allowed_projects_ok(self): # Both files in project 'project_x' - f1 = self.client.get('/rest/files/1').data - f2 = self.client.get('/rest/files/2').data + f1 = self.client.get("/rest/files/1").data + f2 = self.client.get("/rest/files/2").data - response = self.client.put('/rest/files?allowed_projects=project_x,y,z', [f1, f2], format="json") + response = self.client.put( + "/rest/files?allowed_projects=project_x,y,z", [f1, f2], format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_file_update_list_allowed_projects_fail(self): # Files in projects 'project_x' and 'research_project_112' - f1 = self.client.get('/rest/files/1').data - f2 = self.client.get('/rest/files/39').data + f1 = self.client.get("/rest/files/1").data + f2 = self.client.get("/rest/files/39").data - response = self.client.put('/rest/files?allowed_projects=project_x,y,z', [f1, f2], format="json") + response = self.client.put( + "/rest/files?allowed_projects=project_x,y,z", [f1, f2], format="json" + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) def test_file_update_list_allowed_projects_empty_value(self): - f1 = self.client.get('/rest/files/1').data - response = self.client.put('/rest/files?allowed_projects=', [f1], format="json") + f1 = self.client.get("/rest/files/1").data + response = self.client.put("/rest/files?allowed_projects=", [f1], format="json") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) def test_file_update_list_allowed_projects_not_list(self): new_data_1 = {} - new_data_1['identifier'] = "pid:urn:1" - new_data_1['file_name'] = 'Nice_new_name' + new_data_1["identifier"] = "pid:urn:1" + new_data_1["file_name"] = "Nice_new_name" - res = self.client.patch('/rest/files?allowed_projects=y,z,project_x', new_data_1, format="json") + res = self.client.patch( + "/rest/files?allowed_projects=y,z,project_x", new_data_1, format="json" + ) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST, res.data) @@ -842,29 +992,43 @@ def test_update_file_partial(self): new_data = { "file_name": "new_file_name", } - response = self.client.patch('/rest/files/%s' % self.identifier, new_data, format="json") + response = self.client.patch("/rest/files/%s" % self.identifier, new_data, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('file_name' in response.data.keys(), True) - self.assertEqual('file_path' in response.data.keys(), True, 'PATCH operation should return full content') - self.assertEqual(response.data['file_name'], 'new_file_name', 'Field file_name was not updated') + self.assertEqual("file_name" in response.data.keys(), True) + self.assertEqual( + "file_path" in response.data.keys(), + True, + "PATCH operation should return full content", + ) + self.assertEqual( + response.data["file_name"], + "new_file_name", + "Field file_name was not updated", + ) def test_update_partial_allowed_projects_ok(self): new_data = { "file_name": "new_file_name", } - response = self.client.patch('/rest/files/%s?allowed_projects=%s' % (self.identifier, self.pidentifier), - new_data, format="json") + response = self.client.patch( + 
"/rest/files/%s?allowed_projects=%s" % (self.identifier, self.pidentifier), + new_data, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['file_name'], 'new_file_name', response.data) + self.assertEqual(response.data["file_name"], "new_file_name", response.data) def test_update_partial_allowed_projects_fail(self): new_data = { "file_name": "new_file_name", } - response = self.client.patch('/rest/files/%s?allowed_projects=noproject' % self.identifier, - new_data, format="json") + response = self.client.patch( + "/rest/files/%s?allowed_projects=noproject" % self.identifier, + new_data, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) @@ -872,76 +1036,106 @@ def test_update_partial_allowed_projects_not_dict(self): new_data = { "file_name": "new_file_name", } - response = self.client.patch('/rest/files/%s?allowed_projects=%s' % (self.identifier, self.pidentifier), - [new_data], format="json") + response = self.client.patch( + "/rest/files/%s?allowed_projects=%s" % (self.identifier, self.pidentifier), + [new_data], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual('json' in response.data['detail'][0], True, 'Error regarding datatype') + self.assertEqual("json" in response.data["detail"][0], True, "Error regarding datatype") # # update list operations PATCH # def test_file_partial_update_list(self): - new_project_identifier = 'changed-project-identifier' - new_project_identifier_2 = 'changed-project-identifier-2' + new_project_identifier = "changed-project-identifier" + new_project_identifier_2 = "changed-project-identifier-2" test_data = {} - test_data['id'] = 1 - test_data['project_identifier'] = new_project_identifier + test_data["id"] = 1 + test_data["project_identifier"] = new_project_identifier second_test_data = {} - second_test_data['id'] = 2 - second_test_data['project_identifier'] = new_project_identifier_2 + second_test_data["id"] = 2 + second_test_data["project_identifier"] = new_project_identifier_2 - response = self.client.patch('/rest/files', [test_data, second_test_data], format="json") + response = self.client.patch("/rest/files", [test_data, second_test_data], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('success' in response.data, True, 'response.data should contain list of changed objects') - self.assertEqual(len(response.data['success']), 2, 'response.data should contain 2 changed objects') - self.assertEqual('file_characteristics' in response.data['success'][0]['object'], True, - 'response.data should contain full objects') + self.assertEqual( + "success" in response.data, + True, + "response.data should contain list of changed objects", + ) + self.assertEqual( + len(response.data["success"]), + 2, + "response.data should contain 2 changed objects", + ) + self.assertEqual( + "file_characteristics" in response.data["success"][0]["object"], + True, + "response.data should contain full objects", + ) updated_file = File.objects.get(pk=1) - self.assertEqual(updated_file.project_identifier, new_project_identifier, 'project_identifier did not update') + self.assertEqual( + updated_file.project_identifier, + new_project_identifier, + "project_identifier did not update", + ) def test_file_partial_update_list_allowed_projects_ok(self): new_data_1 = {} - new_data_1['identifier'] = "pid:urn:1" - new_data_1['file_name'] = 
'Nice_new_name' + new_data_1["identifier"] = "pid:urn:1" + new_data_1["file_name"] = "Nice_new_name" new_data_2 = {} - new_data_2['identifier'] = "pid:urn:2" - new_data_2['file_name'] = 'Not_so_nice_name' - - res = self.client.patch('/rest/files?allowed_projects=y,z,project_x', [new_data_1, new_data_2], format="json") + new_data_2["identifier"] = "pid:urn:2" + new_data_2["file_name"] = "Not_so_nice_name" + + res = self.client.patch( + "/rest/files?allowed_projects=y,z,project_x", + [new_data_1, new_data_2], + format="json", + ) self.assertEqual(res.status_code, status.HTTP_200_OK, res.data) - self.assertEqual(res.data['success'][0]['object']['file_name'], 'Nice_new_name', res.data) + self.assertEqual(res.data["success"][0]["object"]["file_name"], "Nice_new_name", res.data) def test_file_partial_update_list_allowed_projects_fail(self): # Files in projects 'project_x' and 'research_project_112' - f1 = self.client.get('/rest/files/1').data - f2 = self.client.get('/rest/files/39').data + f1 = self.client.get("/rest/files/1").data + f2 = self.client.get("/rest/files/39").data - response = self.client.patch('/rest/files?allowed_projects=project_x,y,z', [f1, f2], format="json") + response = self.client.patch( + "/rest/files?allowed_projects=project_x,y,z", [f1, f2], format="json" + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) def test_file_partial_update_list_allowed_projects_not_list(self): new_data_1 = {} - new_data_1['identifier'] = "pid:urn:1" - new_data_1['file_name'] = 'Nice_new_name' + new_data_1["identifier"] = "pid:urn:1" + new_data_1["file_name"] = "Nice_new_name" - res = self.client.patch('/rest/files?allowed_projects=y,z,project_x', new_data_1, format="json") + res = self.client.patch( + "/rest/files?allowed_projects=y,z,project_x", new_data_1, format="json" + ) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST, res.data) def test_file_partial_update_list_allowed_projects_no_identifier(self): new_data_1 = {} - new_data_1['file_name'] = 'Nice_new_name' + new_data_1["file_name"] = "Nice_new_name" new_data_2 = {} - new_data_2['id'] = 23 - new_data_2['file_name'] = 'Not_so_nice_name' - - res = self.client.patch('/rest/files?allowed_projects=y,z,project_x', [new_data_1, new_data_2], format="json") + new_data_2["id"] = 23 + new_data_2["file_name"] = "Not_so_nice_name" + + res = self.client.patch( + "/rest/files?allowed_projects=y,z,project_x", + [new_data_1, new_data_2], + format="json", + ) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST, res.data) @@ -956,34 +1150,44 @@ class FileApiWriteDeleteTests(FileApiWriteCommon): def test_delete_single_file_ok(self): dir_count_before = Directory.objects.all().count() - response = self.client.delete('/rest/files/1') + response = self.client.delete("/rest/files/1") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('deleted_files_count' in response.data, True, response.data) - self.assertEqual(response.data['deleted_files_count'], 1, response.data) + self.assertEqual("deleted_files_count" in response.data, True, response.data) + self.assertEqual(response.data["deleted_files_count"], 1, response.data) dir_count_after = Directory.objects.all().count() - self.assertEqual(dir_count_before, dir_count_after, 'no dirs should have been deleted') + self.assertEqual(dir_count_before, dir_count_after, "no dirs should have been deleted") deleted_file = File.objects_unfiltered.get(pk=1) self._check_project_root_byte_size_and_file_count(deleted_file.project_identifier) - 
self.assertEqual(deleted_file.date_modified, deleted_file.file_deleted, 'date_modified should be updated') + self.assertEqual( + deleted_file.date_modified, + deleted_file.file_deleted, + "date_modified should be updated", + ) def test_delete_single_file_ok_destroy_leading_dirs(self): - project_identifier = 'project_z' + project_identifier = "project_z" test_data = deepcopy(self.test_new_data) - test_data['file_path'] = '/project_z/some/path/here/%s' % test_data['file_name'] - test_data['project_identifier'] = project_identifier - test_data['identifier'] = 'abc123' - response = self.client.post('/rest/files', test_data, format='json') - self.assertEqual(Directory.objects.filter(project_identifier=project_identifier).exists(), True) - - response = self.client.delete('/rest/files/%s' % response.data['id']) + test_data["file_path"] = "/project_z/some/path/here/%s" % test_data["file_name"] + test_data["project_identifier"] = project_identifier + test_data["identifier"] = "abc123" + response = self.client.post("/rest/files", test_data, format="json") + self.assertEqual( + Directory.objects.filter(project_identifier=project_identifier).exists(), + True, + ) + + response = self.client.delete("/rest/files/%s" % response.data["id"]) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('deleted_files_count' in response.data, True, response.data) - self.assertEqual(response.data['deleted_files_count'], 1, response.data) + self.assertEqual("deleted_files_count" in response.data, True, response.data) + self.assertEqual(response.data["deleted_files_count"], 1, response.data) - self.assertEqual(Directory.objects.filter(project_identifier=project_identifier).exists(), False) + self.assertEqual( + Directory.objects.filter(project_identifier=project_identifier).exists(), + False, + ) def test_delete_single_file_404(self): - response = self.client.delete('/rest/files/doesnotexist') + response = self.client.delete("/rest/files/doesnotexist") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_bulk_delete_files_identifiers_not_found(self): @@ -991,8 +1195,8 @@ def test_bulk_delete_files_identifiers_not_found(self): A bulk delete request to /files, but any of the identifiers provided are not found. Should return 404. """ - identifiers = ['nope', 'doesnotexist', 'stillno'] - response = self.client.delete('/rest/files', identifiers, format="json") + identifiers = ["nope", "doesnotexist", "stillno"] + response = self.client.delete("/rest/files", identifiers, format="json") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.data) def test_bulk_delete_files_some_identifiers_not_found(self): @@ -1001,13 +1205,15 @@ def test_bulk_delete_files_some_identifiers_not_found(self): Should be ok, delete those files that are found. Assumably those identifiers that were not found did not exist anyway, therefore no harm is done. 
""" - identifiers = ['nope', 'doesnotexist', 'stillno'] + identifiers = ["nope", "doesnotexist", "stillno"] identifiers.append(File.objects.get(pk=1).identifier) - response = self.client.delete('/rest/files', identifiers, format="json") + response = self.client.delete("/rest/files", identifiers, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) removed = File.objects_unfiltered.get(pk=1).removed - self.assertEqual(removed, True, 'file should have been removed') - self._check_project_root_byte_size_and_file_count(File.objects_unfiltered.get(pk=1).project_identifier) + self.assertEqual(removed, True, "file should have been removed") + self._check_project_root_byte_size_and_file_count( + File.objects_unfiltered.get(pk=1).project_identifier + ) def test_bulk_delete_files_in_single_directory_1(self): """ @@ -1020,7 +1226,7 @@ def test_bulk_delete_files_in_single_directory_1(self): all_files_count_before = File.objects.all().count() file_ids = [f.id for f in Directory.objects.get(pk=3).files.all()] - response = self.client.delete('/rest/files', file_ids, format="json") + response = self.client.delete("/rest/files", file_ids, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) all_files_count_after = File.objects.all().count() @@ -1033,7 +1239,7 @@ def test_bulk_delete_files_in_single_directory_2(self): all_files_count_before = File.objects.all().count() file_ids = [f.id for f in Directory.objects.get(pk=4).files.all()] - response = self.client.delete('/rest/files', file_ids, format="json") + response = self.client.delete("/rest/files", file_ids, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) all_files_count_after = File.objects.all().count() @@ -1044,20 +1250,20 @@ def test_bulk_delete_file_list_one_file_id_missing(self): Otherwise complete set of files, but from one dir one file is missing. Should leave the one file intact, while preserving the directory tree. """ - all_files_count_before = File.objects.filter(project_identifier='project_x').count() - file_ids = [f.id for f in File.objects.filter(project_identifier='project_x')] + all_files_count_before = File.objects.filter(project_identifier="project_x").count() + file_ids = [f.id for f in File.objects.filter(project_identifier="project_x")] # everything except the last file should be removed file_ids.pop() - response = self.client.delete('/rest/files', file_ids, format="json") + response = self.client.delete("/rest/files", file_ids, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - all_files_after = File.objects.filter(project_identifier='project_x') + all_files_after = File.objects.filter(project_identifier="project_x") self.assertEqual(all_files_after.count(), all_files_count_before - len(file_ids)) expected_dirs_count = self._count_dirs_from_path(all_files_after[0].file_path) - actual_dirs_count = Directory.objects.filter(project_identifier='project_x').count() + actual_dirs_count = Directory.objects.filter(project_identifier="project_x").count() self.assertEqual(actual_dirs_count, expected_dirs_count) def test_bulk_delete_files_from_root(self): @@ -1066,16 +1272,23 @@ def test_bulk_delete_files_from_root(self): so the whole tree should end up being deleted. 
""" files_to_remove_count = 20 - file_ids = File.objects.filter(project_identifier='project_x').values_list('id', flat=True) + file_ids = File.objects.filter(project_identifier="project_x").values_list("id", flat=True) self.assertEqual(len(file_ids), files_to_remove_count) - response = self.client.delete('/rest/files', file_ids, format="json") + response = self.client.delete("/rest/files", file_ids, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('deleted_files_count', None), files_to_remove_count, response.data) - - self._assert_files_available_and_removed('project_x', 0, files_to_remove_count) - self.assertEqual(Directory.objects_unfiltered.filter(project_identifier='project_x').count(), 0, - 'all dirs should have been permanently removed') + self.assertEqual( + response.data.get("deleted_files_count", None), + files_to_remove_count, + response.data, + ) + + self._assert_files_available_and_removed("project_x", 0, files_to_remove_count) + self.assertEqual( + Directory.objects_unfiltered.filter(project_identifier="project_x").count(), + 0, + "all dirs should have been permanently removed", + ) def test_bulk_delete_sub_directory_1(self): """ @@ -1087,17 +1300,21 @@ def test_bulk_delete_sub_directory_1(self): file_ids += [f.id for f in Directory.objects.get(pk=6).files.all()] self.assertEqual(len(file_ids), files_to_remove_count) - response = self.client.delete('/rest/files', file_ids, format="json") + response = self.client.delete("/rest/files", file_ids, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('deleted_files_count', None), files_to_remove_count, response.data) + self.assertEqual( + response.data.get("deleted_files_count", None), + files_to_remove_count, + response.data, + ) - self._assert_files_available_and_removed('project_x', 5, files_to_remove_count) + self._assert_files_available_and_removed("project_x", 5, files_to_remove_count) # these dirs should still be left: # / # /project_x_FROZEN # /project_x_FROZEN/Experiment_X (has 5 files) - self.assertEqual(Directory.objects.filter(project_identifier='project_x').count(), 3) + self.assertEqual(Directory.objects.filter(project_identifier="project_x").count(), 3) def test_bulk_delete_sub_directory_2(self): """ @@ -1108,36 +1325,51 @@ def test_bulk_delete_sub_directory_2(self): file_ids = [f.id for f in Directory.objects.get(pk=6).files.all()] self.assertEqual(len(file_ids), files_to_remove_count) - response = self.client.delete('/rest/files', file_ids, format="json") + response = self.client.delete("/rest/files", file_ids, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('deleted_files_count', None), files_to_remove_count, response.data) + self.assertEqual( + response.data.get("deleted_files_count", None), + files_to_remove_count, + response.data, + ) - self._assert_files_available_and_removed('project_x', 10, files_to_remove_count) + self._assert_files_available_and_removed("project_x", 10, files_to_remove_count) # these dirs should still be left: # / # /project_x_FROZEN # /project_x_FROZEN/Experiment_X (5 files) # /project_x_FROZEN/Experiment_X/Phase_1 (5 files) - self.assertEqual(Directory.objects.filter(project_identifier='project_x').count(), 4) + self.assertEqual(Directory.objects.filter(project_identifier="project_x").count(), 4) # /project_x_FROZEN/Experiment_X/Phase_1/2017 <- this dir should be 
deleted, since # it only contained the 01-dir, which we specifically targeted for deletion - self.assertEqual(Directory.objects.filter( - project_identifier='project_x', - directory_path='/project_x_FROZEN/Experiment_X/Phase_1/2017' - ).count(), 0, 'dir should have been deleted') + self.assertEqual( + Directory.objects.filter( + project_identifier="project_x", + directory_path="/project_x_FROZEN/Experiment_X/Phase_1/2017", + ).count(), + 0, + "dir should have been deleted", + ) def _assert_files_available_and_removed(self, project_identifier, available, removed): """ After deleting files, check qty of files retrievable by usual means is as expected, and qty of files retrievable from objects_unfiltered with removed=True is as expected. """ - self.assertEqual(File.objects.filter(project_identifier=project_identifier).count(), available, - 'files should not be retrievable from removed=False scope') - self.assertEqual(File.objects_unfiltered.filter(project_identifier=project_identifier, removed=True).count(), - removed, - 'files should be retrievable from removed=True scope') + self.assertEqual( + File.objects.filter(project_identifier=project_identifier).count(), + available, + "files should not be retrievable from removed=False scope", + ) + self.assertEqual( + File.objects_unfiltered.filter( + project_identifier=project_identifier, removed=True + ).count(), + removed, + "files should be retrievable from removed=True scope", + ) def test_deleting_files_deprecates_datasets(self): for cr in CatalogRecord.objects.filter(deprecated=True): @@ -1146,33 +1378,37 @@ def test_deleting_files_deprecates_datasets(self): cr.force_save() datasets_with_file = CatalogRecord.objects.filter(files__id=1).count() - response = self.client.delete('/rest/files/1') + response = self.client.delete("/rest/files/1") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(CatalogRecord.objects.filter(deprecated=True).count(), datasets_with_file) class FileApiWriteRestoreTests(FileApiWriteCommon): - def test_restore_files_ok(self): """ Restore a few deleted files from directories, that still contain other files. Restored files should be appended to previously existing files. 
""" - response = self.client.delete('/rest/files/1') - response = self.client.delete('/rest/files/2') - response = self.client.delete('/rest/files/3') + response = self.client.delete("/rest/files/1") + response = self.client.delete("/rest/files/2") + response = self.client.delete("/rest/files/3") self.assertEqual(response.status_code, status.HTTP_200_OK) - deleted_files = File.objects_unfiltered.filter(pk__in=[1, 2, 3]) \ - .values('identifier', 'parent_directory_id') + deleted_files = File.objects_unfiltered.filter(pk__in=[1, 2, 3]).values( + "identifier", "parent_directory_id" + ) - response = self.client.post('/rest/files/restore', [f['identifier'] for f in deleted_files], format='json') + response = self.client.post( + "/rest/files/restore", + [f["identifier"] for f in deleted_files], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('restored_files_count' in response.data, True, response.data) - self.assertEqual(response.data['restored_files_count'], 3, response.data) + self.assertEqual("restored_files_count" in response.data, True, response.data) + self.assertEqual(response.data["restored_files_count"], 3, response.data) # ensure restored files are using previously existing directories - old_parent_dirs = { f['parent_directory_id'] for f in deleted_files } + old_parent_dirs = {f["parent_directory_id"] for f in deleted_files} files = File.objects.filter(pk__in=[1, 2, 3]) for f in files: self.assertEqual(f.file_deleted, None) @@ -1185,20 +1421,25 @@ def test_restore_files_recreate_missing_directories(self): """ proj = File.objects.get(pk=1).project_identifier - response = self.client.get('/rest/files?project_identifier=%s&fields=identifier&no_pagination=true' - % proj, format='json') - file_identifiers = [ f['identifier'] for f in response.data ] + response = self.client.get( + "/rest/files?project_identifier=%s&fields=identifier&no_pagination=true" % proj, + format="json", + ) + file_identifiers = [f["identifier"] for f in response.data] - self.client.delete('/rest/files', file_identifiers, format='json') + self.client.delete("/rest/files", file_identifiers, format="json") - deleted_directory_ids = File.objects_unfiltered.filter(identifier__in=file_identifiers) \ - .values_list('parent_directory_id', flat=True) - old_parent_dirs = { id for id in deleted_directory_ids } + deleted_directory_ids = File.objects_unfiltered.filter( + identifier__in=file_identifiers + ).values_list("parent_directory_id", flat=True) + old_parent_dirs = {id for id in deleted_directory_ids} - response = self.client.post('/rest/files/restore', file_identifiers, format='json') + response = self.client.post("/rest/files/restore", file_identifiers, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('restored_files_count' in response.data, True, response.data) - self.assertEqual(response.data['restored_files_count'], len(file_identifiers), response.data) + self.assertEqual("restored_files_count" in response.data, True, response.data) + self.assertEqual( + response.data["restored_files_count"], len(file_identifiers), response.data + ) # ensure restored files are using new directories files = File.objects.filter(identifier__in=file_identifiers) @@ -1206,15 +1447,21 @@ def test_restore_files_recreate_missing_directories(self): self.assertEqual(f.parent_directory_id in old_parent_dirs, False) def test_check_parameter_is_string_list(self): - response = self.client.post('/rest/files/restore', ['a', 'b', 1], format='json') + response 
= self.client.post("/rest/files/restore", ["a", "b", 1], format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_check_files_belong_to_one_project(self): f1 = File.objects_unfiltered.get(pk=1) - f2 = File.objects_unfiltered.filter().exclude(project_identifier=f1.project_identifier).first() - response = self.client.delete('/rest/files/%d' % f1.id) - response = self.client.delete('/rest/files/%d' % f2.id) - response = self.client.post('/rest/files/restore', [ f1.identifier, f2.identifier ], format='json') + f2 = ( + File.objects_unfiltered.filter() + .exclude(project_identifier=f1.project_identifier) + .first() + ) + response = self.client.delete("/rest/files/%d" % f1.id) + response = self.client.delete("/rest/files/%d" % f2.id) + response = self.client.post( + "/rest/files/restore", [f1.identifier, f2.identifier], format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) @@ -1224,52 +1471,66 @@ class FileApiWriteXmlTests(FileApiWriteCommon): """ def test_xml_api(self): - content_type = 'application/xml' - data = 'tauhketta yeah' + content_type = "application/xml" + data = ( + 'tauhketta yeah' + ) # create - response = self.client.post('/rest/files/1/xml?namespace=breh', data, content_type=content_type) + response = self.client.post( + "/rest/files/1/xml?namespace=breh", data, content_type=content_type + ) self.assertEqual(response.status_code in (200, 201, 204), True) - self.assertEqual('updated stuff' - response = self.client.put('/rest/files/1/xml?namespace=breh', data, content_type=content_type) + response = self.client.put( + "/rest/files/1/xml?namespace=breh", data, content_type=content_type + ) self.assertEqual(response.status_code in (200, 201, 204), True) # get updated again - response = self.client.get('/rest/files/1/xml?namespace=breh', content_type=content_type, ) - self.assertEqual('updated stuff' in response.data, True) + response = self.client.get( + "/rest/files/1/xml?namespace=breh", + content_type=content_type, + ) + self.assertEqual("updated stuff" in response.data, True) # delete - response = self.client.delete('/rest/files/1/xml?namespace=breh', data, content_type=content_type) + response = self.client.delete( + "/rest/files/1/xml?namespace=breh", data, content_type=content_type + ) self.assertEqual(response.status_code in (200, 201, 204), True) - response = self.client.delete('/rest/files/1/xml?namespace=bruh', data, content_type=content_type) + response = self.client.delete( + "/rest/files/1/xml?namespace=bruh", data, content_type=content_type + ) self.assertEqual(response.status_code in (200, 201, 204), True) # get list - response = self.client.get('/rest/files/1/xml', content_type=content_type) + response = self.client.get("/rest/files/1/xml", content_type=content_type) self.assertEqual(response.status_code in (200, 201, 204), True) class FileApiWriteEndUserAccess(FileApiWriteCommon): - def setUp(self): super().setUp() self.token = get_test_oidc_token() @@ -1277,110 +1538,108 @@ def setUp(self): @responses.activate def test_user_cant_create_files(self): - ''' + """ Ensure users are unable to create new files. 
- ''' + """ # ensure user belongs to same project - self.token['group_names'].append('IDA01:%s' % self.test_new_data['project_identifier']) - self._use_http_authorization(method='bearer', token=self.token) + self.token["group_names"].append("IDA01:%s" % self.test_new_data["project_identifier"]) + self._use_http_authorization(method="bearer", token=self.token) - response = self.client.post('/rest/files', self.test_new_data, format="json") + response = self.client.post("/rest/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) @responses.activate def test_user_can_only_update_permitted_file_fields(self): - ''' + """ Ensure users are only able to modify permitted fields. - ''' + """ # ensure user belongs to same project proj = File.objects.get(pk=1).project_identifier - self.token['group_names'].append('IDA01:%s' % proj) - self._use_http_authorization(method='bearer', token=self.token) + self.token["group_names"].append("IDA01:%s" % proj) + self._use_http_authorization(method="bearer", token=self.token) - response = self.client.get('/rest/files/1', format="json") + response = self.client.get("/rest/files/1", format="json") file = response.data original_file = deepcopy(file) - file['byte_size'] = 200 - file['checksum']['value'] = 'changed' - file['parent_directory'] = 1 - file['file_frozen'] = '3' + file['file_frozen'][1:] - file['file_format'] = 'changed' - file['file_name'] = 'changed' - file['file_path'] = '/oh/no' - file['file_storage'] = 2 - file['file_uploaded'] = '3' + file['file_uploaded'][1:] - file['identifier'] = 'changed' - file['open_access'] = True - file['project_identifier'] = 'changed' - file['service_modified'] = 'changed' - file['service_created'] = 'changed' - file['removed'] = True + file["byte_size"] = 200 + file["checksum"]["value"] = "changed" + file["parent_directory"] = 1 + file["file_frozen"] = "3" + file["file_frozen"][1:] + file["file_format"] = "changed" + file["file_name"] = "changed" + file["file_path"] = "/oh/no" + file["file_storage"] = 2 + file["file_uploaded"] = "3" + file["file_uploaded"][1:] + file["identifier"] = "changed" + file["open_access"] = True + file["project_identifier"] = "changed" + file["service_modified"] = "changed" + file["service_created"] = "changed" + file["removed"] = True # the only field that should be changed - file['file_characteristics'] = { 'title': 'new title'} + file["file_characteristics"] = {"title": "new title"} - response = self.client.put('/rest/files/1', file, format="json") + response = self.client.put("/rest/files/1", file, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['file_characteristics']['title'], 'new title', response.data) + self.assertEqual(response.data["file_characteristics"]["title"], "new title", response.data) for key, value in response.data.items(): try: - if key in ('date_modified', 'file_modified'): + if key in ("date_modified", "file_modified"): # these fields are changed by metax continue - elif key == 'file_characteristics': + elif key == "file_characteristics": # the field that should have been changed by the user self.assertNotEqual(original_file[key], response.data[key]) else: # must not have changed self.assertEqual(original_file[key], response.data[key]) except KeyError as e: - if e.args[0] == 'user_modified': + if e.args[0] == "user_modified": # added by metax continue raise @responses.activate def test_user_can_update_files_in_their_projects(self): - ''' + """ 
Ensure users can edit files in projects they are a member of. - ''' - proj = File.objects.only('project_identifier').get(pk=1).project_identifier + """ + proj = File.objects.only("project_identifier").get(pk=1).project_identifier - response = self.client.get('/rest/files?project_identifier=%s' % proj, - format="json") + response = self.client.get("/rest/files?project_identifier=%s" % proj, format="json") - file = response.data['results'][0] + file = response.data["results"][0] - self.token['group_names'].append('IDA01:%s' % proj) - self._use_http_authorization(method='bearer', token=self.token) + self.token["group_names"].append("IDA01:%s" % proj) + self._use_http_authorization(method="bearer", token=self.token) - response = self.client.put('/rest/files/%s' % file['id'], file, format="json") + response = self.client.put("/rest/files/%s" % file["id"], file, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) - response = self.client.put('/rest/files', [file], format="json") + response = self.client.put("/rest/files", [file], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) @responses.activate def test_user_cant_update_files_in_others_projects(self): - ''' + """ Ensure users can not edit files in projects they are not a member of. - ''' - proj = File.objects.only('project_identifier').get(pk=1).project_identifier + """ + proj = File.objects.only("project_identifier").get(pk=1).project_identifier - response = self.client.get('/rest/files?project_identifier=%s' % proj, - format="json") + response = self.client.get("/rest/files?project_identifier=%s" % proj, format="json") - file = response.data['results'][0] + file = response.data["results"][0] - self.token['group_names'] = ['no_files_for_this_project'] - self._use_http_authorization(method='bearer', token=self.token) + self.token["group_names"] = ["no_files_for_this_project"] + self._use_http_authorization(method="bearer", token=self.token) - response = self.client.put('/rest/files/%s' % file['id'], file, format="json") + response = self.client.put("/rest/files/%s" % file["id"], file, format="json") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - response = self.client.put('/rest/files', [file], format="json") + response = self.client.put("/rest/files", [file], format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) @@ -1398,8 +1657,14 @@ def test_dryrun(self): Ensure query parameter ?dryrun=true returns same result as they normally would, but changes made during the request do not get saved in the db. 
""" - response = self.client.post('/rest/files?what&dryrun=true&other', self.test_new_data, format="json") + response = self.client.post( + "/rest/files?what&dryrun=true&other", self.test_new_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('id' in response.data, True) - found = File.objects.filter(pk=response.data['id']).exists() - self.assertEqual(found, False, 'file should not get truly created when using parameter dryrun') + self.assertEqual("id" in response.data, True) + found = File.objects.filter(pk=response.data["id"]).exists() + self.assertEqual( + found, + False, + "file should not get truly created when using parameter dryrun", + ) diff --git a/src/metax_api/tests/api/rest/base/views/filestorages/read.py b/src/metax_api/tests/api/rest/base/views/filestorages/read.py index 1833c58a..aad76fe9 100755 --- a/src/metax_api/tests/api/rest/base/views/filestorages/read.py +++ b/src/metax_api/tests/api/rest/base/views/filestorages/read.py @@ -14,13 +14,12 @@ class FileStorageApiReadBasicTests(APITestCase, TestClassUtils): - @classmethod def setUpClass(cls): """ Loaded only once for test cases inside this class. """ - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(FileStorageApiReadBasicTests, cls).setUpClass() def setUp(self): @@ -28,13 +27,17 @@ def setUp(self): def test_basic_get(self): fs = FileStorage.objects.get(pk=1) - response = self.client.get('/rest/filestorages/%d' % fs.id) + response = self.client.get("/rest/filestorages/%d" % fs.id) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get('/rest/filestorages/%s' % fs.file_storage_json['identifier']) + response = self.client.get("/rest/filestorages/%s" % fs.file_storage_json["identifier"]) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_basic_list(self): - response = self.client.get('/rest/filestorages') + response = self.client.get("/rest/filestorages") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['results']), FileStorage.objects.all().count(), response.data) + self.assertEqual( + len(response.data["results"]), + FileStorage.objects.all().count(), + response.data, + ) diff --git a/src/metax_api/tests/api/rest/base/views/filestorages/write.py b/src/metax_api/tests/api/rest/base/views/filestorages/write.py index df7183fb..2bdc6f0b 100755 --- a/src/metax_api/tests/api/rest/base/views/filestorages/write.py +++ b/src/metax_api/tests/api/rest/base/views/filestorages/write.py @@ -14,39 +14,40 @@ class FileStorageApiWriteCommon(APITestCase, TestClassUtils): - @classmethod def setUpClass(cls): """ Loaded only once for test cases inside this class. 
""" - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(FileStorageApiWriteCommon, cls).setUpClass() def setUp(self): - self.new_test_data = self._get_object_from_test_data('filestorage') - self.new_test_data.pop('id') - self.new_test_data['file_storage_json']['identifier'] = 'new-file-storage' + self.new_test_data = self._get_object_from_test_data("filestorage") + self.new_test_data.pop("id") + self.new_test_data["file_storage_json"]["identifier"] = "new-file-storage" self._use_http_authorization() class FileStorageApiWriteBasicTests(FileStorageApiWriteCommon): - def test_create(self): - response = self.client.post('/rest/filestorages', self.new_test_data, format="json") + response = self.client.post("/rest/filestorages", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_create_identifier_already_exists(self): - response = self.client.post('/rest/filestorages', self.new_test_data, format="json") + response = self.client.post("/rest/filestorages", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) - response = self.client.post('/rest/filestorages', self.new_test_data, format="json") + response = self.client.post("/rest/filestorages", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('already exists' in response.data['file_storage_json']['identifier'][0], - True, response.data) + self.assertEqual( + "already exists" in response.data["file_storage_json"]["identifier"][0], + True, + response.data, + ) def test_delete(self): - response = self.client.delete('/rest/filestorages/1') + response = self.client.delete("/rest/filestorages/1") self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) fs = FileStorage.objects_unfiltered.get(pk=1) - self.assertEqual(fs.removed, True, 'should be deleted') + self.assertEqual(fs.removed, True, "should be deleted") self.assertEqual(fs.date_removed, fs.date_modified) diff --git a/src/metax_api/tests/api/rest/base/views/schemas/read.py b/src/metax_api/tests/api/rest/base/views/schemas/read.py index eea13fd6..74a83f94 100755 --- a/src/metax_api/tests/api/rest/base/views/schemas/read.py +++ b/src/metax_api/tests/api/rest/base/views/schemas/read.py @@ -12,25 +12,24 @@ class SchemaApiReadTests(APITestCase, TestClassUtils): - def test_read_schemas_list(self): - response = self.client.get('/rest/schemas') + response = self.client.get("/rest/schemas") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(response.data['count'] > 0) + self.assertTrue(response.data["count"] > 0) def test_read_schemas_list_html(self): - headers = {'HTTP_ACCEPT': 'text/html'} - response = self.client.get('/rest/schemas', **headers) + headers = {"HTTP_ACCEPT": "text/html"} + response = self.client.get("/rest/schemas", **headers) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(response._headers['content-type'][1].find('text/html') >= 0) + self.assertTrue(response._headers["content-type"][1].find("text/html") >= 0) def test_read_schema_retrieve_existing(self): - list_response = self.client.get('/rest/schemas') + list_response = self.client.get("/rest/schemas") self.assertEqual(list_response.status_code, status.HTTP_200_OK) - self.assertTrue(list_response.data['count'] > 0, 'No schemas available') - response = self.client.get('/rest/schemas/%s' % list_response.data['results'][0]) + 
self.assertTrue(list_response.data["count"] > 0, "No schemas available") + response = self.client.get("/rest/schemas/%s" % list_response.data["results"][0]) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_read_schema_not_exists(self): - response = self.client.get('/rest/schemas/thisshouldnotexist') + response = self.client.get("/rest/schemas/thisshouldnotexist") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) diff --git a/src/metax_api/tests/api/rest/v2/views/apierrors/read.py b/src/metax_api/tests/api/rest/v2/views/apierrors/read.py index 4610453b..ace87cd8 100755 --- a/src/metax_api/tests/api/rest/v2/views/apierrors/read.py +++ b/src/metax_api/tests/api/rest/v2/views/apierrors/read.py @@ -27,7 +27,7 @@ def setUpClass(cls): """ Loaded only once for test cases inside this class. """ - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(ApiErrorReadBasicTests, cls).setUpClass() def setUp(self): @@ -41,114 +41,123 @@ def _assert_fields_presence(self, response): Check presence and absence of some key information. """ self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('data' in response.data, True, response.data) - self.assertEqual('response' in response.data, True, response.data) - self.assertEqual('traceback' in response.data, True, response.data) - self.assertEqual('url' in response.data, True, response.data) - self.assertEqual('HTTP_AUTHORIZATION' in response.data['headers'], False, response.data['headers']) + self.assertEqual("data" in response.data, True, response.data) + self.assertEqual("response" in response.data, True, response.data) + self.assertEqual("traceback" in response.data, True, response.data) + self.assertEqual("url" in response.data, True, response.data) + self.assertEqual( + "HTTP_AUTHORIZATION" in response.data["headers"], + False, + response.data["headers"], + ) def test_list_errors(self): """ Each requesting resulting in an error should leave behind one API error entry. 
""" - cr_1 = self.client.get('/rest/v2/datasets/1').data - cr_1.pop('id') - cr_1.pop('identifier') - cr_1.pop('data_catalog') # causes an error + cr_1 = self.client.get("/rest/v2/datasets/1").data + cr_1.pop("id") + cr_1.pop("identifier") + cr_1.pop("data_catalog") # causes an error - response = self.client.post('/rest/v2/datasets', cr_1, format='json') + response = self.client.post("/rest/v2/datasets", cr_1, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - response = self.client.post('/rest/v2/datasets', cr_1, format='json') + response = self.client.post("/rest/v2/datasets", cr_1, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - response = self.client.get('/rest/v2/apierrors') + response = self.client.get("/rest/v2/apierrors") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_get_error_details(self): - cr_1 = self.client.get('/rest/v2/datasets/1').data - cr_1.pop('id') - cr_1.pop('identifier') - cr_1.pop('data_catalog') # causes an error - cr_1['research_dataset']['title'] = { 'en': 'Abc' } + cr_1 = self.client.get("/rest/v2/datasets/1").data + cr_1.pop("id") + cr_1.pop("identifier") + cr_1.pop("data_catalog") # causes an error + cr_1["research_dataset"]["title"] = {"en": "Abc"} - response = self.client.post('/rest/v2/datasets', cr_1, format='json') + response = self.client.post("/rest/v2/datasets", cr_1, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # list errors in order to get error identifier - response = self.client.get('/rest/v2/apierrors') + response = self.client.get("/rest/v2/apierrors") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('identifier' in response.data[0], True, response.data) + self.assertEqual("identifier" in response.data[0], True, response.data) - response = self.client.get('/rest/v2/apierrors/%s' % response.data[0]['identifier']) + response = self.client.get("/rest/v2/apierrors/%s" % response.data[0]["identifier"]) self._assert_fields_presence(response) - self.assertEqual('data_catalog' in response.data['response'], True, response.data['response']) - self.assertEqual(response.data['data']['research_dataset']['title']['en'], 'Abc', - response.data['data']['research_dataset']['title']) + self.assertEqual( + "data_catalog" in response.data["response"], True, response.data["response"] + ) + self.assertEqual( + response.data["data"]["research_dataset"]["title"]["en"], + "Abc", + response.data["data"]["research_dataset"]["title"], + ) def test_delete_error_details(self): - cr_1 = self.client.get('/rest/v2/datasets/1').data - cr_1.pop('id') - cr_1.pop('identifier') - cr_1.pop('data_catalog') # causes an error + cr_1 = self.client.get("/rest/v2/datasets/1").data + cr_1.pop("id") + cr_1.pop("identifier") + cr_1.pop("data_catalog") # causes an error - response = self.client.post('/rest/v2/datasets', cr_1, format='json') + response = self.client.post("/rest/v2/datasets", cr_1, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - response = self.client.get('/rest/v2/apierrors') - response = self.client.delete('/rest/v2/apierrors/%s' % response.data[0]['identifier']) + response = self.client.get("/rest/v2/apierrors") + response = self.client.delete("/rest/v2/apierrors/%s" % response.data[0]["identifier"]) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) - response = self.client.get('/rest/v2/apierrors') + response = 
self.client.get("/rest/v2/apierrors") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_delete_all_error_details(self): - cr_1 = self.client.get('/rest/v2/datasets/1').data - cr_1.pop('id') - cr_1.pop('identifier') - cr_1.pop('data_catalog') # causes an error + cr_1 = self.client.get("/rest/v2/datasets/1").data + cr_1.pop("id") + cr_1.pop("identifier") + cr_1.pop("data_catalog") # causes an error - response = self.client.post('/rest/v2/datasets', cr_1, format='json') + response = self.client.post("/rest/v2/datasets", cr_1, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - response = self.client.post('/rest/v2/datasets', cr_1, format='json') + response = self.client.post("/rest/v2/datasets", cr_1, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) # ensure something was produced... - response = self.client.get('/rest/v2/apierrors') + response = self.client.get("/rest/v2/apierrors") - response = self.client.post('/rest/v2/apierrors/flush') + response = self.client.post("/rest/v2/apierrors/flush") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get('/rest/v2/apierrors') + response = self.client.get("/rest/v2/apierrors") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_bulk_operation_produces_error_entry(self): """ Ensure also bulk operations produce error entries. """ - cr_1 = self.client.get('/rest/v2/datasets/1').data - cr_1.pop('id') - cr_1.pop('identifier') - cr_1.pop('data_catalog') # causes an error - response = self.client.post('/rest/v2/datasets', [cr_1, cr_1], format='json') + cr_1 = self.client.get("/rest/v2/datasets/1").data + cr_1.pop("id") + cr_1.pop("identifier") + cr_1.pop("data_catalog") # causes an error + response = self.client.post("/rest/v2/datasets", [cr_1, cr_1], format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - response = self.client.get('/rest/v2/apierrors') + response = self.client.get("/rest/v2/apierrors") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get('/rest/v2/apierrors/%s' % response.data[0]['identifier']) + response = self.client.get("/rest/v2/apierrors/%s" % response.data[0]["identifier"]) self._assert_fields_presence(response) - self.assertEqual('other' in response.data, True, response.data) - self.assertEqual('bulk_request' in response.data['other'], True, response.data) - self.assertEqual('data_row_count' in response.data['other'], True, response.data) + self.assertEqual("other" in response.data, True, response.data) + self.assertEqual("bulk_request" in response.data["other"], True, response.data) + self.assertEqual("data_row_count" in response.data["other"], True, response.data) def test_api_permitted_only_to_metax_user(self): # uses testuser by default self._use_http_authorization() - response = self.client.get('/rest/v2/apierrors') + response = self.client.get("/rest/v2/apierrors") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - response = self.client.get('/rest/v2/apierrors/123') + response = self.client.get("/rest/v2/apierrors/123") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - response = self.client.delete('/rest/v2/apierrors/123') + response = self.client.delete("/rest/v2/apierrors/123") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - response = 
self.client.post('/rest/v2/apierrors/flush_errors') + response = self.client.post("/rest/v2/apierrors/flush_errors") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) diff --git a/src/metax_api/tests/api/rest/v2/views/common/auth.py b/src/metax_api/tests/api/rest/v2/views/common/auth.py index ea5c9730..125ff3bd 100755 --- a/src/metax_api/tests/api/rest/v2/views/common/auth.py +++ b/src/metax_api/tests/api/rest/v2/views/common/auth.py @@ -15,6 +15,7 @@ _logger = logging.getLogger(__name__) + class ApiServiceAccessAuthorization(CatalogRecordApiWriteCommon): """ @@ -24,13 +25,13 @@ class ApiServiceAccessAuthorization(CatalogRecordApiWriteCommon): def setUp(self): super().setUp() # test user api_auth_user has some custom api permissions set in settings.py - self._use_http_authorization(username='api_auth_user') + self._use_http_authorization(username="api_auth_user") def test_read_access_ok(self): """ User api_auth_user should have read access to files api. """ - response = self.client.get('/rest/v2/files/1') + response = self.client.get("/rest/v2/files/1") self.assertEqual(response.status_code, status.HTTP_200_OK) def test_read_access_fail(self): @@ -39,36 +40,36 @@ def test_read_access_fail(self): about the existence of requested file. """ self.client._credentials = {} - response = self.client.get('/rest/v2/files/1') + response = self.client.get("/rest/v2/files/1") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_create_access_ok(self): """ User api_auth_user should have create access to datasets api. """ - response = self.client.get('/rest/v2/datasets/1') + response = self.client.get("/rest/v2/datasets/1") cr = response.data - cr['contract'] = 1 - response = self.client.put('/rest/v2/datasets/1', cr, format='json') + cr["contract"] = 1 + response = self.client.put("/rest/v2/datasets/1", cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_update_access_error(self): """ User api_auth_user should not have update access to files api. """ - response = self.client.get('/rest/v2/files/1') + response = self.client.get("/rest/v2/files/1") self.assertEqual(response.status_code, status.HTTP_200_OK) file = response.data - file['file_format'] = 'text/html' + file["file_format"] = "text/html" - response = self.client.put('/rest/v2/files/1', file, format='json') + response = self.client.put("/rest/v2/files/1", file, format="json") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) def test_delete_access_error(self): """ User api_auth_user should not have delete access to files api. """ - response = self.client.delete('/rest/v2/files/1') + response = self.client.delete("/rest/v2/files/1") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_read_for_world_ok(self): @@ -76,7 +77,7 @@ def test_read_for_world_ok(self): Reading datasets api should be permitted even without any authorization. 
""" self.client._credentials = {} - response = self.client.get('/rest/v2/datasets/1') + response = self.client.get("/rest/v2/datasets/1") self.assertEqual(response.status_code, status.HTTP_200_OK) @@ -92,7 +93,7 @@ class ApiEndUserAccessAuthorization(CatalogRecordApiWriteCommon): def setUp(self): super().setUp() - self._use_http_authorization(method='bearer', token=get_test_oidc_token()) + self._use_http_authorization(method="bearer", token=get_test_oidc_token()) @responses.activate def test_valid_token(self): @@ -101,7 +102,7 @@ def test_valid_token(self): valid authentication works. Should return successfully. """ self._mock_token_validation_succeeds() - response = self.client.get('/rest/v2/datasets/1') + response = self.client.get("/rest/v2/datasets/1") self.assertEqual(response.status_code, status.HTTP_200_OK) @responses.activate @@ -123,7 +124,7 @@ def test_invalid_token(self): In all cases, metax code execution stops at the middleware where authentication failed. """ self._mock_token_validation_fails() - response = self.client.get('/rest/v2/datasets/1') + response = self.client.get("/rest/v2/datasets/1") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) @responses.activate @@ -134,11 +135,11 @@ def test_end_user_read_access(self): self._mock_token_validation_succeeds() # datasets-api should be allowed for end users - response = self.client.get('/rest/v2/datasets/1') + response = self.client.get("/rest/v2/datasets/1") self.assertEqual(response.status_code, status.HTTP_200_OK) # contracts-api should not be allowed for end users - response = self.client.get('/rest/v2/contracts/1') + response = self.client.get("/rest/v2/contracts/1") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) @responses.activate @@ -148,5 +149,5 @@ def test_end_user_create_access_error(self): """ self._mock_token_validation_succeeds() # end users should not have create access to files api. 
-        response = self.client.post('/rest/v2/files', {}, format='json')
+        response = self.client.post("/rest/v2/files", {}, format="json")
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
diff --git a/src/metax_api/tests/api/rest/v2/views/common/read.py b/src/metax_api/tests/api/rest/v2/views/common/read.py
index f902c97e..688a376d 100755
--- a/src/metax_api/tests/api/rest/v2/views/common/read.py
+++ b/src/metax_api/tests/api/rest/v2/views/common/read.py
@@ -28,11 +28,11 @@ def test_removed_query_param(self):
         obj2 = CatalogRecord.objects.get(pk=2)
         obj2.removed = True
         obj2.force_save()
-        response = self.client.get('/rest/v2/datasets/1')
+        response = self.client.get("/rest/v2/datasets/1")
         self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
-        response = self.client.get('/rest/v2/datasets/1?removed=true')
+        response = self.client.get("/rest/v2/datasets/1?removed=true")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        response = self.client.get('/rest/v2/datasets/metadata_version_identifiers')
+        response = self.client.get("/rest/v2/datasets/metadata_version_identifiers")
         self.assertEqual(obj.metadata_version_identifier not in response.data, True)
         self.assertEqual(obj2.metadata_version_identifier not in response.data, True)
 
@@ -42,31 +42,39 @@ def test_removed_query_param(self):
         obj2 = File.objects.get(pk=2)
         obj2.removed = True
         obj2.force_save()
-        response = self.client.get('/rest/v2/files/1')
+        response = self.client.get("/rest/v2/files/1")
         self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
-        response = self.client.get('/rest/v2/files/1?removed=true')
+        response = self.client.get("/rest/v2/files/1?removed=true")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
     def test_removed_parameter_gets_correct_amount_of_objects(self):
-        path = '/rest/v2/datasets'
+        path = "/rest/v2/datasets"
         objects = CatalogRecord.objects.all().values()
 
-        results = self.client.get('{0}?pagination=false&removed=false'.format(path)).json()
+        results = self.client.get("{0}?pagination=false&removed=false".format(path)).json()
         initial_amt = len(results)
 
-        results = self.client.get('{0}?pagination=false&removed=true'.format(path)).json()
-        self.assertEqual(len(results), 0, "Without removed objects remove=true should return 0 results")
+        results = self.client.get("{0}?pagination=false&removed=true".format(path)).json()
+        self.assertEqual(
+            len(results),
+            0,
+            "Without removed objects remove=true should return 0 results",
+        )
 
         self._use_http_authorization()
         amt_to_delete = 2
         for i in range(amt_to_delete):
-            response = self.client.delete('{0}/{1}'.format(path, objects[i]['id']))
+            response = self.client.delete("{0}/{1}".format(path, objects[i]["id"]))
             self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)
 
-        results = self.client.get('{0}?pagination=false&removed=false'.format(path)).json()
-        self.assertEqual(len(results), initial_amt - amt_to_delete, "Non-removed object amount is incorrect")
+        results = self.client.get("{0}?pagination=false&removed=false".format(path)).json()
+        self.assertEqual(
+            len(results),
+            initial_amt - amt_to_delete,
+            "Non-removed object amount is incorrect",
+        )
 
-        results = self.client.get('{0}?pagination=false&removed=true'.format(path)).json()
+        results = self.client.get("{0}?pagination=false&removed=true".format(path)).json()
         self.assertEqual(len(results), amt_to_delete, "Removed object amount is incorrect")
 
@@ -77,43 +85,61 @@ class ApiReadPaginationTests(CatalogRecordApiReadCommon):
     """
 
     def test_read_catalog_record_list_pagination_1(self):
-        for param in ['pagination=true', 'pagination', '']:
-            response = self.client.get('/rest/datasets?{}&limit=2&offset=0'.format(param))
+        for param in ["pagination=true", "pagination", ""]:
+            response = self.client.get("/rest/datasets?{}&limit=2&offset=0".format(param))
             self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-            self.assertEqual(len(response.data['results']), 2, 'There should have been exactly two results')
-            self.assertEqual(response.data['results'][0]['id'], 1, 'Id of first result should have been 1')
+            self.assertEqual(
+                len(response.data["results"]),
+                2,
+                "There should have been exactly two results",
+            )
+            self.assertEqual(
+                response.data["results"][0]["id"],
+                1,
+                "Id of first result should have been 1",
+            )
 
     def test_read_catalog_record_list_pagination_2(self):
-        for param in ['pagination=true', 'pagination', '']:
-            response = self.client.get('/rest/datasets?{}&limit=2&offset=2'.format(param))
+        for param in ["pagination=true", "pagination", ""]:
+            response = self.client.get("/rest/datasets?{}&limit=2&offset=2".format(param))
             self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-            self.assertEqual(len(response.data['results']), 2, 'There should have been exactly two results')
-            self.assertEqual(response.data['results'][0]['id'], 3, 'Id of first result should have been 3')
+            self.assertEqual(
+                len(response.data["results"]),
+                2,
+                "There should have been exactly two results",
+            )
+            self.assertEqual(
+                response.data["results"][0]["id"],
+                3,
+                "Id of first result should have been 3",
+            )
 
     def test_disable_pagination(self):
-        response = self.client.get('/rest/datasets?pagination=false')
+        response = self.client.get("/rest/datasets?pagination=false")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual('next' not in response.data, True)
-        self.assertEqual('results' not in response.data, True)
+        self.assertEqual("next" not in response.data, True)
+        self.assertEqual("results" not in response.data, True)
 
     def test_pagination_ordering(self):
         limit = 5
 
-        for order in ('preservation_state', '-preservation_state'):
+        for order in ("preservation_state", "-preservation_state"):
 
             # vary offset from 0 to 20, in increments of 5
             for offset in range(0, 20, 5):
 
-                response = self.client.get(f'/rest/v2/datasets?limit={limit}&offset={offset}&ordering={order}')
+                response = self.client.get(
+                    f"/rest/v2/datasets?limit={limit}&offset={offset}&ordering={order}"
+                )
                 self.assertEqual(response.status_code, status.HTTP_200_OK)
 
-                from_api = [cr['preservation_state'] for cr in response.data['results']]
+                from_api = [cr["preservation_state"] for cr in response.data["results"]]
 
                 from_db = [
-                    r for r in CatalogRecord.objects
-                    .filter()
+                    r
+                    for r in CatalogRecord.objects.filter()
                     .order_by(order)
-                    .values_list('preservation_state', flat=True)[offset:offset + limit]
+                    .values_list("preservation_state", flat=True)[offset : offset + limit]
                 ]
 
                 self.assertEqual(from_api, from_db)
 
@@ -132,33 +158,35 @@ class ApiReadHTTPHeaderTests(CatalogRecordApiReadCommon):
 
     def test_get_with_if_modified_since_header_ok(self):
         cr = CatalogRecord.objects.get(pk=self.pk)
         date_modified = cr.date_modified
-        date_modified_in_gmt = timezone.localtime(date_modified, timezone=tz('GMT'))
+        date_modified_in_gmt = timezone.localtime(date_modified, timezone=tz("GMT"))
 
-        if_modified_since_header_value = date_modified_in_gmt.strftime('%a, %d %b %Y %H:%M:%S GMT')
-        headers = {'HTTP_IF_MODIFIED_SINCE': if_modified_since_header_value}
-        response = self.client.get('/rest/v2/datasets/%s' % self.identifier, **headers)
+        if_modified_since_header_value = date_modified_in_gmt.strftime("%a, %d %b %Y %H:%M:%S GMT")
+        headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value}
+        response = self.client.get("/rest/v2/datasets/%s" % self.identifier, **headers)
         self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
 
         if_modified_since_header_value = (date_modified_in_gmt + timedelta(seconds=1)).strftime(
-            '%a, %d %b %Y %H:%M:%S GMT')
-        headers = {'HTTP_IF_MODIFIED_SINCE': if_modified_since_header_value}
-        response = self.client.get('/rest/v2/datasets/%s' % self.identifier, **headers)
+            "%a, %d %b %Y %H:%M:%S GMT"
+        )
+        headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value}
+        response = self.client.get("/rest/v2/datasets/%s" % self.identifier, **headers)
         self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
 
         if_modified_since_header_value = (date_modified_in_gmt - timedelta(seconds=1)).strftime(
-            '%a, %d %b %Y %H:%M:%S GMT')
-        headers = {'HTTP_IF_MODIFIED_SINCE': if_modified_since_header_value}
-        response = self.client.get('/rest/v2/datasets/%s' % self.identifier, **headers)
+            "%a, %d %b %Y %H:%M:%S GMT"
+        )
+        headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value}
+        response = self.client.get("/rest/v2/datasets/%s" % self.identifier, **headers)
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
     def test_get_with_if_modified_since_header_syntax_error(self):
         cr = CatalogRecord.objects.get(pk=self.pk)
         date_modified = cr.date_modified
-        date_modified_in_gmt = timezone.localtime(date_modified, timezone=tz('GMT'))
+        date_modified_in_gmt = timezone.localtime(date_modified, timezone=tz("GMT"))
 
-        if_modified_since_header_value = date_modified_in_gmt.strftime('%a, %d %b %Y %H:%M:%S UTC')
-        headers = {'HTTP_IF_MODIFIED_SINCE': if_modified_since_header_value}
-        response = self.client.get('/rest/v2/datasets/%s' % self.identifier, **headers)
+        if_modified_since_header_value = date_modified_in_gmt.strftime("%a, %d %b %Y %H:%M:%S UTC")
+        headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value}
+        response = self.client.get("/rest/v2/datasets/%s" % self.identifier, **headers)
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
 
     #
@@ -170,41 +198,43 @@ def test_get_with_if_modified_since_header_syntax_error(self):
 
     def test_list_get_with_if_modified_since_header_ok(self):
         cr = CatalogRecord.objects.get(pk=self.pk)
         date_modified = cr.date_modified
-        date_modified_in_gmt = timezone.localtime(date_modified, timezone=tz('GMT'))
+        date_modified_in_gmt = timezone.localtime(date_modified, timezone=tz("GMT"))
 
-        if_modified_since_header_value = date_modified_in_gmt.strftime('%a, %d %b %Y %H:%M:%S GMT')
-        headers = {'HTTP_IF_MODIFIED_SINCE': if_modified_since_header_value}
-        response = self.client.get('/rest/v2/datasets?limit=100', **headers)
+        if_modified_since_header_value = date_modified_in_gmt.strftime("%a, %d %b %Y %H:%M:%S GMT")
+        headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value}
+        response = self.client.get("/rest/v2/datasets?limit=100", **headers)
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertTrue(len(response.data.get('results')) == 6)
+        self.assertTrue(len(response.data.get("results")) == 6)
 
         if_modified_since_header_value = (date_modified_in_gmt + timedelta(seconds=1)).strftime(
-            '%a, %d %b %Y %H:%M:%S GMT')
-        headers = {'HTTP_IF_MODIFIED_SINCE': if_modified_since_header_value}
-        response = self.client.get('/rest/v2/datasets?limit=100', **headers)
+            "%a, %d %b %Y %H:%M:%S GMT"
+        )
+        headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value}
+        response = self.client.get("/rest/v2/datasets?limit=100", **headers)
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertTrue(len(response.data.get('results')) == 6)
+        self.assertTrue(len(response.data.get("results")) == 6)
 
         # The asserts below may brake if the date_modified timestamps or the amount of test data objects are altered
         # in the test data
         if_modified_since_header_value = (date_modified_in_gmt - timedelta(seconds=1)).strftime(
-            '%a, %d %b %Y %H:%M:%S GMT')
-        headers = {'HTTP_IF_MODIFIED_SINCE': if_modified_since_header_value}
-        response = self.client.get('/rest/v2/datasets?limit=100', **headers)
+            "%a, %d %b %Y %H:%M:%S GMT"
+        )
+        headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value}
+        response = self.client.get("/rest/v2/datasets?limit=100", **headers)
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertTrue(len(response.data.get('results')) > 6)
-        self.assertTrue(len(response.data.get('results')) == 28)
+        self.assertTrue(len(response.data.get("results")) > 6)
+        self.assertTrue(len(response.data.get("results")) == 28)
 
         # should also work with records that have been recently created, and date_modified is empty
         cr.date_created = date_modified
         cr.date_modified = None
         cr.force_save()
-        response = self.client.get('/rest/v2/datasets?limit=100', **headers)
+        response = self.client.get("/rest/v2/datasets?limit=100", **headers)
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertTrue(len(response.data.get('results')) > 6)
-        self.assertTrue(len(response.data.get('results')) == 28)
+        self.assertTrue(len(response.data.get("results")) > 6)
+        self.assertTrue(len(response.data.get("results")) == 28)
 
 
 class ApiReadQueryParamTests(CatalogRecordApiReadCommon):
@@ -217,28 +247,28 @@ def test_return_requested_fields_only(self):
         """
         While the param ?fields works with write operations too, the primary use case is when GETting.
""" - response = self.client.get('/rest/v2/datasets?fields=identifier') + response = self.client.get("/rest/v2/datasets?fields=identifier") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('identifier' in response.data['results'][0], True) - self.assertEqual(len(response.data['results'][0].keys()), 1) - self.assertEqual(len(response.data['results'][1].keys()), 1) + self.assertEqual("identifier" in response.data["results"][0], True) + self.assertEqual(len(response.data["results"][0].keys()), 1) + self.assertEqual(len(response.data["results"][1].keys()), 1) - response = self.client.get('/rest/v2/datasets/1?fields=identifier') + response = self.client.get("/rest/v2/datasets/1?fields=identifier") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('identifier' in response.data, True) + self.assertEqual("identifier" in response.data, True) self.assertEqual(len(response.data.keys()), 1) - response = self.client.get('/rest/v2/datasets/1?fields=identifier,data_catalog') - self.assertEqual('identifier' in response.data, True) - self.assertEqual('data_catalog' in response.data, True) + response = self.client.get("/rest/v2/datasets/1?fields=identifier,data_catalog") + self.assertEqual("identifier" in response.data, True) + self.assertEqual("data_catalog" in response.data, True) self.assertEqual(len(response.data.keys()), 2) - response = self.client.get('/rest/v2/datasets/1?fields=not_found') + response = self.client.get("/rest/v2/datasets/1?fields=not_found") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # Anonymous user using fields parameter and not including research_dataset should not cause crashing self.client._credentials = {} - response = self.client.get('/rest/v2/datasets/1?fields=identifier') + response = self.client.get("/rest/v2/datasets/1?fields=identifier") self.assertEqual(response.status_code, status.HTTP_200_OK) def test_checksum_field_for_file(self): @@ -246,20 +276,23 @@ def test_checksum_field_for_file(self): Check that checksum field works correctly """ - self._use_http_authorization('metax') - response = self.client.get('/rest/v2/files/1?fields=checksum') + self._use_http_authorization("metax") + response = self.client.get("/rest/v2/files/1?fields=checksum") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(response.data.get('checksum'), 'Checksum JSON should be returned') - self.assertTrue(response.data['checksum'].get('algorithm')) - self.assertTrue(response.data['checksum'].get('checked')) - self.assertTrue(response.data['checksum'].get('value')) + self.assertTrue(response.data.get("checksum"), "Checksum JSON should be returned") + self.assertTrue(response.data["checksum"].get("algorithm")) + self.assertTrue(response.data["checksum"].get("checked")) + self.assertTrue(response.data["checksum"].get("value")) - response = self.client.get('/rest/v2/files/1?fields=checksum:value') + response = self.client.get("/rest/v2/files/1?fields=checksum:value") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(response.data.get('checksum'), 'Checksum JSON should be returned') - self.assertTrue(response.data['checksum'].get('value')) - self.assertFalse(response.data['checksum'].get('algorithm')) + self.assertTrue(response.data.get("checksum"), "Checksum JSON should be returned") + self.assertTrue(response.data["checksum"].get("value")) + self.assertFalse(response.data["checksum"].get("algorithm")) - response = 
+        response = self.client.get("/rest/v2/files/1?fields=checksum:badvalue")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertTrue('is not part of' in response.data['detail'][0], 'Should complain about field not found')
+        self.assertTrue(
+            "is not part of" in response.data["detail"][0],
+            "Should complain about field not found",
+        )
diff --git a/src/metax_api/tests/api/rest/v2/views/common/write.py b/src/metax_api/tests/api/rest/v2/views/common/write.py
index 99cbeb6b..49ba9769 100755
--- a/src/metax_api/tests/api/rest/v2/views/common/write.py
+++ b/src/metax_api/tests/api/rest/v2/views/common/write.py
@@ -27,28 +27,30 @@
 
 class ApiWriteCommon(APITestCase, TestClassUtils):
-
     def setUp(self):
-        call_command('loaddata', test_data_file_path, verbosity=0)
+        call_command("loaddata", test_data_file_path, verbosity=0)
         self.test_new_data = self._get_new_test_data()
         self._use_http_authorization()
 
     def _get_new_test_data(self):
-        record_from_test_data = self._get_object_from_test_data('catalogrecord', requested_index=0)
-        record_from_test_data.update({
-            "data_catalog": 1,
-        })
-        record_from_test_data['research_dataset'].update({
-            "preferred_identifier": None,
-        })
-        record_from_test_data.pop('id', None)
-        record_from_test_data.pop('identifier', None)
-        record_from_test_data.pop('contract', None)
+        record_from_test_data = self._get_object_from_test_data("catalogrecord", requested_index=0)
+        record_from_test_data.update(
+            {
+                "data_catalog": 1,
+            }
+        )
+        record_from_test_data["research_dataset"].update(
+            {
+                "preferred_identifier": None,
+            }
+        )
+        record_from_test_data.pop("id", None)
+        record_from_test_data.pop("identifier", None)
+        record_from_test_data.pop("contract", None)
         return record_from_test_data
 
 
 class ApiWriteCommonFieldsTests(ApiWriteCommon):
-
     def test_certain_create_fields_are_read_only_after_create(self):
         """
         The following fields should be read-only after initial creation of a resource:
@@ -56,60 +58,63 @@ def test_certain_create_fields_are_read_only_after_create(self):
         - user_created
         - service_created
         """
-        response = self.client.post('/rest/v2/datasets', self.test_new_data, format="json")
+        response = self.client.post("/rest/v2/datasets", self.test_new_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
 
         # some of the fields could be empty in test data. that is fine tho, the point is that
         # they should not change later.
-        orig_date_created = response.data.get('date_created', None)
-        orig_user_created = response.data.get('user_created', None)
-        orig_service_created = response.data.get('service_created', None)
+        orig_date_created = response.data.get("date_created", None)
+        orig_user_created = response.data.get("user_created", None)
+        orig_service_created = response.data.get("service_created", None)
 
         altered = response.data
-        altered['date_created'] = altered['date_created'].replace('2017', '2010')
-        altered['user_created'] = 'changed'
-        altered['service_created'] = 'changed'
+        altered["date_created"] = altered["date_created"].replace("2017", "2010")
+        altered["user_created"] = "changed"
+        altered["service_created"] = "changed"
 
-        response = self.client.put('/rest/v2/datasets/%d' % altered['id'], altered, format="json")
+        response = self.client.put("/rest/v2/datasets/%d" % altered["id"], altered, format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
 
-        response = self.client.get('/rest/v2/datasets/%d' % altered['id'], format="json")
+        response = self.client.get("/rest/v2/datasets/%d" % altered["id"], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(orig_date_created, response.data.get('date_created', None))
-        self.assertEqual(orig_user_created, response.data.get('user_created', None))
-        self.assertEqual(orig_service_created, response.data.get('service_created', None))
+        self.assertEqual(orig_date_created, response.data.get("date_created", None))
+        self.assertEqual(orig_user_created, response.data.get("user_created", None))
+        self.assertEqual(orig_service_created, response.data.get("service_created", None))
 
     def test_deletion_sets_removed_true_and_sets_value_for_date_removed(self):
-        response = self.client.post('/rest/v2/datasets', self.test_new_data, format="json")
+        response = self.client.post("/rest/v2/datasets", self.test_new_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        cr_id = response.data['id']
-        response = self.client.delete('/rest/v2/datasets/%d' % cr_id)
+        cr_id = response.data["id"]
+        response = self.client.delete("/rest/v2/datasets/%d" % cr_id)
         self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)
 
         # Verify date_removed got set
-        response = self.client.get('/rest/v2/datasets/%d?removed' % cr_id)
-        self.assertTrue(response.data['removed'] is True)
-        self.assertTrue(response.data.get('date_removed', '').startswith('2'))
+        response = self.client.get("/rest/v2/datasets/%d?removed" % cr_id)
+        self.assertTrue(response.data["removed"] is True)
+        self.assertTrue(response.data.get("date_removed", "").startswith("2"))
 
     def test_updating_sets_removed_false_and_empties_date_removed(self):
-        response = self.client.post('/rest/v2/datasets', self.test_new_data, format="json")
+        response = self.client.post("/rest/v2/datasets", self.test_new_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        cr_id = response.data['id']
-        response = self.client.delete('/rest/v2/datasets/%d' % cr_id)
+        cr_id = response.data["id"]
+        response = self.client.delete("/rest/v2/datasets/%d" % cr_id)
         self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)
 
-        rd = self.client.get('/rest/v2/datasets/%d?removed' % cr_id).data
-        rd_date_rem = rd['date_removed']
-        sleep(1) # ensure that next request happens with different timestamp
-        response = self.client.put('/rest/v2/datasets/%d?removed' % cr_id, rd, format="json")
rd, format="json") + rd = self.client.get("/rest/v2/datasets/%d?removed" % cr_id).data + rd_date_rem = rd["date_removed"] + sleep(1) # ensure that next request happens with different timestamp + response = self.client.put("/rest/v2/datasets/%d?removed" % cr_id, rd, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get('/rest/v2/datasets/%d' % cr_id) + response = self.client.get("/rest/v2/datasets/%d" % cr_id) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertTrue(response.data['removed'] is False) - self.assertTrue(response.data.get('date_removed') is None) - self.assertTrue(response.data.get('date_modified') != rd_date_rem, 'date_modified should be updated') + self.assertTrue(response.data["removed"] is False) + self.assertTrue(response.data.get("date_removed") is None) + self.assertTrue( + response.data.get("date_modified") != rd_date_rem, + "date_modified should be updated", + ) class ApiWriteHTTPHeaderTests(CatalogRecordApiWriteCommon): @@ -119,47 +124,53 @@ class ApiWriteHTTPHeaderTests(CatalogRecordApiWriteCommon): # def test_update_with_if_unmodified_since_header_ok(self): - cr = self.client.get('/rest/v2/datasets/1').data - cr['preservation_description'] = 'damn this is good coffee' + cr = self.client.get("/rest/v2/datasets/1").data + cr["preservation_description"] = "damn this is good coffee" cr_obj = CatalogRecord.objects.get(pk=1) - headers = {'HTTP_IF_UNMODIFIED_SINCE': cr_obj.date_modified.strftime('%a, %d %b %Y %H:%M:%S GMT')} + headers = { + "HTTP_IF_UNMODIFIED_SINCE": cr_obj.date_modified.strftime("%a, %d %b %Y %H:%M:%S GMT") + } - response = self.client.put('/rest/v2/datasets/1', cr, format="json", **headers) + response = self.client.put("/rest/v2/datasets/1", cr, format="json", **headers) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_update_with_if_unmodified_since_header_precondition_failed_error(self): - cr = self.client.get('/rest/v2/datasets/1').data - cr['preservation_description'] = 'the owls are not what they seem' + cr = self.client.get("/rest/v2/datasets/1").data + cr["preservation_description"] = "the owls are not what they seem" - headers = {'HTTP_IF_UNMODIFIED_SINCE': 'Wed, 23 Sep 2009 22:15:29 GMT'} + headers = {"HTTP_IF_UNMODIFIED_SINCE": "Wed, 23 Sep 2009 22:15:29 GMT"} - response = self.client.put('/rest/v2/datasets/1', cr, format="json", **headers) - self.assertEqual(response.status_code, 412, 'http status should be 412 = precondition failed') + response = self.client.put("/rest/v2/datasets/1", cr, format="json", **headers) + self.assertEqual( + response.status_code, 412, "http status should be 412 = precondition failed" + ) def test_update_with_if_unmodified_since_header_syntax_error(self): - cr = self.client.get('/rest/v2/datasets/1').data - cr['preservation_description'] = 'the owls are not what they seem' + cr = self.client.get("/rest/v2/datasets/1").data + cr["preservation_description"] = "the owls are not what they seem" cr_obj = CatalogRecord.objects.get(pk=1) - headers = {'HTTP_IF_UNMODIFIED_SINCE': cr_obj.date_modified.strftime('%a, %d %b %Y %H:%M:%S UTC')} + headers = { + "HTTP_IF_UNMODIFIED_SINCE": cr_obj.date_modified.strftime("%a, %d %b %Y %H:%M:%S UTC") + } - response = self.client.put('/rest/v2/datasets/1', cr, format="json", **headers) - self.assertEqual(response.status_code, 400, 'http status should be 400') + response = self.client.put("/rest/v2/datasets/1", cr, format="json", **headers) + 
+        self.assertEqual(response.status_code, 400, "http status should be 400")
 
     #
     # header if-unmodified-since tests, list
     #
 
     def test_update_list_with_if_unmodified_since_header_ok(self):
-        data_1 = self.client.get('/rest/v2/datasets/1', format="json").data
-        data_2 = self.client.get('/rest/v2/datasets/2', format="json").data
+        data_1 = self.client.get("/rest/v2/datasets/1", format="json").data
+        data_2 = self.client.get("/rest/v2/datasets/2", format="json").data
 
-        data_1['preservation_description'] = 'damn this is good coffee'
-        data_2['preservation_description'] = 'damn this is good coffee also'
+        data_1["preservation_description"] = "damn this is good coffee"
+        data_2["preservation_description"] = "damn this is good coffee also"
 
-        headers = {'HTTP_IF_UNMODIFIED_SINCE': 'value is not checked'}
-        response = self.client.put('/rest/v2/datasets', [data_1, data_2], format="json", **headers)
+        headers = {"HTTP_IF_UNMODIFIED_SINCE": "value is not checked"}
+        response = self.client.put("/rest/v2/datasets", [data_1, data_2], format="json", **headers)
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
 
@@ -167,36 +178,48 @@ def test_update_list_with_if_unmodified_since_header_error_1(self):
         """
         One resource being updated was updated in the meantime, resulting in an error
         """
-        data_1 = self.client.get('/rest/v2/datasets/1', format="json").data
-        data_2 = self.client.get('/rest/v2/datasets/2', format="json").data
+        data_1 = self.client.get("/rest/v2/datasets/1", format="json").data
+        data_2 = self.client.get("/rest/v2/datasets/2", format="json").data
 
-        data_1['preservation_description'] = 'damn this is good coffee'
+        data_1["preservation_description"] = "damn this is good coffee"
 
         # should result in error for this record
-        data_2['date_modified'] = '2002-01-01T10:10:10Z'
-
-        headers = {'HTTP_IF_UNMODIFIED_SINCE': 'value is not checked'}
-        response = self.client.put('/rest/v2/datasets', [data_1, data_2], format="json", **headers)
-        self.assertEqual(len(response.data['failed']) == 1, True, 'there should be only one failed update')
-        self.assertEqual('modified' in response.data['failed'][0]['errors']['detail'][0], True,
-                         'error should indicate resource has been modified')
+        data_2["date_modified"] = "2002-01-01T10:10:10Z"
+
+        headers = {"HTTP_IF_UNMODIFIED_SINCE": "value is not checked"}
+        response = self.client.put("/rest/v2/datasets", [data_1, data_2], format="json", **headers)
+        self.assertEqual(
+            len(response.data["failed"]) == 1,
+            True,
+            "there should be only one failed update",
+        )
+        self.assertEqual(
+            "modified" in response.data["failed"][0]["errors"]["detail"][0],
+            True,
+            "error should indicate resource has been modified",
+        )
 
     def test_update_list_with_if_unmodified_since_header_error_2(self):
         """
         Field date_modified is missing, while if-modified-since header is set, resulting in an error.
""" - data_1 = self.client.get('/rest/v2/datasets/1', format="json").data - data_2 = self.client.get('/rest/v2/datasets/2', format="json").data + data_1 = self.client.get("/rest/v2/datasets/1", format="json").data + data_2 = self.client.get("/rest/v2/datasets/2", format="json").data - data_1['preservation_description'] = 'damn this is good coffee' + data_1["preservation_description"] = "damn this is good coffee" # should result in error for this record - data_2.pop('date_modified') - - headers = {'HTTP_IF_UNMODIFIED_SINCE': 'value is not checked'} - response = self.client.patch('/rest/v2/datasets', [data_1, data_2], format="json", **headers) - self.assertEqual('required' in response.data['failed'][0]['errors']['detail'][0], True, - 'error should be about field date_modified is required') + data_2.pop("date_modified") + + headers = {"HTTP_IF_UNMODIFIED_SINCE": "value is not checked"} + response = self.client.patch( + "/rest/v2/datasets", [data_1, data_2], format="json", **headers + ) + self.assertEqual( + "required" in response.data["failed"][0]["errors"]["detail"][0], + True, + "error should be about field date_modified is required", + ) def test_update_list_with_if_unmodified_since_header_error_3(self): """ @@ -204,17 +227,20 @@ def test_update_list_with_if_unmodified_since_header_error_3(self): is an accepted value. The end result should be that the resource has been modified, since the server version has a timestamp set in date_modified. """ - data_1 = self.client.get('/rest/v2/datasets/1', format="json").data - data_2 = self.client.get('/rest/v2/datasets/2', format="json").data + data_1 = self.client.get("/rest/v2/datasets/1", format="json").data + data_2 = self.client.get("/rest/v2/datasets/2", format="json").data - data_1['preservation_description'] = 'damn this is good coffee' - data_2['preservation_description'] = 'damn this is good coffee also' - data_2['date_modified'] = None + data_1["preservation_description"] = "damn this is good coffee" + data_2["preservation_description"] = "damn this is good coffee also" + data_2["date_modified"] = None - headers = {'HTTP_IF_UNMODIFIED_SINCE': 'value is not checked'} - response = self.client.put('/rest/v2/datasets', [data_1, data_2], format="json", **headers) - self.assertEqual('modified' in response.data['failed'][0]['errors']['detail'][0], True, - 'error should indicate resource has been modified') + headers = {"HTTP_IF_UNMODIFIED_SINCE": "value is not checked"} + response = self.client.put("/rest/v2/datasets", [data_1, data_2], format="json", **headers) + self.assertEqual( + "modified" in response.data["failed"][0]["errors"]["detail"][0], + True, + "error should indicate resource has been modified", + ) class ApiWriteAtomicBulkOperations(CatalogRecordApiWriteCommon): @@ -225,47 +251,55 @@ class ApiWriteAtomicBulkOperations(CatalogRecordApiWriteCommon): """ def test_atomic_create(self): - cr = self.client.get('/rest/v2/datasets/1', format="json").data - cr.pop('id') - cr.pop('identifier') - cr['research_dataset'].pop('metadata_version_identifier') - cr['research_dataset'].pop('preferred_identifier') + cr = self.client.get("/rest/v2/datasets/1", format="json").data + cr.pop("id") + cr.pop("identifier") + cr["research_dataset"].pop("metadata_version_identifier") + cr["research_dataset"].pop("preferred_identifier") cr2 = deepcopy(cr) cr3 = deepcopy(cr) - cr3.pop('data_catalog') # causes error + cr3.pop("data_catalog") # causes error record_count_before = CatalogRecord.objects.all().count() - response = 
+        response = self.client.post("/rest/v2/datasets?atomic=true", [cr, cr2, cr3], format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
-        self.assertEqual(len(response.data['success']) == 0, True, response.data)
-        self.assertEqual(len(response.data['failed']) == 1, True, response.data)
-        self.assertEqual('detail' in response.data, True, response.data)
-        self.assertEqual('atomic' in response.data['detail'][0], True, response.data)
-        self.assertEqual(record_count_before, CatalogRecord.objects.all().count(), 'shouldnt create new records')
+        self.assertEqual(len(response.data["success"]) == 0, True, response.data)
+        self.assertEqual(len(response.data["failed"]) == 1, True, response.data)
+        self.assertEqual("detail" in response.data, True, response.data)
+        self.assertEqual("atomic" in response.data["detail"][0], True, response.data)
+        self.assertEqual(
+            record_count_before,
+            CatalogRecord.objects.all().count(),
+            "shouldnt create new records",
+        )
 
     def test_atomic_update(self):
-        cr = self.client.get('/rest/v2/datasets/1', format="json").data
-        cr2 = self.client.get('/rest/v2/datasets/2', format="json").data
-        cr3 = self.client.get('/rest/v2/datasets/3', format="json").data
-        cr['research_dataset']['title']['en'] = 'updated'
-        cr2['research_dataset']['title']['en'] = 'updated'
-        cr3.pop('data_catalog') # causes error
+        cr = self.client.get("/rest/v2/datasets/1", format="json").data
+        cr2 = self.client.get("/rest/v2/datasets/2", format="json").data
+        cr3 = self.client.get("/rest/v2/datasets/3", format="json").data
+        cr["research_dataset"]["title"]["en"] = "updated"
+        cr2["research_dataset"]["title"]["en"] = "updated"
+        cr3.pop("data_catalog")  # causes error
 
         record_count_before = CatalogRecord.objects.all().count()
 
-        response = self.client.put('/rest/v2/datasets?atomic=true', [cr, cr2, cr3], format="json")
+        response = self.client.put("/rest/v2/datasets?atomic=true", [cr, cr2, cr3], format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertEqual(len(response.data['success']) == 0, True)
-        self.assertEqual(len(response.data['failed']) == 1, True)
-        self.assertEqual('atomic' in response.data['detail'][0], True)
-        self.assertEqual(record_count_before, CatalogRecord.objects.all().count(), 'shouldnt create new versions')
+        self.assertEqual(len(response.data["success"]) == 0, True)
+        self.assertEqual(len(response.data["failed"]) == 1, True)
+        self.assertEqual("atomic" in response.data["detail"][0], True)
+        self.assertEqual(
+            record_count_before,
+            CatalogRecord.objects.all().count(),
+            "shouldnt create new versions",
+        )
 
-        cr = self.client.get('/rest/v2/datasets/1', format="json").data
-        cr2 = self.client.get('/rest/v2/datasets/2', format="json").data
-        self.assertEqual(cr['research_dataset']['title']['en'] == 'updated', False)
-        self.assertEqual(cr2['research_dataset']['title']['en'] == 'updated', False)
+        cr = self.client.get("/rest/v2/datasets/1", format="json").data
+        cr2 = self.client.get("/rest/v2/datasets/2", format="json").data
+        self.assertEqual(cr["research_dataset"]["title"]["en"] == "updated", False)
+        self.assertEqual(cr2["research_dataset"]["title"]["en"] == "updated", False)
 
 
 class ApiWriteQueryParamTests(ApiWriteCommon):
@@ -279,11 +313,17 @@ def test_dryrun(self):
         Ensure query parameter ?dryrun=true returns same result as they normally would, but
         changes made during the request do not get saved in the db.
""" - response = self.client.post('/rest/v2/datasets?dryrun=true', self.test_new_data, format="json") + response = self.client.post( + "/rest/v2/datasets?dryrun=true", self.test_new_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('id' in response.data, True) - found = CatalogRecord.objects.filter(pk=response.data['id']).exists() - self.assertEqual(found, False, 'record should not get truly created when using parameter dryrun') + self.assertEqual("id" in response.data, True) + found = CatalogRecord.objects.filter(pk=response.data["id"]).exists() + self.assertEqual( + found, + False, + "record should not get truly created when using parameter dryrun", + ) class ApiWriteCommonOperations(ApiWriteCommon): @@ -292,10 +332,10 @@ class ApiWriteCommonOperations(ApiWriteCommon): """ def test_create_file_with_empty_body_fails(self): - response = self.client.post('/rest/datasets') + response = self.client.post("/rest/datasets") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue('Request body is required' in response.data['detail'][0]) + self.assertTrue("Request body is required" in response.data["detail"][0]) - response = self.client.post('/rest/files') + response = self.client.post("/rest/files") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue('Request body is required' in response.data['detail'][0]) \ No newline at end of file + self.assertTrue("Request body is required" in response.data["detail"][0]) diff --git a/src/metax_api/tests/api/rest/v2/views/contracts/contracts.py b/src/metax_api/tests/api/rest/v2/views/contracts/contracts.py index aa2e11ee..cbf133af 100755 --- a/src/metax_api/tests/api/rest/v2/views/contracts/contracts.py +++ b/src/metax_api/tests/api/rest/v2/views/contracts/contracts.py @@ -19,29 +19,29 @@ def setUpClass(cls): """ Loaded only once for test cases inside this class. 
""" - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(ContractApiReadTestV1, cls).setUpClass() def setUp(self): - contract_from_test_data = self._get_object_from_test_data('contract', requested_index=0) - self.pk = contract_from_test_data['id'] - self.identifier = contract_from_test_data['contract_json']['identifier'] + contract_from_test_data = self._get_object_from_test_data("contract", requested_index=0) + self.pk = contract_from_test_data["id"] + self.identifier = contract_from_test_data["contract_json"]["identifier"] self._use_http_authorization() def test_read_contract_list(self): - response = self.client.get('/rest/v2/datasets') + response = self.client.get("/rest/v2/datasets") self.assertEqual(response.status_code, status.HTTP_200_OK) def test_read_contract_details_by_pk(self): - response = self.client.get('/rest/v2/contracts/%s' % self.pk) + response = self.client.get("/rest/v2/contracts/%s" % self.pk) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_read_contract_details_by_identifier(self): - response = self.client.get('/rest/v2/contracts/%s' % self.identifier) + response = self.client.get("/rest/v2/contracts/%s" % self.identifier) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_read_contract_details_not_found(self): - response = self.client.get('/rest/v2/contracts/shouldnotexist') + response = self.client.get("/rest/v2/contracts/shouldnotexist") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) @@ -50,10 +50,10 @@ def setUp(self): """ Reloaded for every test case """ - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) self._use_http_authorization() - contract_from_test_data = self._get_object_from_test_data('contract') - self.pk = contract_from_test_data['id'] + contract_from_test_data = self._get_object_from_test_data("contract") + self.pk = contract_from_test_data["id"] """ New data that is sent to the server for POST, PUT, PATCH requests. 
         Modified
@@ -64,32 +64,38 @@ def setUp(self):
         self._use_http_authorization()
 
     def test_create_contract_with_existing_identifier(self):
-        self.test_new_data['pk'] = self.pk
-        response = self.client.post('/rest/v2/contracts/', self.test_new_data, format="json")
+        self.test_new_data["pk"] = self.pk
+        response = self.client.post("/rest/v2/contracts/", self.test_new_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
 
-        response = self.client.post('/rest/v2/contracts/', self.test_new_data, format="json")
+        response = self.client.post("/rest/v2/contracts/", self.test_new_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertTrue('already exists' in response.data['contract_json'][0],
-                        'Error regarding dublicated identifier')
+        self.assertTrue(
+            "already exists" in response.data["contract_json"][0],
+            "Error regarding dublicated identifier",
+        )
 
     def test_update_contract(self):
-        self.test_new_data['pk'] = self.pk
-        response = self.client.put('/rest/v2/contracts/%s' % self.pk, self.test_new_data, format="json")
+        self.test_new_data["pk"] = self.pk
+        response = self.client.put(
+            "/rest/v2/contracts/%s" % self.pk, self.test_new_data, format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
 
     def test_update_contract_not_found(self):
-        response = self.client.put('/rest/v2/contracts/doesnotexist', self.test_new_data, format="json")
+        response = self.client.put(
+            "/rest/v2/contracts/doesnotexist", self.test_new_data, format="json"
+        )
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
 
     def test_add_catalog_record_to_contract(self):
-        new_catalog_record = self.client.get('/rest/v2/datasets/1', format="json").data
-        new_catalog_record.pop('id')
-        new_catalog_record.pop('identifier')
-        new_catalog_record['research_dataset'].pop('preferred_identifier')
-        new_catalog_record['contract'] = self.pk
+        new_catalog_record = self.client.get("/rest/v2/datasets/1", format="json").data
+        new_catalog_record.pop("id")
+        new_catalog_record.pop("identifier")
+        new_catalog_record["research_dataset"].pop("preferred_identifier")
+        new_catalog_record["contract"] = self.pk
 
-        response = self.client.post('/rest/v2/datasets', new_catalog_record, format="json")
+        response = self.client.post("/rest/v2/datasets", new_catalog_record, format="json")
         created_catalog_record = response.data
 
         try:
@@ -97,21 +103,26 @@ def test_add_catalog_record_to_contract(self):
         except Exception:
             print(response.data)
             raise
-        self.assertEqual('research_dataset' in created_catalog_record.keys(), True)
-        self.assertEqual(created_catalog_record['contract']['id'], self.pk)
+        self.assertEqual("research_dataset" in created_catalog_record.keys(), True)
+        self.assertEqual(created_catalog_record["contract"]["id"], self.pk)
 
         contract = Contract.objects.get(pk=self.pk)
         try:
-            contract.records.get(pk=response.data['id'])
+            contract.records.get(pk=response.data["id"])
         except CatalogRecord.DoesNotExist:
-            raise Exception('The added CatalogRecord should appear in the relation contract.records')
+            raise Exception(
+                "The added CatalogRecord should appear in the relation contract.records"
+            )
 
-        response = self.client.get('/rest/v2/contracts/%d/datasets' % self.pk)
-        self.assertIn(created_catalog_record['id'], [cr['id'] for cr in response.data],
-                      'The added CatalogRecord should appear in the results of /contracts/id/datasets')
+        response = self.client.get("/rest/v2/contracts/%d/datasets" % self.pk)
+        self.assertIn(
+            created_catalog_record["id"],
+            [cr["id"] for cr in response.data],
+            "The added CatalogRecord should appear in the results of /contracts/id/datasets",
+        )
 
     def test_delete_contract(self):
-        url = '/rest/v2/contracts/%s' % self.pk
+        url = "/rest/v2/contracts/%s" % self.pk
         response = self.client.delete(url)
         self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
         response = self.client.get(url)
@@ -125,43 +136,65 @@ def test_delete_contract(self):
             pass
 
         if deleted_contract:
-            raise Exception('Deleted Contract should not be retrievable from the default objects table')
+            raise Exception(
+                "Deleted Contract should not be retrievable from the default objects table"
+            )
 
         try:
             deleted_contract = Contract.objects_unfiltered.get(pk=self.pk)
         except Contract.DoesNotExist:
-            raise Exception('Deleted contract should not be deleted from the db')
-
-        self.assertEqual(deleted_contract.removed, True, 'Deleted contract should be marked removed in the db')
-        self.assertEqual(deleted_contract.date_modified, deleted_contract.date_removed,
-                         'date_modified should be updated')
+            raise Exception("Deleted contract should not be deleted from the db")
+
+        self.assertEqual(
+            deleted_contract.removed,
+            True,
+            "Deleted contract should be marked removed in the db",
+        )
+        self.assertEqual(
+            deleted_contract.date_modified,
+            deleted_contract.date_removed,
+            "date_modified should be updated",
+        )
 
     def test_delete_contract_catalog_records_are_marked_removed(self):
         # add two new records to contract
         new_catalog_record = self._get_new_catalog_record_test_data()
-        new_catalog_record['contract'] = self.pk
-        self.client.post('/rest/v2/datasets', new_catalog_record, format="json")
-        self.client.post('/rest/v2/datasets', new_catalog_record, format="json")
+        new_catalog_record["contract"] = self.pk
+        self.client.post("/rest/v2/datasets", new_catalog_record, format="json")
+        self.client.post("/rest/v2/datasets", new_catalog_record, format="json")
 
-        self.client.delete('/rest/v2/contracts/%s' % self.pk)
+        self.client.delete("/rest/v2/contracts/%s" % self.pk)
 
         contract = Contract.objects_unfiltered.get(pk=self.pk)
-        related_crs = contract.records(manager='objects_unfiltered').all()
-        response_get_1 = self.client.get('/rest/v2/datasets/%d' % related_crs[0].id)
-        self.assertEqual(response_get_1.status_code, status.HTTP_404_NOT_FOUND,
-                         'CatalogRecords of deleted contracts should not be retrievable through the api')
-        response_get_2 = self.client.get('/rest/v2/datasets/%d' % related_crs[1].id)
-        self.assertEqual(response_get_2.status_code, status.HTTP_404_NOT_FOUND,
-                         'CatalogRecords of deleted contracts should not be retrievable through the api')
+        related_crs = contract.records(manager="objects_unfiltered").all()
+        response_get_1 = self.client.get("/rest/v2/datasets/%d" % related_crs[0].id)
+        self.assertEqual(
+            response_get_1.status_code,
+            status.HTTP_404_NOT_FOUND,
+            "CatalogRecords of deleted contracts should not be retrievable through the api",
+        )
+        response_get_2 = self.client.get("/rest/v2/datasets/%d" % related_crs[1].id)
+        self.assertEqual(
+            response_get_2.status_code,
+            status.HTTP_404_NOT_FOUND,
+            "CatalogRecords of deleted contracts should not be retrievable through the api",
+        )
 
         for cr in related_crs:
-            self.assertEqual(cr.removed, True, 'Related CatalogRecord objects should be marked as removed')
+            self.assertEqual(
+                cr.removed,
+                True,
+                "Related CatalogRecord objects should be marked as removed",
+            )
 
     def test_deleted_catalog_record_is_not_listed_in_contract_datasets_api(self):
         deleted_id = 1
-        self.client.delete('/rest/v2/datasets/%d' % deleted_id)
-        response = self.client.get('/rest/v2/contracts/%d/datasets' % self.pk)
-        self.assertNotIn(deleted_id, [cr['id'] for cr in response.data],
-                         'The deleted CatalogRecord should not appear in the results of /contracts/id/datasets')
+        self.client.delete("/rest/v2/datasets/%d" % deleted_id)
+        response = self.client.get("/rest/v2/contracts/%d/datasets" % self.pk)
+        self.assertNotIn(
+            deleted_id,
+            [cr["id"] for cr in response.data],
+            "The deleted CatalogRecord should not appear in the results of /contracts/id/datasets",
+        )
 
     def _get_new_test_data(self):
         return {
@@ -172,22 +205,19 @@ def _get_new_test_data(self):
             "created": "2014-01-17T08:19:58Z",
             "modified": "2014-01-17T08:19:58Z",
             "description": "Description of unknown length",
-            "contact": [{
-                "name": "Contact Name",
-                "phone": "+358501231234",
-                "email": "contact.email@csc.fi"
-            }],
+            "contact": [
+                {
+                    "name": "Contact Name",
+                    "phone": "+358501231234",
+                    "email": "contact.email@csc.fi",
+                }
+            ],
             "organization": {
                 "organization_identifier": "1234567abc",
-                "name": "Mysterious organization"
+                "name": "Mysterious organization",
             },
-            "related_service": [{
-                "identifier": "local:service:id",
-                "name": "Name of Service"
-            }],
-            "validity": {
-                "start_date": "2014-01-17"
-            }
+            "related_service": [{"identifier": "local:service:id", "name": "Name of Service"}],
+            "validity": {"start_date": "2014-01-17"},
         }
     }
 
@@ -200,57 +230,56 @@ def _get_second_new_test_data(self):
             "created": "2014-01-17T08:19:58Z",
             "modified": "2014-01-17T08:19:58Z",
             "description": "Description of unknown length",
-            "contact": [{
-                "name": "Contact Name",
-                "phone": "+358501231234",
-                "email": "contact.email@csc.fi"
-            }],
+            "contact": [
+                {
+                    "name": "Contact Name",
+                    "phone": "+358501231234",
+                    "email": "contact.email@csc.fi",
+                }
+            ],
             "organization": {
                 "organization_identifier": "1234567abc",
-                "name": "Mysterious organization"
+                "name": "Mysterious organization",
             },
-            "related_service": [{
-                "identifier": "local:service:id",
-                "name": "Name of Service"
-            }],
-            "validity": {
-                "start_date": "2014-01-17"
-            }
+            "related_service": [{"identifier": "local:service:id", "name": "Name of Service"}],
+            "validity": {"start_date": "2014-01-17"},
         }
     }
 
     def _get_new_catalog_record_test_data(self):
-        catalog_record_from_test_data = self._get_object_from_test_data('catalogrecord', requested_index=0)
+        catalog_record_from_test_data = self._get_object_from_test_data(
+            "catalogrecord", requested_index=0
+        )
         return {
             "identifier": "http://urn.fi/urn:nbn:fi:iiidentifier",
-            "data_catalog": self._get_object_from_test_data('datacatalog', requested_index=0),
+            "data_catalog": self._get_object_from_test_data("datacatalog", requested_index=0),
             "research_dataset": {
                 "modified": "2014-01-17T08:19:58Z",
-                "version_notes": [
-                    "This version contains changes to x and y."
+                "version_notes": ["This version contains changes to x and y."],
+                "title": {"en": "Wonderful Title"},
+                "description": [
+                    {
+                        "en": "A descriptive description describing the contents of this dataset. Must be descriptive."
+                    }
                 ],
-                "title": {
-                    "en": "Wonderful Title"
-                },
-                "description": [{
-                    "en": "A descriptive description describing the contents of this dataset. Must be descriptive."
- }], - "creator": [{ - "@type": "Person", - "name": "Teppo Testaaja", - "member_of": { + "creator": [ + { + "@type": "Person", + "name": "Teppo Testaaja", + "member_of": { + "@type": "Organization", + "name": {"fi": "Mysterious Organization"}, + }, + } + ], + "curator": [ + { "@type": "Organization", - "name": {"fi": "Mysterious Organization"} + "name": {"en": "Curator org", "fi": "Organisaatio"}, } - }], - "curator": [{ - "@type": "Organization", - "name": {"en": "Curator org", "fi": "Organisaatio"} - }], - "language": [{ - "identifier": "http://lexvo.org/id/iso639-3/aar" - }], + ], + "language": [{"identifier": "http://lexvo.org/id/iso639-3/aar"}], "total_files_byte_size": 1024, - "files": catalog_record_from_test_data['research_dataset']['files'] - } + "files": catalog_record_from_test_data["research_dataset"]["files"], + }, } diff --git a/src/metax_api/tests/api/rest/v2/views/datacatalogs/read.py b/src/metax_api/tests/api/rest/v2/views/datacatalogs/read.py index 22dcc062..46f29bcd 100755 --- a/src/metax_api/tests/api/rest/v2/views/datacatalogs/read.py +++ b/src/metax_api/tests/api/rest/v2/views/datacatalogs/read.py @@ -18,15 +18,17 @@ def setUpClass(cls): """ Loaded only once for test cases inside this class. """ - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(DataCatalogApiReadBasicTests, cls).setUpClass() def setUp(self): - data_catalog_from_test_data = self._get_object_from_test_data('datacatalog', requested_index=0) + data_catalog_from_test_data = self._get_object_from_test_data( + "datacatalog", requested_index=0 + ) self._use_http_authorization() - self.pk = data_catalog_from_test_data['id'] - self.identifier = data_catalog_from_test_data['catalog_json']['identifier'] + self.pk = data_catalog_from_test_data["id"] + self.identifier = data_catalog_from_test_data["catalog_json"]["identifier"] def test_basic_get(self): - response = self.client.get('/rest/v2/datacatalogs/%s' % self.identifier) + response = self.client.get("/rest/v2/datacatalogs/%s" % self.identifier) self.assertEqual(response.status_code, status.HTTP_200_OK) diff --git a/src/metax_api/tests/api/rest/v2/views/datacatalogs/write.py b/src/metax_api/tests/api/rest/v2/views/datacatalogs/write.py index 66205590..6a0a60fb 100755 --- a/src/metax_api/tests/api/rest/v2/views/datacatalogs/write.py +++ b/src/metax_api/tests/api/rest/v2/views/datacatalogs/write.py @@ -20,55 +20,65 @@ def setUpClass(cls): """ Loaded only once for test cases inside this class. 
""" - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(DataCatalogApiWriteCommon, cls).setUpClass() def setUp(self): - self.new_test_data = self._get_object_from_test_data('datacatalog') - self.new_test_data.pop('id') - self.new_test_data['catalog_json']['identifier'] = 'new-data-catalog' + self.new_test_data = self._get_object_from_test_data("datacatalog") + self.new_test_data.pop("id") + self.new_test_data["catalog_json"]["identifier"] = "new-data-catalog" self._use_http_authorization() class DataCatalogApiWriteBasicTests(DataCatalogApiWriteCommon): - def test_identifier_is_auto_generated(self): - response = self.client.post('/rest/v2/datacatalogs', self.new_test_data, format="json") + response = self.client.post("/rest/v2/datacatalogs", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertNotEqual(response.data['catalog_json'].get('identifier', None), None, 'identifier should be created') + self.assertNotEqual( + response.data["catalog_json"].get("identifier", None), + None, + "identifier should be created", + ) def test_research_dataset_schema_missing_ok(self): - self.new_test_data['catalog_json'].pop('research_dataset_schema', None) - response = self.client.post('/rest/v2/datacatalogs', self.new_test_data, format="json") + self.new_test_data["catalog_json"].pop("research_dataset_schema", None) + response = self.client.post("/rest/v2/datacatalogs", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_research_dataset_schema_not_found_error(self): - self.new_test_data['catalog_json']['research_dataset_schema'] = 'notfound' - response = self.client.post('/rest/v2/datacatalogs', self.new_test_data, format="json") + self.new_test_data["catalog_json"]["research_dataset_schema"] = "notfound" + response = self.client.post("/rest/v2/datacatalogs", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) def test_disallow_versioning_in_harvested_catalogs(self): - self.new_test_data['catalog_json']['dataset_versioning'] = True - self.new_test_data['catalog_json']['harvested'] = True - response = self.client.post('/rest/v2/datacatalogs', self.new_test_data, format="json") + self.new_test_data["catalog_json"]["dataset_versioning"] = True + self.new_test_data["catalog_json"]["harvested"] = True + response = self.client.post("/rest/v2/datacatalogs", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('versioning' in response.data['detail'][0], True, response.data) + self.assertEqual("versioning" in response.data["detail"][0], True, response.data) def test_create_identifier_already_exists(self): - response = self.client.post('/rest/v2/datacatalogs', self.new_test_data, format="json") + response = self.client.post("/rest/v2/datacatalogs", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) - response = self.client.post('/rest/v2/datacatalogs', self.new_test_data, format="json") + response = self.client.post("/rest/v2/datacatalogs", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('already exists' in response.data['catalog_json']['identifier'][0], - True, response.data) + self.assertEqual( + "already exists" in response.data["catalog_json"]["identifier"][0], + True, + 
response.data, + ) def test_delete(self): - response = self.client.delete('/rest/v2/datacatalogs/1') + response = self.client.delete("/rest/v2/datacatalogs/1") self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) dc_deleted = DataCatalog.objects_unfiltered.get(pk=1) self.assertEqual(dc_deleted.removed, True) - self.assertEqual(dc_deleted.date_modified, dc_deleted.date_removed, 'date_modified should be updated') + self.assertEqual( + dc_deleted.date_modified, + dc_deleted.date_removed, + "date_modified should be updated", + ) class DataCatalogApiWriteReferenceDataTests(DataCatalogApiWriteCommon): @@ -78,15 +88,15 @@ class DataCatalogApiWriteReferenceDataTests(DataCatalogApiWriteCommon): """ def test_create_data_catalog_with_invalid_reference_data(self): - dc = self.new_test_data['catalog_json'] - dc['field_of_science'][0]['identifier'] = 'nonexisting' - dc['language'][0]['identifier'] = 'nonexisting' - dc['access_rights']['access_type'][0]['identifier'] = 'nonexisting' - dc['access_rights']['license'][0]['identifier'] = 'nonexisting' - response = self.client.post('/rest/v2/datacatalogs', self.new_test_data, format="json") + dc = self.new_test_data["catalog_json"] + dc["field_of_science"][0]["identifier"] = "nonexisting" + dc["language"][0]["identifier"] = "nonexisting" + dc["access_rights"]["access_type"][0]["identifier"] = "nonexisting" + dc["access_rights"]["license"][0]["identifier"] = "nonexisting" + response = self.client.post("/rest/v2/datacatalogs", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('catalog_json' in response.data.keys(), True) - self.assertEqual(len(response.data['catalog_json']), 4) + self.assertEqual("catalog_json" in response.data.keys(), True) + self.assertEqual(len(response.data["catalog_json"]), 4) def test_create_data_catalog_populate_fields_from_reference_data(self): """ @@ -97,71 +107,90 @@ def test_create_data_catalog_populate_fields_from_reference_data(self): 3) Check that labels have also been copied to data catalog to their appropriate fields """ cache = RedisClient() - refdata = cache.get('reference_data')['reference_data'] - orgdata = cache.get('reference_data')['organization_data'] + refdata = cache.get("reference_data")["reference_data"] + orgdata = cache.get("reference_data")["organization_data"] refs = {} data_types = [ - 'access_type', - 'field_of_science', - 'language', - 'license', + "access_type", + "field_of_science", + "language", + "license", ] # the values in these selected entries will be used throughout the rest of the test case for dtype in data_types: entry = refdata[dtype][0] refs[dtype] = { - 'code': entry['code'], - 'uri': entry['uri'], - 'label': entry.get('label', None), + "code": entry["code"], + "uri": entry["uri"], + "label": entry.get("label", None), } - refs['organization'] = { - 'uri': orgdata['organization'][1]['uri'], - 'code': orgdata['organization'][1]['code'], - 'label': orgdata['organization'][1]['label'], + refs["organization"] = { + "uri": orgdata["organization"][1]["uri"], + "code": orgdata["organization"][1]["code"], + "label": orgdata["organization"][1]["label"], } # replace the relations with objects that have only the identifier set with code as value, # to easily check that label was populated (= that it appeared in the dataset after create) # without knowing its original value from the generated test data - dc = self.new_test_data['catalog_json'] - dc['field_of_science'][0] = {'identifier': refs['field_of_science']['code']} -
dc['language'][0] = {'identifier': refs['language']['code']} - dc['access_rights']['access_type'][0] = {'identifier': refs['access_type']['code']} - dc['access_rights']['license'][0] = {'identifier': refs['license']['code']} + dc = self.new_test_data["catalog_json"] + dc["field_of_science"][0] = {"identifier": refs["field_of_science"]["code"]} + dc["language"][0] = {"identifier": refs["language"]["code"]} + dc["access_rights"]["access_type"][0] = {"identifier": refs["access_type"]["code"]} + dc["access_rights"]["license"][0] = {"identifier": refs["license"]["code"]} # these have other required fields, so only update the identifier with code - dc['publisher']['identifier'] = refs['organization']['code'] - dc['access_rights']['has_rights_related_agent'][0]['identifier'] = refs['organization']['code'] + dc["publisher"]["identifier"] = refs["organization"]["code"] + dc["access_rights"]["has_rights_related_agent"][0]["identifier"] = refs["organization"][ + "code" + ] - response = self.client.post('/rest/v2/datacatalogs', self.new_test_data, format="json") + response = self.client.post("/rest/v2/datacatalogs", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('catalog_json' in response.data.keys(), True) + self.assertEqual("catalog_json" in response.data.keys(), True) - new_dc = response.data['catalog_json'] + new_dc = response.data["catalog_json"] self._assert_uri_copied_to_identifier(refs, new_dc) self._assert_label_copied_to_pref_label(refs, new_dc) self._assert_label_copied_to_title(refs, new_dc) self._assert_label_copied_to_name(refs, new_dc) def _assert_uri_copied_to_identifier(self, refs, new_dc): - self.assertEqual(refs['field_of_science']['uri'], new_dc['field_of_science'][0]['identifier']) - self.assertEqual(refs['language']['uri'], new_dc['language'][0]['identifier']) - self.assertEqual(refs['access_type']['uri'], new_dc['access_rights']['access_type'][0]['identifier']) - self.assertEqual(refs['license']['uri'], new_dc['access_rights']['license'][0]['identifier']) - self.assertEqual(refs['organization']['uri'], new_dc['publisher']['identifier']) - self.assertEqual(refs['organization']['uri'], - new_dc['access_rights']['has_rights_related_agent'][0]['identifier']) + self.assertEqual( + refs["field_of_science"]["uri"], new_dc["field_of_science"][0]["identifier"] + ) + self.assertEqual(refs["language"]["uri"], new_dc["language"][0]["identifier"]) + self.assertEqual( + refs["access_type"]["uri"], + new_dc["access_rights"]["access_type"][0]["identifier"], + ) + self.assertEqual( + refs["license"]["uri"], new_dc["access_rights"]["license"][0]["identifier"] + ) + self.assertEqual(refs["organization"]["uri"], new_dc["publisher"]["identifier"]) + self.assertEqual( + refs["organization"]["uri"], + new_dc["access_rights"]["has_rights_related_agent"][0]["identifier"], + ) def _assert_label_copied_to_pref_label(self, refs, new_dc): - self.assertEqual(refs['field_of_science']['label'], new_dc['field_of_science'][0].get('pref_label', None)) - self.assertEqual(refs['access_type']['label'], - new_dc['access_rights']['access_type'][0].get('pref_label', None)) + self.assertEqual( + refs["field_of_science"]["label"], + new_dc["field_of_science"][0].get("pref_label", None), + ) + self.assertEqual( + refs["access_type"]["label"], + new_dc["access_rights"]["access_type"][0].get("pref_label", None), + ) def _assert_label_copied_to_title(self, refs, new_dc): - self.assertEqual(refs['license']['label'], 
new_dc['access_rights']['license'][0].get('title', None)) + self.assertEqual( + refs["license"]["label"], + new_dc["access_rights"]["license"][0].get("title", None), + ) def _assert_label_copied_to_name(self, refs, new_dc): - self.assertEqual(refs['organization']['label'], new_dc['publisher']['name']) + self.assertEqual(refs["organization"]["label"], new_dc["publisher"]["name"]) diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/api_version_lock.py b/src/metax_api/tests/api/rest/v2/views/datasets/api_version_lock.py index cf35762e..bd2ec406 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/api_version_lock.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/api_version_lock.py @@ -15,6 +15,7 @@ CR = CatalogRecordV2 + class CatalogRecordApiLock(CatalogRecordApiWriteCommon): """ Tests that no datasets created or edited by v2 api can be further @@ -31,19 +32,19 @@ def _create_v1_dataset(self, cumulative=False, cr=None, draft=False): if cumulative: # not to mess up with the original test dataset cr = deepcopy(cr) - cr['cumulative_state'] = 1 + cr["cumulative_state"] = 1 if draft: - params = 'draft' + params = "draft" else: - params = '' + params = "" - response = self.client.post(f'/rest/datasets?{params}', cr, format='json') + response = self.client.post(f"/rest/datasets?{params}", cr, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertEqual(response.data['api_meta']['version'], 1) + self.assertEqual(response.data["api_meta"]["version"], 1) - cr = CatalogRecord.objects.get(pk=response.data['id']) - self.assertEqual(cr.api_meta['version'], 1, 'api_version should be 1') + cr = CatalogRecord.objects.get(pk=response.data["id"]) + self.assertEqual(cr.api_meta["version"], 1, "api_version should be 1") return response.data @@ -54,26 +55,30 @@ def _create_v2_dataset(self, cumulative=False, cr=None, draft=False): if cumulative: # not to mess up with the original test dataset cr = deepcopy(cr) - cr['cumulative_state'] = 1 + cr["cumulative_state"] = 1 if draft: - params = 'draft' + params = "draft" else: - params = '' + params = "" - response = self.client.post(f'/rest/v2/datasets?{params}', cr, format='json') + response = self.client.post(f"/rest/v2/datasets?{params}", cr, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertEqual(response.data['api_meta']['version'], 2) + self.assertEqual(response.data["api_meta"]["version"], 2) - cr = CatalogRecord.objects.get(pk=response.data['id']) - self.assertEqual(cr.api_meta['version'], 2, 'api_version should be 2') + cr = CatalogRecord.objects.get(pk=response.data["id"]) + self.assertEqual(cr.api_meta["version"], 2, "api_version should be 2") return response.data def _assert_api_version(self, identifier, version): - response = self.client.get(f'/rest/datasets/{identifier}') + response = self.client.get(f"/rest/datasets/{identifier}") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['api_meta']['version'], version, 'api_version should have been changed') + self.assertEqual( + response.data["api_meta"]["version"], + version, + "api_version should have been changed", + ) def test_datasets_are_assigned_to_correct_api_version_on_create(self): """ @@ -90,48 +95,85 @@ def test_version_v2_blocks_all_v1_apis(self): cr_v2 = self._create_v2_dataset() - cr_v2['research_dataset']['title']['en'] = 'totally unique changes to the english title' + cr_v2["research_dataset"]["title"]["en"] = "totally unique changes to 
the english title" # test basic update operations - response = self.client.put(f'/rest/datasets/{cr_v2["id"]}', cr_v2, format='json') - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, 'v1 modifications should have been blocked') - self.assertTrue('correct api version' in response.data['detail'][0], 'msg should be about api version') - - response = self.client.patch(f'/rest/datasets/{cr_v2["id"]}', cr_v2, format='json') - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, 'v1 modifications should have been blocked') - self.assertTrue('correct api version' in response.data['detail'][0], 'msg should be about api version') + response = self.client.put(f'/rest/datasets/{cr_v2["id"]}', cr_v2, format="json") + self.assertEqual( + response.status_code, + status.HTTP_400_BAD_REQUEST, + "v1 modifications should have been blocked", + ) + self.assertTrue( + "correct api version" in response.data["detail"][0], + "msg should be about api version", + ) + + response = self.client.patch(f'/rest/datasets/{cr_v2["id"]}', cr_v2, format="json") + self.assertEqual( + response.status_code, + status.HTTP_400_BAD_REQUEST, + "v1 modifications should have been blocked", + ) + self.assertTrue( + "correct api version" in response.data["detail"][0], + "msg should be about api version", + ) cr_v1 = self._create_v1_dataset() - cr_v1['research_dataset']['title']['en'] = 'this title update should be file' - - response = self.client.put('/rest/datasets', [cr_v1, cr_v2], format='json') - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, 'v1 modifications should have been blocked') - self.assertTrue('correct api version' in response.data['detail'][0], 'msg should be about api version') - - response = self.client.patch('/rest/datasets', [cr_v1, cr_v2], format='json') - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, 'v1 modifications should have been blocked') - self.assertTrue('correct api version' in response.data['detail'][0], 'msg should be about api version') + cr_v1["research_dataset"]["title"]["en"] = "this title update should be fine" + + response = self.client.put("/rest/datasets", [cr_v1, cr_v2], format="json") + self.assertEqual( + response.status_code, + status.HTTP_400_BAD_REQUEST, + "v1 modifications should have been blocked", + ) + self.assertTrue( + "correct api version" in response.data["detail"][0], + "msg should be about api version", + ) + + response = self.client.patch("/rest/datasets", [cr_v1, cr_v2], format="json") + self.assertEqual( + response.status_code, + status.HTTP_400_BAD_REQUEST, + "v1 modifications should have been blocked", + ) + self.assertTrue( + "correct api version" in response.data["detail"][0], + "msg should be about api version", + ) # test change_cumulative_state params = f'identifier={cr_v2["identifier"]}&cumulative_state=1' - response = self.client.post(f'/rpc/datasets/change_cumulative_state?{params}', format='json') - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, 'v1 modifications should have been blocked') - self.assertTrue('correct api version' in response.data['detail'][0], 'msg should be about api version') + response = self.client.post( + f"/rpc/datasets/change_cumulative_state?{params}", format="json" + ) + self.assertEqual( + response.status_code, + status.HTTP_400_BAD_REQUEST, + "v1 modifications should have been blocked", + ) + self.assertTrue( + "correct api version" in response.data["detail"][0], + "msg should be about api version", + ) # add directory with some files in it
to test other rpc apis # dir contains first 5 files 'pid:urn:n' cr_dirs = deepcopy(self.cr_test_data) - cr_dirs['research_dataset']['directories'] = [ + cr_dirs["research_dataset"]["directories"] = [ { "title": "dir_name", "identifier": "pid:urn:dir:3", "description": "What is in this directory", "use_category": { "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/method" - } + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/method", + }, } ] @@ -142,19 +184,35 @@ def test_version_v2_blocks_all_v1_apis(self): # the directory does not have anything to add but it is not relevant here, since # api should return error about the api version before that params = f'cr_identifier={cr_dirs["identifier"]}&dir_identifier=pid:urn:dir:3' - response = self.client.post(f'/rpc/datasets/refresh_directory_content?{params}', format='json') - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, 'v1 modifications should have been blocked') - self.assertTrue('correct api version' in response.data['detail'][0], 'msg should be about api version') + response = self.client.post( + f"/rpc/datasets/refresh_directory_content?{params}", format="json" + ) + self.assertEqual( + response.status_code, + status.HTTP_400_BAD_REQUEST, + "v1 modifications should have been blocked", + ) + self.assertTrue( + "correct api version" in response.data["detail"][0], + "msg should be about api version", + ) # test fix_deprecated - response = self.client.delete('/rest/files/pid:urn:1') + response = self.client.delete("/rest/files/pid:urn:1") self.assertEqual(response.status_code, status.HTTP_200_OK) params = f'identifier={cr_dirs["identifier"]}' - response = self.client.post(f'/rpc/datasets/fix_deprecated?{params}', format='json') - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, 'v1 modifications should have been blocked') - self.assertTrue('correct api version' in response.data['detail'][0], 'msg should be about api version') + response = self.client.post(f"/rpc/datasets/fix_deprecated?{params}", format="json") + self.assertEqual( + response.status_code, + status.HTTP_400_BAD_REQUEST, + "v1 modifications should have been blocked", + ) + self.assertTrue( + "correct api version" in response.data["detail"][0], + "msg should be about api version", + ) def test_v2_rest_api_modification_updates_api_version(self): """ @@ -164,34 +222,34 @@ def test_v2_rest_api_modification_updates_api_version(self): # test basic single PUT/PATCH updates - for http_verb in ['put', 'patch']: + for http_verb in ["put", "patch"]: update_request = getattr(self.client, http_verb) cr_v1 = self._create_v1_dataset() - cr_v1['research_dataset']['title']['en'] = 'some new title' + cr_v1["research_dataset"]["title"]["en"] = "some new title" - response = update_request(f'/rest/v2/datasets/{cr_v1["id"]}', cr_v1, format='json') + response = update_request(f'/rest/v2/datasets/{cr_v1["id"]}', cr_v1, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_api_version(cr_v1['identifier'], 2) + self._assert_api_version(cr_v1["identifier"], 2) # test basic bulk PUT/PATCH updates - for http_verb in ['put', 'patch']: + for http_verb in ["put", "patch"]: update_request = getattr(self.client, http_verb) cr_v1_0 = self._create_v1_dataset() cr_v1_1 = self._create_v1_dataset() - cr_v1_0['research_dataset']['title']['en'] = 'some new title' - cr_v1_1['research_dataset']['title']['en'] = 'some 
new title for another' + cr_v1_0["research_dataset"]["title"]["en"] = "some new title" + cr_v1_1["research_dataset"]["title"]["en"] = "some new title for another" - response = update_request('/rest/v2/datasets', [cr_v1_0, cr_v1_1], format='json') + response = update_request("/rest/v2/datasets", [cr_v1_0, cr_v1_1], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_api_version(cr_v1_0['identifier'], 2) - self._assert_api_version(cr_v1_1['identifier'], 2) + self._assert_api_version(cr_v1_0["identifier"], 2) + self._assert_api_version(cr_v1_1["identifier"], 2) # test POST /rest/v2/datasets/{PID}/files updates api version @@ -199,43 +257,47 @@ def test_v2_rest_api_modification_updates_api_version(self): cr_v1 = self._create_v1_dataset(cumulative=True) file_changes = { - 'files': [ - { 'identifier': 'pid:urn:1'}, # adds file, but entry is otherwise not persisted - { 'identifier': 'pid:urn:2', 'title': 'custom title', 'use_category': { 'identifier': 'source' } }, + "files": [ + {"identifier": "pid:urn:1"}, # adds file, but entry is otherwise not persisted + { + "identifier": "pid:urn:2", + "title": "custom title", + "use_category": {"identifier": "source"}, + }, ] } - response = self.client.post(f'/rest/v2/datasets/{cr_v1["id"]}/files', file_changes, format='json') + response = self.client.post( + f'/rest/v2/datasets/{cr_v1["id"]}/files', file_changes, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_api_version(cr_v1['id'], 2) + self._assert_api_version(cr_v1["id"], 2) # test PUT/PATCH /rest/v2/datasets/{PID}/files/user_metadata - for http_verb in ['put', 'patch']: + for http_verb in ["put", "patch"]: update_request = getattr(self.client, http_verb) cr = self.cr_test_data - cr['research_dataset']['files'] = [ + cr["research_dataset"]["files"] = [ { "title": "customtitle", "identifier": "pid:urn:1", "use_category": { "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/source" - } + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/source", + }, }, ] cr_v1 = self._create_v1_dataset(cr=cr) user_metadata = { - 'files': [ + "files": [ { - 'identifier': 'pid:urn:1', - 'title': 'custom title 1', - 'use_category': { - 'identifier': 'source' - } + "identifier": "pid:urn:1", + "title": "custom title 1", + "use_category": {"identifier": "source"}, } ] }
self.client.post( + f"/rpc/v2/datasets/change_cumulative_state?{params}", format="json" + ) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) self._assert_api_version(cr_v1_non_cum["identifier"], 2) params = f'identifier={cr_v1_cum["identifier"]}&cumulative_state=2' - response = self.client.post(f'/rpc/v2/datasets/change_cumulative_state?{params}', format='json') + response = self.client.post( + f"/rpc/v2/datasets/change_cumulative_state?{params}", format="json" + ) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) self._assert_api_version(cr_v1_cum["identifier"], 2) @@ -280,15 +346,15 @@ def test_v2_rpc_api_modification_updates_api_version(self): cr_v1 = self._create_v1_dataset() params = f'identifier={cr_v1["identifier"]}' - response = self.client.post(f'/rpc/v2/datasets/create_draft?{params}', format='json') + response = self.client.post(f"/rpc/v2/datasets/create_draft?{params}", format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) response = self.client.get(f'/rest/datasets/{cr_v1["identifier"]}') self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) cr_v1 = response.data - new_id = response.data['identifier'] - response = self.client.get(f'/rest/datasets/{new_id}') + new_id = response.data["identifier"] + response = self.client.get(f"/rest/datasets/{new_id}") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) new_dataset = response.data @@ -299,15 +365,15 @@ def test_v2_rpc_api_modification_updates_api_version(self): cr_v1 = self._create_v1_dataset() params = f'identifier={cr_v1["identifier"]}' - response = self.client.post(f'/rpc/v2/datasets/create_new_version?{params}', format='json') + response = self.client.post(f"/rpc/v2/datasets/create_new_version?{params}", format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) response = self.client.get(f'/rest/datasets/{cr_v1["identifier"]}') self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) cr_v1 = response.data - new_id = response.data['identifier'] - response = self.client.get(f'/rest/datasets/{new_id}') + new_id = response.data["identifier"] + response = self.client.get(f"/rest/datasets/{new_id}") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) new_dataset = response.data diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py b/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py index bd09cf74..00b5c7a9 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py @@ -21,10 +21,12 @@ DFT_CATALOG = django_settings.DFT_DATA_CATALOG_IDENTIFIER ATT_CATALOG = django_settings.ATT_DATA_CATALOG_IDENTIFIER + class CatalogRecordDraftTests(CatalogRecordApiWriteCommon): """ Tests related to draft datasets """ + def setUp(self): super().setUp() @@ -32,87 +34,91 @@ def setUp(self): dc = DataCatalog.objects.get(pk=1) catalog_json = dc.catalog_json for identifier in END_USER_ALLOWED_DATA_CATALOGS: - catalog_json['identifier'] = identifier + catalog_json["identifier"] = identifier # not all non-draft catalogs are actually ida but that is not tested here - catalog_json['research_dataset_schema'] = 'dft' if identifier == DFT_CATALOG else 'ida' + catalog_json["research_dataset_schema"] = "dft" if identifier == DFT_CATALOG else "ida" dc = DataCatalog.objects.create( catalog_json=catalog_json, 
date_created=get_tz_aware_now_without_micros(), - catalog_record_services_create='testuser,api_auth_user,metax', - catalog_record_services_edit='testuser,api_auth_user,metax', - catalog_record_services_read='testuser,api_auth_user,metax' + catalog_record_services_create="testuser,api_auth_user,metax", + catalog_record_services_edit="testuser,api_auth_user,metax", + catalog_record_services_read="testuser,api_auth_user,metax", ) self.minimal_draft = { "metadata_provider_org": "abc-org-123", "metadata_provider_user": "abc-usr-123", - "research_dataset": { - "title": { - "en": "Wonderful Title" - } - } + "research_dataset": {"title": {"en": "Wonderful Title"}}, } self.token = get_test_oidc_token(new_proxy=True) self._mock_token_validation_succeeds() # Create published record with owner: testuser and pk 1 # Create draft records with owner: testuser, pk: 2 and owner: 'some owner who is not you', pk 3 - self._set_cr_owner_and_state(1, 'published', self.token['CSCUserName']) # Published dataset - self.assertEqual(CatalogRecordV2.objects.get(pk=1).metadata_provider_user, 'testuser') + self._set_cr_owner_and_state(1, "published", self.token["CSCUserName"]) # Published dataset + self.assertEqual(CatalogRecordV2.objects.get(pk=1).metadata_provider_user, "testuser") - self._set_cr_owner_and_state(2, 'draft', self.token['CSCUserName']) # testusers' draft - self.assertEqual(CatalogRecordV2.objects.get(pk=2).metadata_provider_user, 'testuser') + self._set_cr_owner_and_state(2, "draft", self.token["CSCUserName"]) # testuser's draft + self.assertEqual(CatalogRecordV2.objects.get(pk=2).metadata_provider_user, "testuser") - self._set_cr_owner_and_state(3, 'draft', '#### Some owner who is not you ####') # Draft dataset for some user - self.assertNotEqual(CatalogRecordV2.objects.get(pk=3).metadata_provider_user, 'testuser') + self._set_cr_owner_and_state( + 3, "draft", "#### Some owner who is not you ####" + ) # Draft dataset for some user + self.assertNotEqual(CatalogRecordV2.objects.get(pk=3).metadata_provider_user, "testuser") def _set_cr_owner_and_state(self, cr_id, state, owner): - ''' helper method for testing user accessibility for draft datasets ''' + """ helper method for testing user accessibility for draft datasets """ cr = CatalogRecordV2.objects.get(pk=cr_id) cr.state = state cr.user_created = owner cr.metadata_provider_user = owner - cr.editor = None # pretend the record was created by user directly - cr.data_catalog_id = DataCatalog.objects.get(catalog_json__identifier=END_USER_ALLOWED_DATA_CATALOGS[0]).id + cr.editor = None # pretend the record was created by user directly + cr.data_catalog_id = DataCatalog.objects.get( + catalog_json__identifier=END_USER_ALLOWED_DATA_CATALOGS[0] + ).id cr.force_save() def test_field_state_exists(self): """Try fetching any dataset, field 'state' should be returned""" - cr = self.client.get('/rest/v2/datasets/13').data - self.assertEqual('state' in cr, True) + cr = self.client.get("/rest/v2/datasets/13").data + self.assertEqual("state" in cr, True) def _test_issued_date_is_not_generated_for_drafts(self): - ''' + """ Drafts will not have the issued date generated. Field is created when dataset is published - ''' + """ # Dataset without issued date - self.cr_full_ida_test_data['research_dataset'].pop('issued', None) + self.cr_full_ida_test_data["research_dataset"].pop("issued", None) # Create draft - response = self.client.post('/rest/v2/datasets?draft=true', self.cr_full_ida_test_data, format="json") + response = self.client.post( + "/rest/v2/datasets?draft=true",
self.cr_full_ida_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertTrue('issued' not in response.data['research_dataset'], response.data) + self.assertTrue("issued" not in response.data["research_dataset"], response.data) # Issued_date is generated when dataset is published - publish = self.client.post('/rpc/v2/datasets/publish_dataset?identifier={}'.format(response.data['identifier'])) + publish = self.client.post( + "/rpc/v2/datasets/publish_dataset?identifier={}".format(response.data["identifier"]) + ) self.assertEqual(publish.status_code, status.HTTP_200_OK, publish.data) - published = self.client.get('/rest/v2/datasets/{}'.format(response.data['identifier'])) + published = self.client.get("/rest/v2/datasets/{}".format(response.data["identifier"])) self.assertEqual(published.status_code, status.HTTP_200_OK, published.data) - self.assertTrue('issued' in published.data['research_dataset'], published.data) + self.assertTrue("issued" in published.data["research_dataset"], published.data) def test_change_state_field_through_API(self): """Fetch a dataset and change its state. - Value should remain: 'published' """ + Value should remain: 'published'""" - cr = self.client.get('/rest/v2/datasets/1').data - cr['state'] = 'changed value' - response = self.client.put('/rest/v2/datasets/1', cr, format="json") + cr = self.client.get("/rest/v2/datasets/1").data + cr["state"] = "changed value" + response = self.client.put("/rest/v2/datasets/1", cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertFalse(response.data['state'] == 'changed value') + self.assertFalse(response.data["state"] == "changed value") ### # Tests for different user roles access to drafts @@ -120,56 +126,56 @@ def test_change_state_field_through_API(self): @responses.activate def test_endusers_access_to_draft_datasets(self): - ''' End user should get published data and his/her drafts ''' + """ End user should get published data and his/her drafts """ # Test access as end user - self._use_http_authorization(method='bearer', token=self.token) + self._use_http_authorization(method="bearer", token=self.token) # Test access for owner of dataset - response = self.client.get('/rest/v2/datasets/1') + response = self.client.get("/rest/v2/datasets/1") self.assertEqual(response.status_code, status.HTTP_200_OK, response.status_code) - response = self.client.get('/rest/v2/datasets/2') + response = self.client.get("/rest/v2/datasets/2") self.assertEqual(response.status_code, status.HTTP_200_OK, response.status_code) - response = self.client.get('/rest/v2/datasets/3') + response = self.client.get("/rest/v2/datasets/3") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.status_code) # Test for multiple datasets - response = self.client.get('/rest/v2/datasets', format="json") + response = self.client.get("/rest/v2/datasets", format="json") # Returned list of datasets should not have owner "#### Some owner who is not you ####" - owners = [cr['metadata_provider_user'] for cr in response.data['results']] - self.assertEqual('#### Some owner who is not you ####' not in owners, True, response.data) + owners = [cr["metadata_provider_user"] for cr in response.data["results"]] + self.assertEqual("#### Some owner who is not you ####" not in owners, True, response.data) def test_service_users_access_to_draft_datasets(self): - ''' Service users should get all data ''' + """ Service users should get all data """ # 
test access as a service-user - self._use_http_authorization(method='basic', username='metax') + self._use_http_authorization(method="basic", username="metax") - response = self.client.get('/rest/v2/datasets/1') + response = self.client.get("/rest/v2/datasets/1") self.assertEqual(response.status_code, status.HTTP_200_OK, response.status_code) - response = self.client.get('/rest/v2/datasets/2') + response = self.client.get("/rest/v2/datasets/2") self.assertEqual(response.status_code, status.HTTP_200_OK, response.status_code) - response = self.client.get('/rest/v2/datasets/3') + response = self.client.get("/rest/v2/datasets/3") self.assertEqual(response.status_code, status.HTTP_200_OK, response.status_code) # test for multiple datasets - response = self.client.get('/rest/v2/datasets', format="json") + response = self.client.get("/rest/v2/datasets", format="json") # Returned list of datasets should have owner "#### Some owner who is not you ####" - owners = [cr['metadata_provider_user'] for cr in response.data['results']] - self.assertEqual('#### Some owner who is not you ####' in owners, True, response.data) + owners = [cr["metadata_provider_user"] for cr in response.data["results"]] + self.assertEqual("#### Some owner who is not you ####" in owners, True, response.data) def test_anonymous_users_access_to_draft_datasets(self): - ''' Unauthenticated user should get only published datasets ''' + """ Unauthenticated user should get only published datasets """ # Test access as unauthenticated user self.client._credentials = {} - response = self.client.get('/rest/v2/datasets/1') + response = self.client.get("/rest/v2/datasets/1") self.assertEqual(response.status_code, status.HTTP_200_OK) - response = self.client.get('/rest/v2/datasets/2') + response = self.client.get("/rest/v2/datasets/2") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.data) - response = self.client.get('/rest/v2/datasets/3') + response = self.client.get("/rest/v2/datasets/3") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.data) # test for multiple datasets - response = self.client.get('/rest/v2/datasets', format="json") + response = self.client.get("/rest/v2/datasets", format="json") # Returned list of datasets should not have drafts - states = [cr['state'] for cr in response.data['results']] - self.assertEqual('draft' not in states, True, response.data) + states = [cr["state"] for cr in response.data["results"]] + self.assertEqual("draft" not in states, True, response.data) ### # Tests for different user roles access to update drafts @@ -177,76 +183,82 @@ def test_anonymous_users_access_to_draft_datasets(self): @responses.activate def test_endusers_can_update_draft_datasets(self): - ''' End user should be able to update only his/her drafts ''' + """ End user should be able to update only his/her drafts """ # Set end user - self._use_http_authorization(method='bearer', token=self.token) + self._use_http_authorization(method="bearer", token=self.token) - for http_verb in ['put', 'patch']: + for http_verb in ["put", "patch"]: update_request = getattr(self.client, http_verb) - data1 = self.client.get('/rest/v2/datasets/1').data # published - response = update_request('/rest/v2/datasets/1', data1, format="json") + data1 = self.client.get("/rest/v2/datasets/1").data # published + response = update_request("/rest/v2/datasets/1", data1, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - data2 = self.client.get('/rest/v2/datasets/2').data # 
end users own draft - response = update_request('/rest/v2/datasets/2', data2, format="json") + data2 = self.client.get("/rest/v2/datasets/2").data # end user's own draft + response = update_request("/rest/v2/datasets/2", data2, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - data3 = self.client.get('/rest/v2/datasets/3').data # someone elses draft - response = update_request('/rest/v2/datasets/3', data3, format="json") + data3 = self.client.get("/rest/v2/datasets/3").data # someone else's draft + response = update_request("/rest/v2/datasets/3", data3, format="json") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.data) # test for multiple datasets - response = update_request('/rest/v2/datasets', [data1, data2, data3], format="json") - owners = [cr['object']['metadata_provider_user'] for cr in response.data['success']] - self.assertEqual('#### Some owner who is not you ####' not in owners, True, response.data) + response = update_request("/rest/v2/datasets", [data1, data2, data3], format="json") + owners = [cr["object"]["metadata_provider_user"] for cr in response.data["success"]] + self.assertEqual( + "#### Some owner who is not you ####" not in owners, True, response.data + ) def test_service_users_can_update_draft_datasets(self): - ''' Dataset drafts should be able to be updated by service users (service is responsible that - their current user in e.g. Qvain is allowed to access the dataset)''' + """Dataset drafts should be able to be updated by service users (service is responsible that + their current user in e.g. Qvain is allowed to access the dataset)""" # Set service-user - self._use_http_authorization(method='basic', username='metax') + self._use_http_authorization(method="basic", username="metax") - for http_verb in ['put', 'patch']: + for http_verb in ["put", "patch"]: update_request = getattr(self.client, http_verb) - data1 = self.client.get('/rest/v2/datasets/1').data # published - response = update_request('/rest/v2/datasets/1', data1, format="json") + data1 = self.client.get("/rest/v2/datasets/1").data # published + response = update_request("/rest/v2/datasets/1", data1, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - data2 = self.client.get('/rest/v2/datasets/2').data # draft - response = update_request('/rest/v2/datasets/2', data2, format="json") + data2 = self.client.get("/rest/v2/datasets/2").data # draft + response = update_request("/rest/v2/datasets/2", data2, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - data3 = self.client.get('/rest/v2/datasets/3').data # draft - response = update_request('/rest/v2/datasets/3', data3, format="json") + data3 = self.client.get("/rest/v2/datasets/3").data # draft + response = update_request("/rest/v2/datasets/3", data3, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # test for multiple datasets - response = update_request('/rest/v2/datasets', [data1, data2, data3], format="json") - self.assertEqual(len(response.data['success']), 3, 'response.data should contain 3 changed objects') - owners = [cr['object']['metadata_provider_user'] for cr in response.data['success']] - self.assertEqual('#### Some owner who is not you ####' in owners, True, response.data) + response = update_request("/rest/v2/datasets", [data1, data2, data3], format="json") + self.assertEqual( + len(response.data["success"]), + 3, + "response.data should contain 3 changed
objects", + ) + owners = [cr["object"]["metadata_provider_user"] for cr in response.data["success"]] + self.assertEqual("#### Some owner who is not you ####" in owners, True, response.data) def test_anonymous_user_cannot_update_draft_datasets(self): - ''' Unauthenticated user should not be able to know drafts exists in the first place''' + """ Unauthenticated user should not be able to know drafts exist in the first place""" # Set unauthenticated user self.client._credentials = {} # Fetches a published dataset since unauthenticated user can't get drafts - response = self.client.get('/rest/v2/datasets/1') + response = self.client.get("/rest/v2/datasets/1") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) data = response.data - for http_verb in ['put', 'patch']: + for http_verb in ["put", "patch"]: update_request = getattr(self.client, http_verb) - response = update_request('/rest/v2/datasets/1', data, format="json") # published + response = update_request("/rest/v2/datasets/1", data, format="json") # published self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.status_code) - response = update_request('/rest/v2/datasets/2', data, format="json") # draft + response = update_request("/rest/v2/datasets/2", data, format="json") # draft self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.status_code) - response = update_request('/rest/v2/datasets/3', data, format="json") # draft + response = update_request("/rest/v2/datasets/3", data, format="json") # draft self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.status_code) # test for multiple datasets - response = update_request('/rest/v2/datasets', data, format="json") + response = update_request("/rest/v2/datasets", data, format="json") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.status_code) ### @@ -254,25 +266,25 @@ def test_anonymous_user_cannot_update_draft_datasets(self): ### def test_draft_is_permanently_deleted_by_service_user(self): - '''Draft datasets should be permanently deleted from the database. - Only the dataset owner is able to delete draft datasets.''' + """Draft datasets should be permanently deleted from the database.
+ Only the dataset owner is able to delete draft datasets.""" # Set service-user - self._use_http_authorization(method='basic', username='metax') + self._use_http_authorization(method="basic", username="metax") for cr_id in (2, 3): - response = self.client.delete('/rest/v2/datasets/%d' % cr_id) + response = self.client.delete("/rest/v2/datasets/%d" % cr_id) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) self.assertFalse(CatalogRecordV2.objects_unfiltered.filter(pk=cr_id).exists()) @responses.activate def test_draft_is_permanently_deleted_by_enduser(self): # Set end user - self._use_http_authorization(method='bearer', token=self.token) + self._use_http_authorization(method="bearer", token=self.token) - response = self.client.delete('/rest/v2/datasets/2') + response = self.client.delete("/rest/v2/datasets/2") self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) self.assertFalse(CatalogRecordV2.objects_unfiltered.filter(pk=2).exists()) - response = self.client.delete('/rest/v2/datasets/3') + response = self.client.delete("/rest/v2/datasets/3") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.data) ### @@ -280,147 +292,179 @@ def test_draft_is_permanently_deleted_by_enduser(self): ### def test_service_users_can_save_draft_datasets(self): - ''' Drafts should be saved without preferred identifier ''' + """ Drafts should be saved without preferred identifier """ # test access as a service-user - self._use_http_authorization(method='basic', username='metax') + self._use_http_authorization(method="basic", username="metax") - response = self.client.post('/rest/v2/datasets?draft', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets?draft", self.cr_test_data, format="json") - pid = response.data['research_dataset']['preferred_identifier'] + pid = response.data["research_dataset"]["preferred_identifier"] self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertTrue(pid == 'draft:%s' % response.data['identifier'], response.data) - self.assertTrue('urn' not in pid, response.data) - self.assertTrue('doi' not in pid, response.data) - self.assertTrue(response.data['state'] == 'draft', response.data) - - for queryparam in ('', '?draft=false'): - response = self.client.post('/rest/v2/datasets{}'.format(queryparam), self.cr_test_data, format="json") + self.assertTrue(pid == "draft:%s" % response.data["identifier"], response.data) + self.assertTrue("urn" not in pid, response.data) + self.assertTrue("doi" not in pid, response.data) + self.assertTrue(response.data["state"] == "draft", response.data) + + for queryparam in ("", "?draft=false"): + response = self.client.post( + "/rest/v2/datasets{}".format(queryparam), + self.cr_test_data, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertTrue(response.data['state'] == 'published', response.data) + self.assertTrue(response.data["state"] == "published", response.data) ### # Tests for use_doi_for_published -field ### def test_use_doi_for_published_field(self): - ''' Drafts with 'use_doi' checkbox checked should have 'use_doi_for_published' == True - to tell that pid will be of type DOI when draft is published ''' - - self.cr_test_data['data_catalog'] = IDA_CATALOG - response = self.client.post('/rest/v2/datasets?pid_type=doi&draft=true', self.cr_test_data, format="json") + """Drafts with 'use_doi' checkbox checked should have 'use_doi_for_published' == 
True + to tell that pid will be of type DOI when draft is published""" + + self.cr_test_data["data_catalog"] = IDA_CATALOG + response = self.client.post( + "/rest/v2/datasets?pid_type=doi&draft=true", + self.cr_test_data, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertTrue('use_doi_for_published' in response.data) - self.assertTrue(response.data['use_doi_for_published'] is True, response.data) + self.assertTrue("use_doi_for_published" in response.data) + self.assertTrue(response.data["use_doi_for_published"] is True, response.data) # update draft & toggle use_doi_for_published-field - identifier = response.data['identifier'] + identifier = response.data["identifier"] - response.data['use_doi_for_published'] = False - update = self.client.put(f'/rest/v2/datasets/{identifier}', response.data, format="json") + response.data["use_doi_for_published"] = False + update = self.client.put(f"/rest/v2/datasets/{identifier}", response.data, format="json") self.assertEqual(update.status_code, status.HTTP_200_OK) - self.assertTrue(response.data['use_doi_for_published'] is False, response.data) + self.assertTrue(response.data["use_doi_for_published"] is False, response.data) - update.data['use_doi_for_published'] = True - toggle = self.client.put(f'/rest/v2/datasets/{identifier}', update.data, format="json") - self.assertTrue(toggle.data['use_doi_for_published'] is True, toggle.data) + update.data["use_doi_for_published"] = True + toggle = self.client.put(f"/rest/v2/datasets/{identifier}", update.data, format="json") + self.assertTrue(toggle.data["use_doi_for_published"] is True, toggle.data) # Get the dataset afterwards and check that new field is there - response = self.client.get(f'/rest/v2/datasets/{identifier}', format='json') - self.assertTrue('use_doi_for_published' in response.data, response.data) + response = self.client.get(f"/rest/v2/datasets/{identifier}", format="json") + self.assertTrue("use_doi_for_published" in response.data, response.data) # publish the draft - publish = self.client.post(f'/rpc/v2/datasets/publish_dataset?identifier={identifier}', format="json") + publish = self.client.post( + f"/rpc/v2/datasets/publish_dataset?identifier={identifier}", format="json" + ) self.assertEqual(publish.status_code, status.HTTP_200_OK, publish.data) # Published dataset should not return 'use_doi_for_published' # PID should be of type DOI when dataset is published - response = self.client.get(f'/rest/v2/datasets/{identifier}?include_user_metadata', format='json') + response = self.client.get( + f"/rest/v2/datasets/{identifier}?include_user_metadata", format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue('use_doi_for_published' not in response.data) - self.assertTrue('doi' in response.data['research_dataset']['preferred_identifier'], response.data) + self.assertTrue("use_doi_for_published" not in response.data) + self.assertTrue( + "doi" in response.data["research_dataset"]["preferred_identifier"], + response.data, + ) def test_use_doi_for_published_field_not_in_use(self): - ''' Drafts with 'use_doi' checkbox unchecked should have - pid of type URN when draft is published ''' - - for call in ('/rest/v2/datasets?pid_type=urn&draft=true', '/rest/v2/datasets?draft=true'): - self.cr_test_data['data_catalog'] = IDA_CATALOG + """Drafts with 'use_doi' checkbox unchecked should have + pid of type URN when draft is published""" + + for call in ( + "/rest/v2/datasets?pid_type=urn&draft=true", + 
"/rest/v2/datasets?draft=true", + ): + self.cr_test_data["data_catalog"] = IDA_CATALOG response = self.client.post(call, self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertTrue('use_doi_for_published' in response.data) - self.assertTrue(response.data['use_doi_for_published'] is False, response.data) + self.assertTrue("use_doi_for_published" in response.data) + self.assertTrue(response.data["use_doi_for_published"] is False, response.data) # update draft & toggle use_doi_for_published-field - identifier = response.data['identifier'] + identifier = response.data["identifier"] - response.data['use_doi_for_published'] = True - update = self.client.put(f'/rest/v2/datasets/{identifier}', response.data, format="json") - self.assertTrue(response.data['use_doi_for_published'] is True, response.data) + response.data["use_doi_for_published"] = True + update = self.client.put( + f"/rest/v2/datasets/{identifier}", response.data, format="json" + ) + self.assertTrue(response.data["use_doi_for_published"] is True, response.data) - update.data['use_doi_for_published'] = False - toggle = self.client.put(f'/rest/v2/datasets/{identifier}', update.data, format="json") - self.assertTrue(toggle.data['use_doi_for_published'] is False, toggle.data) + update.data["use_doi_for_published"] = False + toggle = self.client.put(f"/rest/v2/datasets/{identifier}", update.data, format="json") + self.assertTrue(toggle.data["use_doi_for_published"] is False, toggle.data) # publish the draft - publish = self.client.post(f'/rpc/v2/datasets/publish_dataset?identifier={identifier}', format="json") + publish = self.client.post( + f"/rpc/v2/datasets/publish_dataset?identifier={identifier}", + format="json", + ) self.assertEqual(publish.status_code, status.HTTP_200_OK, publish.data) - response = self.client.get(f'/rest/v2/datasets/{identifier}?include_user_metadata', format='json') - self.assertTrue('use_doi_for_published' not in response.data) - self.assertTrue('urn' in response.data['research_dataset']['preferred_identifier'], response.data) + response = self.client.get( + f"/rest/v2/datasets/{identifier}?include_user_metadata", format="json" + ) + self.assertTrue("use_doi_for_published" not in response.data) + self.assertTrue( + "urn" in response.data["research_dataset"]["preferred_identifier"], + response.data, + ) ### # Tests for draft data catalog ### def test_minimal_draft_dataset_creation(self): - ''' Drafts have different requirements for mandatory fields ''' - self._use_http_authorization(method='basic', username='metax') + """ Drafts have different requirements for mandatory fields """ + self._use_http_authorization(method="basic", username="metax") - response = self.client.post('/rest/v2/datasets?draft', self.minimal_draft, format="json") + response = self.client.post("/rest/v2/datasets?draft", self.minimal_draft, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def test_allow_files_and_dirs_in_draft_catalog(self): - ''' Files can be added to datasets that are in draft catalog ''' - self._use_http_authorization(method='basic', username='metax') - - for type in ['files', 'directories']: - self.minimal_draft['research_dataset'][type] = [ - { - "identifier": "pid:urn:{}1".format('' if type == 'files' else 'dir:') - } + """ Files can be added to datasets that are in draft catalog """ + self._use_http_authorization(method="basic", username="metax") + + for type in ["files", "directories"]: + 
self.minimal_draft["research_dataset"][type] = [ + {"identifier": "pid:urn:{}1".format("" if type == "files" else "dir:")} ] - response = self.client.post('/rest/v2/datasets?draft', self.minimal_draft, format="json") + response = self.client.post( + "/rest/v2/datasets?draft", self.minimal_draft, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - response = self.client.get(f'/rest/v2/datasets/{response.data["id"]}/files', format="json") + response = self.client.get( + f'/rest/v2/datasets/{response.data["id"]}/files', format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertTrue(response.data, response.data) - self.minimal_draft['research_dataset'].pop(type) + self.minimal_draft["research_dataset"].pop(type) def test_publish_in_draft_catalog_is_not_allowed(self): - self._use_http_authorization(method='basic', username='metax') + self._use_http_authorization(method="basic", username="metax") - del self.cr_test_data['data_catalog'] - del self.cr_test_data['research_dataset']['files'] + del self.cr_test_data["data_catalog"] + del self.cr_test_data["research_dataset"]["files"] - response = self.client.post('/rest/v2/datasets?draft', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets?draft", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) cr = response.data - response = self.client.post(f'/rpc/v2/datasets/publish_dataset?identifier={cr["id"]}', format="json") + response = self.client.post( + f'/rpc/v2/datasets/publish_dataset?identifier={cr["id"]}', format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) def test_prevent_update_published_dataset_to_draft_catalog(self): - self._use_http_authorization(method='basic', username='metax') + self._use_http_authorization(method="basic", username="metax") - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) cr = response.data - cr['data_catalog'] = {"identifier": DFT_CATALOG} + cr["data_catalog"] = {"identifier": DFT_CATALOG} response = self.client.put(f'/rest/v2/datasets/{cr["id"]}', cr, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) @@ -429,31 +473,29 @@ def test_allow_remote_resources_in_ida_for_drafts(self): """ When dataset is in draft state, it should be validated with dft catalog """ - self.cr_test_data['data_catalog'] = {"identifier": IDA_CATALOG} - self.cr_test_data['research_dataset']['remote_resources'] = [ - { - "title": "some title", - "use_category": {"identifier": "source"} - } + self.cr_test_data["data_catalog"] = {"identifier": IDA_CATALOG} + self.cr_test_data["research_dataset"]["remote_resources"] = [ + {"title": "some title", "use_category": {"identifier": "source"}} ] - response = self.client.post('/rest/v2/datasets?draft', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets?draft", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def test_allow_file_additions_to_drafts(self): """ Files can be added later and the metadata can be modified via RPC apis. 
""" - files = {"files": [ {"identifier": "pid:urn:5"} ]} + files = {"files": [{"identifier": "pid:urn:5"}]} for catalog in [IDA_CATALOG, ATT_CATALOG, DFT_CATALOG]: - self.cr_test_data['data_catalog'] = {"identifier": catalog} - response = self.client.post('/rest/v2/datasets?draft', self.cr_test_data, format="json") + self.cr_test_data["data_catalog"] = {"identifier": catalog} + response = self.client.post("/rest/v2/datasets?draft", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) cr = response.data response = self.client.post(f'/rest/v2/datasets/{cr["id"]}/files', files, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['files_added'], 1, response.data) + self.assertEqual(response.data["files_added"], 1, response.data) + class CatalogRecordDraftsOfPublished(CatalogRecordApiWriteCommon): @@ -462,35 +504,41 @@ class CatalogRecordDraftsOfPublished(CatalogRecordApiWriteCommon): """ def _create_dataset(self, cumulative=False, draft=False, with_files=False): - draft = 'true' if draft else 'false' + draft = "true" if draft else "false" cumulative_state = 1 if cumulative else 0 - self.cr_test_data['cumulative_state'] = cumulative_state + self.cr_test_data["cumulative_state"] = cumulative_state if not with_files: - self.cr_test_data['research_dataset'].pop('files', None) - self.cr_test_data['research_dataset'].pop('directories', None) + self.cr_test_data["research_dataset"].pop("files", None) + self.cr_test_data["research_dataset"].pop("directories", None) - response = self.client.post('/rest/v2/datasets?draft=%s' % draft, self.cr_test_data, format="json") + response = self.client.post( + "/rest/v2/datasets?draft=%s" % draft, self.cr_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) return response.data def _create_draft(self, id): # create draft - response = self.client.post('/rpc/v2/datasets/create_draft?identifier=%d' % id, format="json") + response = self.client.post( + "/rpc/v2/datasets/create_draft?identifier=%d" % id, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - draft_id = response.data['id'] + draft_id = response.data["id"] # retrieve draft data for modifications - response = self.client.get('/rest/v2/datasets/%s' % draft_id, format="json") + response = self.client.get("/rest/v2/datasets/%s" % draft_id, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('draft_of' in response.data, True, response.data) - self.assertEqual(response.data['draft_of']['id'], id, response.data) + self.assertEqual("draft_of" in response.data, True, response.data) + self.assertEqual(response.data["draft_of"]["id"], id, response.data) return response.data def _merge_draft_changes(self, draft_id): - response = self.client.post('/rpc/v2/datasets/merge_draft?identifier=%d' % draft_id, format="json") + response = self.client.post( + "/rpc/v2/datasets/merge_draft?identifier=%d" % draft_id, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # draft should be permanently destroyed @@ -503,11 +551,15 @@ def test_create_draft_not_locked_before_merge(self): """ cr = self._create_dataset() # create draft - response = self.client.post('/rpc/v2/datasets/create_draft?identifier=%d' % cr['id'], format="json") + response = self.client.post( + "/rpc/v2/datasets/create_draft?identifier=%d" % 
cr["id"], format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) # create draft - response = self.client.post('/rpc/v2/datasets/create_draft?identifier=%d' % cr['id'], format="json") + response = self.client.post( + "/rpc/v2/datasets/create_draft?identifier=%d" % cr["id"], format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) def test_create_and_merge_draft(self): @@ -515,47 +567,51 @@ def test_create_and_merge_draft(self): A simple test to create a draft, change some metadata, and publish the changes. """ cr = self._create_dataset() - initial_title = cr['research_dataset']['title'] + initial_title = cr["research_dataset"]["title"] # create draft - draft_cr = self._create_draft(cr['id']) - draft_cr['research_dataset']['title']['en'] = 'modified title' - draft_cr['preservation_state'] = 20 + draft_cr = self._create_draft(cr["id"]) + draft_cr["research_dataset"]["title"]["en"] = "modified title" + draft_cr["preservation_state"] = 20 # ensure original now has a link to next_draft - response = self.client.get('/rest/v2/datasets/%s' % cr['id'], format="json") + response = self.client.get("/rest/v2/datasets/%s" % cr["id"], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('next_draft' in response.data, True, response.data) - self.assertEqual(response.data['next_draft']['id'], draft_cr['id'], response.data) + self.assertEqual("next_draft" in response.data, True, response.data) + self.assertEqual(response.data["next_draft"]["id"], draft_cr["id"], response.data) # update the draft - response = self.client.put('/rest/v2/datasets/%d' % draft_cr['id'], draft_cr, format="json") + response = self.client.put("/rest/v2/datasets/%d" % draft_cr["id"], draft_cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # ensure original dataset # - does not have the changes yet, since draft has not been published # - has next_draft link, pointing to the preciously created draft - original_cr = CR.objects.get(pk=cr['id']) - self.assertEqual(original_cr.research_dataset['title'], initial_title, original_cr.research_dataset['title']) - self.assertEqual(original_cr.next_draft_id, draft_cr['id']) + original_cr = CR.objects.get(pk=cr["id"]) + self.assertEqual( + original_cr.research_dataset["title"], + initial_title, + original_cr.research_dataset["title"], + ) + self.assertEqual(original_cr.next_draft_id, draft_cr["id"]) # merge draft changes back to original published dataset - self._merge_draft_changes(draft_cr['id']) + self._merge_draft_changes(draft_cr["id"]) # changes should now reflect on original published dataset - response = self.client.get('/rest/v2/datasets/%s' % cr['id'], format="json") + response = self.client.get("/rest/v2/datasets/%s" % cr["id"], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual( - response.data['research_dataset']['title'], - draft_cr['research_dataset']['title'], - response.data + response.data["research_dataset"]["title"], + draft_cr["research_dataset"]["title"], + response.data, ) self.assertEqual( - response.data['preservation_state'], - draft_cr['preservation_state'], - response.data + response.data["preservation_state"], + draft_cr["preservation_state"], + response.data, ) - self.assertEqual('next_draft' in response.data, False, 'next_draft link should be gone') + self.assertEqual("next_draft" in response.data, False, "next_draft link should be 
gone") def test_missing_issued_date_is_generated_when_draft_is_merged(self): """ @@ -563,26 +619,26 @@ def test_missing_issued_date_is_generated_when_draft_is_merged(self): it back to original published dataset """ cr = self._create_dataset() - initial_issued_date = cr['research_dataset']['issued'] + initial_issued_date = cr["research_dataset"]["issued"] # create draft - draft_cr = self._create_draft(cr['id']) - draft_cr['research_dataset'].pop('issued', None) + draft_cr = self._create_draft(cr["id"]) + draft_cr["research_dataset"].pop("issued", None) # update the draft - response = self.client.put('/rest/v2/datasets/%d' % draft_cr['id'], draft_cr, format="json") + response = self.client.put("/rest/v2/datasets/%d" % draft_cr["id"], draft_cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # merge draft changes back to original published dataset - self._merge_draft_changes(draft_cr['id']) + self._merge_draft_changes(draft_cr["id"]) # changes should now reflect on original published dataset - response = self.client.get('/rest/v2/datasets/%s' % cr['id'], format="json") + response = self.client.get("/rest/v2/datasets/%s" % cr["id"], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertNotEqual( - response.data['research_dataset']['issued'], + response.data["research_dataset"]["issued"], initial_issued_date, - response.data + response.data, ) def test_add_files_to_draft_normal_dataset(self): @@ -590,21 +646,23 @@ def test_add_files_to_draft_normal_dataset(self): Test case where dataset has 0 files in the beginning. """ cr = self._create_dataset(with_files=False) - draft_cr = self._create_draft(cr['id']) + draft_cr = self._create_draft(cr["id"]) # add file to draft - file_changes = { 'files': [{ 'identifier': 'pid:urn:1' }]} - response = self.client.post('/rest/v2/datasets/%d/files' % draft_cr['id'], file_changes, format="json") + file_changes = {"files": [{"identifier": "pid:urn:1"}]} + response = self.client.post( + "/rest/v2/datasets/%d/files" % draft_cr["id"], file_changes, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # ensure original has no files - self.assertEqual(CR.objects.get(pk=cr['id']).files.count(), 0) + self.assertEqual(CR.objects.get(pk=cr["id"]).files.count(), 0) # merge draft changes back to original published dataset - self._merge_draft_changes(draft_cr['id']) + self._merge_draft_changes(draft_cr["id"]) # ensure original now has the files - self.assertEqual(CR.objects.get(pk=cr['id']).files.count(), 1) + self.assertEqual(CR.objects.get(pk=cr["id"]).files.count(), 1) def test_add_files_to_draft_when_files_already_exist(self): """ @@ -612,11 +670,13 @@ def test_add_files_to_draft_when_files_already_exist(self): or removing files should be prevented. 
""" cr = self._create_dataset(with_files=True) - draft_cr = self._create_draft(cr['id']) + draft_cr = self._create_draft(cr["id"]) # add file to draft - file_changes = { 'files': [{ 'identifier': 'pid:urn:10' }]} - response = self.client.post('/rest/v2/datasets/%d/files' % draft_cr['id'], file_changes, format="json") + file_changes = {"files": [{"identifier": "pid:urn:10"}]} + response = self.client.post( + "/rest/v2/datasets/%d/files" % draft_cr["id"], file_changes, format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) def test_add_files_to_draft_cumulative_dataset(self): @@ -624,45 +684,49 @@ def test_add_files_to_draft_cumulative_dataset(self): Adding new files to cumulative draft should be ok. Removing files should be prevented. """ cr = self._create_dataset(cumulative=True, with_files=True) - draft_cr = self._create_draft(cr['id']) + draft_cr = self._create_draft(cr["id"]) # try to remove a file. should be stopped - file_changes = { 'files': [{ 'identifier': 'pid:urn:1', 'exclude': True }]} - response = self.client.post('/rest/v2/datasets/%d/files' % draft_cr['id'], file_changes, format="json") + file_changes = {"files": [{"identifier": "pid:urn:1", "exclude": True}]} + response = self.client.post( + "/rest/v2/datasets/%d/files" % draft_cr["id"], file_changes, format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) # now add files - file_changes = { 'files': [{ 'identifier': 'pid:urn:10' }]} - response = self.client.post('/rest/v2/datasets/%d/files' % draft_cr['id'], file_changes, format="json") + file_changes = {"files": [{"identifier": "pid:urn:10"}]} + response = self.client.post( + "/rest/v2/datasets/%d/files" % draft_cr["id"], file_changes, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # ensure original has no files YET - self.assertEqual(CR.objects.get(pk=cr['id']).files.count(), 2) + self.assertEqual(CR.objects.get(pk=cr["id"]).files.count(), 2) # merge draft changes back to original published dataset - self._merge_draft_changes(draft_cr['id']) + self._merge_draft_changes(draft_cr["id"]) # ensure original now has the files - self.assertEqual(CR.objects.get(pk=cr['id']).files.count(), 3) + self.assertEqual(CR.objects.get(pk=cr["id"]).files.count(), 3) def test_delete_draft_of_published_dataset(self): """ Delete draft of a published dataset. 
""" cr = self._create_dataset(with_files=False) - draft_cr = self._create_draft(cr['id']) + draft_cr = self._create_draft(cr["id"]) - response = self.client.delete('/rest/v2/datasets/%d' % draft_cr['id'], format="json") + response = self.client.delete("/rest/v2/datasets/%d" % draft_cr["id"], format="json") self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) # draft should be deleted permanently - draft_found = CR.objects_unfiltered.filter(pk=draft_cr['id']).exists() + draft_found = CR.objects_unfiltered.filter(pk=draft_cr["id"]).exists() self.assertEqual(draft_found, False) # ensure original now has a link to next_draft - response = self.client.get('/rest/v2/datasets/%s' % cr['id'], format="json") + response = self.client.get("/rest/v2/datasets/%s" % cr["id"], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('next_draft' in response.data, False, 'next_draft link should be gone') + self.assertEqual("next_draft" in response.data, False, "next_draft link should be gone") def test_deprecated_draft(self): """ @@ -670,37 +734,47 @@ def test_deprecated_draft(self): """ cr = self._create_dataset(with_files=True) - cr_files = self.client.get('/rest/v2/datasets/%s?include_user_metadata&file_details' - % cr['id'], format="json") - cr_files = [f['identifier'] for f in cr_files.data['research_dataset']['files']] + cr_files = self.client.get( + "/rest/v2/datasets/%s?include_user_metadata&file_details" % cr["id"], + format="json", + ) + cr_files = [f["identifier"] for f in cr_files.data["research_dataset"]["files"]] - draft_cr = self.client.post('/rpc/v2/datasets/create_draft?identifier=%d' % cr['id'], format="json") + draft_cr = self.client.post( + "/rpc/v2/datasets/create_draft?identifier=%d" % cr["id"], format="json" + ) - delete_files = self.client.delete('/rest/v2/files', cr_files, format="json") + delete_files = self.client.delete("/rest/v2/files", cr_files, format="json") self.assertEqual(delete_files.status_code, status.HTTP_200_OK, delete_files.data) - deprecated = self.client.get('/rest/v2/datasets/%s' % cr['id'], format='json') + deprecated = self.client.get("/rest/v2/datasets/%s" % cr["id"], format="json") self.assertEqual(deprecated.status_code, status.HTTP_200_OK, deprecated.data) - self.assertTrue(deprecated.data['deprecated'], deprecated.data['deprecated']) + self.assertTrue(deprecated.data["deprecated"], deprecated.data["deprecated"]) - response = self.client.post('/rpc/v2/datasets/merge_draft?identifier=%d' % draft_cr.data['id'], format="json") + response = self.client.post( + "/rpc/v2/datasets/merge_draft?identifier=%d" % draft_cr.data["id"], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertTrue('The origin dataset of this draft is deprecated' in response.data['detail'][0], response.data) + self.assertTrue( + "The origin dataset of this draft is deprecated" in response.data["detail"][0], + response.data, + ) def test_delete_published_dataset_with_an_unmerged_draft(self): """ Delete published dataset that has an unmerged draft """ cr = self._create_dataset(with_files=False) - draft_cr = self._create_draft(cr['id']) + draft_cr = self._create_draft(cr["id"]) - response = self.client.delete('/rest/v2/datasets/%d' % cr['id'], format="json") + response = self.client.delete("/rest/v2/datasets/%d" % cr["id"], format="json") self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) # Both are be deleted - draft_found 
= CR.objects_unfiltered.filter(pk=draft_cr['id']).exists() + draft_found = CR.objects_unfiltered.filter(pk=draft_cr["id"]).exists() self.assertEqual(draft_found, False) - original = self.client.get('/rest/v2/datasets/%s?removed=true' % cr['id'], format='json') + original = self.client.get("/rest/v2/datasets/%s?removed=true" % cr["id"], format="json") self.assertEqual(original.status_code, status.HTTP_200_OK, original.data) - self.assertEqual(original.data['removed'], True, original.data) + self.assertEqual(original.data["removed"], True, original.data) diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py b/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py index 86e20140..8cdc088c 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py @@ -24,19 +24,21 @@ class CatalogRecordApiWriteAssignFilesCommon(CatalogRecordApiWriteCommon, TestCl """ def _get_file_from_test_data(self): - from_test_data = self._get_object_from_test_data('file', requested_index=0) - from_test_data.update({ - "checksum": { - "value": "checksumvalue", - "algorithm": "SHA-256", - "checked": "2017-05-23T10:07:22.559656Z", - }, - "file_name": "must_replace", - "file_path": "must_replace", - "identifier": "must_replace", - "project_identifier": "must_replace", - "file_storage": self._get_object_from_test_data('filestorage', requested_index=0) - }) + from_test_data = self._get_object_from_test_data("file", requested_index=0) + from_test_data.update( + { + "checksum": { + "value": "checksumvalue", + "algorithm": "SHA-256", + "checked": "2017-05-23T10:07:22.559656Z", + }, + "file_name": "must_replace", + "file_path": "must_replace", + "identifier": "must_replace", + "project_identifier": "must_replace", + "file_storage": self._get_object_from_test_data("filestorage", requested_index=0), + } + ) return from_test_data def _form_test_file_hierarchy(self): @@ -48,72 +50,72 @@ def _form_test_file_hierarchy(self): { "file_name": "file_01.txt", "file_path": "/TestExperiment/Directory_1/Group_1/file_01.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_02.txt", "file_path": "/TestExperiment/Directory_1/Group_1/file_02.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_03.txt", "file_path": "/TestExperiment/Directory_1/Group_2/file_03.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_04.txt", "file_path": "/TestExperiment/Directory_1/Group_2/file_04.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_05.txt", "file_path": "/TestExperiment/Directory_1/file_05.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_06.txt", "file_path": "/TestExperiment/Directory_1/file_06.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_07.txt", "file_path": "/TestExperiment/Directory_2/Group_1/file_07.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_08.txt", "file_path": "/TestExperiment/Directory_2/Group_1/file_08.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_09.txt", "file_path": "/TestExperiment/Directory_2/Group_2/file_09.txt", - 'project_identifier': 'testproject', + 
"project_identifier": "testproject", }, { "file_name": "file_10.txt", "file_path": "/TestExperiment/Directory_2/Group_2/file_10.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_11.txt", "file_path": "/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_11.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_12.txt", "file_path": "/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_12.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_13.txt", "file_path": "/TestExperiment/Directory_2/file_13.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_14.txt", "file_path": "/TestExperiment/Directory_2/file_14.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, ] @@ -121,70 +123,69 @@ def _form_test_file_hierarchy(self): { "file_name": "file_15.txt", "file_path": "/SecondExperiment/Directory_1/Group_1/file_15.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_16.txt", "file_path": "/SecondExperiment/Directory_1/Group_1/file_16.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_17.txt", "file_path": "/SecondExperiment/Directory_1/Group_2/file_18.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_18.txt", "file_path": "/SecondExperiment/Directory_1/Group_2/file_18.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_19.txt", "file_path": "/SecondExperiment/Directory_1/file_19.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_20.txt", "file_path": "/SecondExperiment/Directory_1/file_20.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_21.txt", "file_path": "/SecondExperiment/Data/file_21.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_22.txt", "file_path": "/SecondExperiment/Data_Config/file_22.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_23.txt", "file_path": "/SecondExperiment/Data_Config/file_23.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, { "file_name": "file_24.txt", "file_path": "/SecondExperiment/Data/History/file_24.txt", - 'project_identifier': 'testproject_2', + "project_identifier": "testproject_2", }, - ] file_template = self._get_file_from_test_data() - del file_template['id'] - self._single_file_byte_size = file_template['byte_size'] + del file_template["id"] + self._single_file_byte_size = file_template["byte_size"] files_1 = [] for i, f in enumerate(file_data_1): file = deepcopy(file_template) - file.update(f, identifier='test:file:%s' % f['file_name'][-6:-4]) + file.update(f, identifier="test:file:%s" % f["file_name"][-6:-4]) files_1.append(file) files_2 = [] for i, f in enumerate(file_data_2): file = deepcopy(file_template) - file.update(f, identifier='test:file:%s' % f['file_name'][-6:-4]) + file.update(f, identifier="test:file:%s" % f["file_name"][-6:-4]) files_2.append(file) return files_1, files_2 @@ -196,8 +197,8 @@ def _research_dataset_or_file_changes(self, rd_or_file_changes): 
Methods that call this helper can treat the returned object as either, and only operate with "files" and "directories" keys. """ - if 'research_dataset' in rd_or_file_changes: - files_and_dirs = rd_or_file_changes['research_dataset'] + if "research_dataset" in rd_or_file_changes: + files_and_dirs = rd_or_file_changes["research_dataset"] else: files_and_dirs = rd_or_file_changes @@ -207,25 +208,25 @@ def _add_directory(self, ds, path, project=None): """ Add directory to research_dataset object or file_changes object. """ - params = { 'directory_path': path } + params = {"directory_path": path} if project: - params['project_identifier'] = project + params["project_identifier"] = project identifier = Directory.objects.get(**params).identifier files_and_dirs = self._research_dataset_or_file_changes(ds) - if 'directories' not in files_and_dirs: - files_and_dirs['directories'] = [] + if "directories" not in files_and_dirs: + files_and_dirs["directories"] = [] - files_and_dirs['directories'].append({ - "identifier": identifier, - "title": "Directory Title", - "description": "This is directory at %s" % path, - "use_category": { - "identifier": "method" + files_and_dirs["directories"].append( + { + "identifier": identifier, + "title": "Directory Title", + "description": "This is directory at %s" % path, + "use_category": {"identifier": "method"}, } - }) + ) def _add_file(self, ds, path): """ @@ -235,17 +236,17 @@ files_and_dirs = self._research_dataset_or_file_changes(ds) - if 'files' not in files_and_dirs: - files_and_dirs['files'] = [] + if "files" not in files_and_dirs: + files_and_dirs["files"] = [] - files_and_dirs['files'].append({ - "identifier": identifier, - "title": "File Title", - "description": "This is file at %s" % path, - "use_category": { - "identifier": "method" + files_and_dirs["files"].append( + { + "identifier": identifier, + "title": "File Title", + "description": "This is file at %s" % path, + "use_category": {"identifier": "method"}, } - }) + ) def _add_nonexisting_directory(self, ds): """ @@ -253,14 +254,14 @@ """ files_and_dirs = self._research_dataset_or_file_changes(ds) - files_and_dirs['directories'] = [{ - "identifier": "doesnotexist", - "title": "Directory Title", - "description": "This is directory does not exist", - "use_category": { - "identifier": "method" + files_and_dirs["directories"] = [ + { + "identifier": "doesnotexist", + "title": "Directory Title", + "description": "This is directory does not exist", + "use_category": {"identifier": "method"}, } - }] + ] def _add_nonexisting_file(self, ds): """ @@ -268,14 +269,14 @@ """ files_and_dirs = self._research_dataset_or_file_changes(ds) - files_and_dirs['files'] = [{ - "identifier": "doesnotexist", - "title": "File Title", - "description": "This is file does not exist", - "use_category": { - "identifier": "method" + files_and_dirs["files"] = [ + { + "identifier": "doesnotexist", + "title": "File Title", + "description": "This is file does not exist", + "use_category": {"identifier": "method"}, } - }] + ] def _remove_directory(self, ds, path): """ @@ -283,16 +284,16 @@ """ files_and_dirs = self._research_dataset_or_file_changes(ds) - if 'directories' not in files_and_dirs: - raise Exception('ds has no dirs') + if "directories" not in files_and_dirs: + raise Exception("ds has no dirs") identifier = Directory.objects.get(directory_path=path).identifier - for i, dr in
enumerate(files_and_dirs['directories']): - if dr['identifier'] == identifier: - ds['research_dataset']['directories'].pop(i) + for i, dr in enumerate(files_and_dirs["directories"]): + if dr["identifier"] == identifier: + ds["research_dataset"]["directories"].pop(i) return - raise Exception('path %s not found in directories' % path) + raise Exception("path %s not found in directories" % path) def _remove_file(self, ds, path): """ @@ -300,41 +301,41 @@ def _remove_file(self, ds, path): """ files_and_dirs = self._research_dataset_or_file_changes(ds) - if 'files' not in files_and_dirs: - raise Exception('ds has no files') + if "files" not in files_and_dirs: + raise Exception("ds has no files") identifier = File.objects.get(file_path=path).identifier - for i, f in enumerate(files_and_dirs['files']): - if f['identifier'] == identifier: - ds['research_dataset']['files'].pop(i) + for i, f in enumerate(files_and_dirs["files"]): + if f["identifier"] == identifier: + ds["research_dataset"]["files"].pop(i) return - raise Exception('path %s not found in files' % path) + raise Exception("path %s not found in files" % path) def _freeze_new_files(self): file_data = [ { "file_name": "file_90.txt", "file_path": "/TestExperiment/Directory_2/Group_3/file_90.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_91.txt", "file_path": "/TestExperiment/Directory_2/Group_3/file_91.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, ] file_template = self._get_file_from_test_data() - del file_template['id'] - self._single_file_byte_size = file_template['byte_size'] + del file_template["id"] + self._single_file_byte_size = file_template["byte_size"] files = [] for i, f in enumerate(file_data): file = deepcopy(file_template) - file.update(f, identifier='frozen:later:file:%s' % f['file_name'][-6:-4]) + file.update(f, identifier="frozen:later:file:%s" % f["file_name"][-6:-4]) files.append(file) - response = self.client.post('/rest/v2/files', files, format="json") + response = self.client.post("/rest/v2/files", files, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) def _freeze_files_to_root(self): @@ -342,25 +343,25 @@ def _freeze_files_to_root(self): { "file_name": "file_56.txt", "file_path": "/TestExperiment/Directory_2/file_56.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, { "file_name": "file_57.txt", "file_path": "/TestExperiment/Directory_2/file_57.txt", - 'project_identifier': 'testproject', + "project_identifier": "testproject", }, ] file_template = self._get_file_from_test_data() - del file_template['id'] - self._single_file_byte_size = file_template['byte_size'] + del file_template["id"] + self._single_file_byte_size = file_template["byte_size"] files = [] for i, f in enumerate(file_data): file = deepcopy(file_template) - file.update(f, identifier='frozen:later:file:%s' % f['file_name'][-6:-4]) + file.update(f, identifier="frozen:later:file:%s" % f["file_name"][-6:-4]) files.append(file) - response = self.client.post('/rest/v2/files', files, format="json") + response = self.client.post("/rest/v2/files", files, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) def setUp(self): @@ -370,30 +371,37 @@ def setUp(self): - create 12 new files in a new project """ super().setUp() - self.cr_test_data['research_dataset'].pop('id', None) - self.cr_test_data['research_dataset'].pop('preferred_identifier', None) - 
self.cr_test_data['research_dataset'].pop('files', None) - self.cr_test_data['research_dataset'].pop('directories', None) + self.cr_test_data["research_dataset"].pop("id", None) + self.cr_test_data["research_dataset"].pop("preferred_identifier", None) + self.cr_test_data["research_dataset"].pop("files", None) + self.cr_test_data["research_dataset"].pop("directories", None) project_files = self._form_test_file_hierarchy() for p_files in project_files: - response = self.client.post('/rest/v2/files', p_files, format="json") + response = self.client.post("/rest/v2/files", p_files, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def assert_preferred_identifier_changed(self, response, true_or_false): self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('next_dataset_version' in response.data, true_or_false, - 'this field should only be present if preferred_identifier changed') + self.assertEqual( + "next_dataset_version" in response.data, + true_or_false, + "this field should only be present if preferred_identifier changed", + ) if true_or_false is True: - self.assertEqual(response.data['research_dataset']['preferred_identifier'] != - response.data['next_dataset_version']['preferred_identifier'], true_or_false) + self.assertEqual( + response.data["research_dataset"]["preferred_identifier"] + != response.data["next_dataset_version"]["preferred_identifier"], + true_or_false, + ) def assert_file_count(self, cr, expected_file_count): self.assertEqual( - CatalogRecordV2.objects.get(pk=cr if type(cr) is int else cr['id']).files.count(), expected_file_count + CatalogRecordV2.objects.get(pk=cr if type(cr) is int else cr["id"]).files.count(), + expected_file_count, ) def assert_total_files_byte_size(self, cr, expected_size): - self.assertEqual(cr['research_dataset']['total_files_byte_size'], expected_size) + self.assertEqual(cr["research_dataset"]["total_files_byte_size"], expected_size) class CatalogRecordApiWriteAssignFilesCommonV2(CatalogRecordApiWriteAssignFilesCommon): @@ -415,16 +423,16 @@ def _add_file(self, ds, path, with_metadata=False): # else: addition entry only, which will not be persisted. keep only identifier - files_and_dirs['files'][-1] = { 'identifier': files_and_dirs['files'][-1]['identifier'] } + files_and_dirs["files"][-1] = {"identifier": files_and_dirs["files"][-1]["identifier"]} def _exclude_file(self, ds, path): self._add_file(ds, path) files_and_dirs = self._research_dataset_or_file_changes(ds) - files_and_dirs['files'][-1]['exclude'] = True + files_and_dirs["files"][-1]["exclude"] = True - assert len(files_and_dirs['files'][-1]) == 2 + assert len(files_and_dirs["files"][-1]) == 2 def _add_directory(self, ds, path, project=None, with_metadata=False): super()._add_directory(ds, path) @@ -436,33 +444,34 @@ def _add_directory(self, ds, path, project=None, with_metadata=False): # else: addition entry only, which will not be persisted. 
keep only identifier - files_and_dirs['directories'][-1] = { 'identifier': files_and_dirs['directories'][-1]['identifier'] } + files_and_dirs["directories"][-1] = { + "identifier": files_and_dirs["directories"][-1]["identifier"] + } def _exclude_directory(self, ds, path): self._add_directory(ds, path) files_and_dirs = self._research_dataset_or_file_changes(ds) - files_and_dirs['directories'][-1]['exclude'] = True + files_and_dirs["directories"][-1]["exclude"] = True - assert len(files_and_dirs['directories'][-1]) == 2 + assert len(files_and_dirs["directories"][-1]) == 2 class CatalogRecordFileHandling(CatalogRecordApiWriteAssignFilesCommonV2): - def _set_token_authentication(self): self.create_end_user_data_catalogs() self.token = get_test_oidc_token() - self.token['group_names'].append('IDA01:testproject') - self._use_http_authorization(method='bearer', token=self.token) + self.token["group_names"].append("IDA01:testproject") + self._use_http_authorization(method="bearer", token=self.token) self._mock_token_validation_succeeds() def _create_draft(self): - self.cr_test_data['research_dataset'].pop('files', None) - self.cr_test_data['research_dataset'].pop('directories', None) - response = self.client.post('/rest/v2/datasets?draft', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"].pop("files", None) + self.cr_test_data["research_dataset"].pop("directories", None) + response = self.client.post("/rest/v2/datasets?draft", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - return response.data['id'] + return response.data["id"] @responses.activate def test_authorization(self): @@ -474,9 +483,13 @@ def test_authorization(self): self._set_token_authentication() for with_metadata in (False, True): - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/file_05.txt') - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/file_06.txt', with_metadata=with_metadata) - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/file_05.txt") + self._add_file( + self.cr_test_data, + "/TestExperiment/Directory_1/file_06.txt", + with_metadata=with_metadata, + ) + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) @responses.activate @@ -487,24 +500,24 @@ def test_retrieve_dataset_file_projects(self): """ self._set_token_authentication() - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/file_05.txt') - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/file_05.txt") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['id'] + cr_id = response.data["id"] # user owns dataset - response = self.client.get('/rest/v2/datasets/%d/projects' % cr_id, format="json") + response = self.client.get("/rest/v2/datasets/%d/projects" % cr_id, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual(len(response.data), 1, response.data) # user is authenticated, but does not own dataset - response = self.client.get('/rest/v2/datasets/1/projects', format="json") + response = self.client.get("/rest/v2/datasets/1/projects", 
format="json") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) # anonymous user self.client._credentials = {} - response = self.client.get('/rest/v2/datasets/%d/projects' % cr_id, format="json") + response = self.client.get("/rest/v2/datasets/%d/projects" % cr_id, format="json") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) def test_include_user_metadata_parameter(self): @@ -512,24 +525,30 @@ def test_include_user_metadata_parameter(self): When retrieving datasets, by default the "user metadata", or "dataset-specific file metadata" stored in research_dataset.files and research_dataset.directories should not be returned. """ - response = self.client.get('/rest/v2/datasets/1', format="json") + response = self.client.get("/rest/v2/datasets/1", format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('files' in response.data['research_dataset'], False, response.data) + self.assertEqual("files" in response.data["research_dataset"], False, response.data) - response = self.client.get('/rest/v2/datasets/1?include_user_metadata', format="json") + response = self.client.get("/rest/v2/datasets/1?include_user_metadata", format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('files' in response.data['research_dataset'], True, response.data) + self.assertEqual("files" in response.data["research_dataset"], True, response.data) def test_create_files_are_saved(self): """ A very simple "add two individual files" test. Only entries with dataset-specific metadata should be persisted in research_dataset.files. """ - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/file_05.txt') - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/file_06.txt', with_metadata=True) - response = self.client.post('/rest/v2/datasets?include_user_metadata', self.cr_test_data, format="json") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/file_05.txt") + self._add_file( + self.cr_test_data, + "/TestExperiment/Directory_1/file_06.txt", + with_metadata=True, + ) + response = self.client.post( + "/rest/v2/datasets?include_user_metadata", self.cr_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(len(response.data['research_dataset']['files']), 1) + self.assertEqual(len(response.data["research_dataset"]["files"]), 1) self.assert_file_count(response.data, 2) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 2) @@ -537,12 +556,20 @@ def test_allowed_projects(self): """ Test the ?allowed_projects=x,y parameter when adding files. 
""" - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/file_05.txt') + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/file_05.txt") - response = self.client.post('/rest/v2/datasets?allowed_projects=testproject', self.cr_test_data, format="json") + response = self.client.post( + "/rest/v2/datasets?allowed_projects=testproject", + self.cr_test_data, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - response = self.client.post('/rest/v2/datasets?allowed_projects=no,projects', self.cr_test_data, format="json") + response = self.client.post( + "/rest/v2/datasets?allowed_projects=no,projects", + self.cr_test_data, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) def test_create_directories_are_saved(self): @@ -550,11 +577,15 @@ def test_create_directories_are_saved(self): A very simple "add two individual directories" test. Only entries with dataset-specific metadata should be persisted in research_dataset.directories. """ - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_1/Group_1') - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_1/Group_2', with_metadata=True) - response = self.client.post('/rest/v2/datasets?include_user_metadata', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_1/Group_1") + self._add_directory( + self.cr_test_data, "/TestExperiment/Directory_1/Group_2", with_metadata=True + ) + response = self.client.post( + "/rest/v2/datasets?include_user_metadata", self.cr_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(len(response.data['research_dataset']['directories']), 1) + self.assertEqual(len(response.data["research_dataset"]["directories"]), 1) self.assert_file_count(response.data, 4) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 4) @@ -562,9 +593,9 @@ def test_create_exclude_files(self): """ Add directory of files, but exclude one file. """ - self._exclude_file(self.cr_test_data, '/TestExperiment/Directory_1/Group_1/file_01.txt') - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_1') - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self._exclude_file(self.cr_test_data, "/TestExperiment/Directory_1/Group_1/file_01.txt") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_1") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assert_file_count(response.data, 5) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 5) @@ -573,9 +604,9 @@ def test_create_exclude_directories(self): """ Add directory of files, but exclude one sub directory. 
""" - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_1') - self._exclude_directory(self.cr_test_data, '/TestExperiment/Directory_1/Group_1') - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_1") + self._exclude_directory(self.cr_test_data, "/TestExperiment/Directory_1/Group_1") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assert_file_count(response.data, 4) self.assert_total_files_byte_size(response.data, self._single_file_byte_size * 4) @@ -590,54 +621,66 @@ def test_update_add_and_exclude_files(self): file_changes = {} - self._add_file(file_changes, '/TestExperiment/Directory_1/Group_1/file_01.txt') - response = self.client.post('/rest/v2/datasets/%d/files' % cr_id, file_changes, format="json") + self._add_file(file_changes, "/TestExperiment/Directory_1/Group_1/file_01.txt") + response = self.client.post( + "/rest/v2/datasets/%d/files" % cr_id, file_changes, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('files_added'), 1, response.data) + self.assertEqual(response.data.get("files_added"), 1, response.data) self.assert_file_count(cr_id, 1) # executing the same request with same file entry should make no difference - response = self.client.post('/rest/v2/datasets/%d/files' % cr_id, file_changes, format="json") + response = self.client.post( + "/rest/v2/datasets/%d/files" % cr_id, file_changes, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('files_added'), 0, response.data) + self.assertEqual(response.data.get("files_added"), 0, response.data) self.assert_file_count(cr_id, 1) # adding a directory should add files that are not already added file_changes = {} - self._add_directory(file_changes, '/TestExperiment/Directory_1/Group_1') - response = self.client.post('/rest/v2/datasets/%d/files' % cr_id, file_changes, format="json") + self._add_directory(file_changes, "/TestExperiment/Directory_1/Group_1") + response = self.client.post( + "/rest/v2/datasets/%d/files" % cr_id, file_changes, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('files_added'), 1, response.data) + self.assertEqual(response.data.get("files_added"), 1, response.data) self.assert_file_count(cr_id, 2) # add even more files file_changes = {} - self._add_directory(file_changes, '/TestExperiment/Directory_1') - response = self.client.post('/rest/v2/datasets/%d/files' % cr_id, file_changes, format="json") + self._add_directory(file_changes, "/TestExperiment/Directory_1") + response = self.client.post( + "/rest/v2/datasets/%d/files" % cr_id, file_changes, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('files_added'), 4, response.data) + self.assertEqual(response.data.get("files_added"), 4, response.data) self.assert_file_count(cr_id, 6) # exclude previously added files from one directroy file_changes = {} - self._exclude_directory(file_changes, '/TestExperiment/Directory_1/Group_1') - response = self.client.post('/rest/v2/datasets/%d/files' % cr_id, file_changes, format="json") + self._exclude_directory(file_changes, "/TestExperiment/Directory_1/Group_1") + response = 
self.client.post( + "/rest/v2/datasets/%d/files" % cr_id, file_changes, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('files_removed'), 2, response.data) + self.assertEqual(response.data.get("files_removed"), 2, response.data) self.assert_file_count(cr_id, 4) # exclude all previously added files, but keep one file by adding an "add file" entry file_changes = {} - self._add_file(file_changes, '/TestExperiment/Directory_1/Group_2/file_03.txt') - self._exclude_directory(file_changes, '/TestExperiment') - response = self.client.post('/rest/v2/datasets/%d/files' % cr_id, file_changes, format="json") + self._add_file(file_changes, "/TestExperiment/Directory_1/Group_2/file_03.txt") + self._exclude_directory(file_changes, "/TestExperiment") + response = self.client.post( + "/rest/v2/datasets/%d/files" % cr_id, file_changes, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('files_added'), 0, response.data) - self.assertEqual(response.data.get('files_removed'), 3, response.data) + self.assertEqual(response.data.get("files_added"), 0, response.data) + self.assertEqual(response.data.get("files_removed"), 3, response.data) self.assert_file_count(cr_id, 1) def test_files_can_be_added_once_after_publishing(self): @@ -646,111 +689,129 @@ def test_files_can_be_added_once_after_publishing(self): without needing to create new dataset versions, so this is permitted. Subsequent file changes will requiree creating a new draft version first. """ - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['id'] + cr_id = response.data["id"] self.assert_file_count(cr_id, 0) file_changes = {} - self._add_file(file_changes, '/TestExperiment/Directory_1/Group_1/file_02.txt') - response = self.client.post('/rest/v2/datasets/%d/files' % cr_id, file_changes, format="json") + self._add_file(file_changes, "/TestExperiment/Directory_1/Group_1/file_02.txt") + response = self.client.post( + "/rest/v2/datasets/%d/files" % cr_id, file_changes, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('files_added'), 1, response.data) + self.assertEqual(response.data.get("files_added"), 1, response.data) # try to add a second time. 
should give an error file_changes = {} - self._add_file(file_changes, '/TestExperiment/Directory_1/Group_1/file_02.txt') - response = self.client.post('/rest/v2/datasets/%d/files' % cr_id, file_changes, format="json") + self._add_file(file_changes, "/TestExperiment/Directory_1/Group_1/file_02.txt") + response = self.client.post( + "/rest/v2/datasets/%d/files" % cr_id, file_changes, format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual('Changing files of a published dataset' in response.data['detail'][0], True, response.data) + self.assertEqual( + "Changing files of a published dataset" in response.data["detail"][0], + True, + response.data, + ) def test_prevent_changing_files_on_deprecated_datasets(self): cr = CR.objects.get(pk=1) cr.deprecated = True cr.force_save() - file_changes = { - 'files': [{ 'identifier': 'some file' }] - } + file_changes = {"files": [{"identifier": "some file"}]} - response = self.client.post('/rest/v2/datasets/1/files', file_changes, format="json") + response = self.client.post("/rest/v2/datasets/1/files", file_changes, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual('Changing files of a deprecated' in response.data['detail'][0], True, response.data) + self.assertEqual( + "Changing files of a deprecated" in response.data["detail"][0], + True, + response.data, + ) def test_prevent_adding_files_with_normal_update(self): cr_id = self._create_draft() - cr = self.client.get(f'/rest/v2/datasets/{cr_id}', format="json").data + cr = self.client.get(f"/rest/v2/datasets/{cr_id}", format="json").data - for type in ['files', 'directories']: - cr['research_dataset'][type] = [ - { - "identifier": 'pid:urn:%s1' % '' if type == 'files' else 'dir:' - } + for type in ["files", "directories"]: + cr["research_dataset"][type] = [ + {"identifier": "pid:urn:%s1" % ("" if type == "files" else "dir:")} ] - response = self.client.put(f'/rest/v2/datasets/{cr_id}?include_user_metadata', cr, format="json") + response = self.client.put( + f"/rest/v2/datasets/{cr_id}?include_user_metadata", cr, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(type in response.data['research_dataset'], False, response.data) + self.assertEqual(type in response.data["research_dataset"], False, response.data) - cr['research_dataset'].pop(type) + cr["research_dataset"].pop(type) # test for published dataset - self.cr_test_data.pop('files', None) - self.cr_test_data.pop('directories', None) + self.cr_test_data.pop("files", None) + self.cr_test_data.pop("directories", None) - response = self.client.post('/rest/v2/datasets/', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets/", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) cr = response.data - for type in ['files', 'directories']: - cr['research_dataset'][type] = [ - { - "identifier": 'pid:urn:%s1' % '' if type == 'files' else 'dir:' - } + for type in ["files", "directories"]: + cr["research_dataset"][type] = [ + {"identifier": "pid:urn:%s1" % ("" if type == "files" else "dir:")} ] - response = self.client.put(f'/rest/v2/datasets/{cr["id"]}?include_user_metadata', cr, format="json") + response = self.client.put( + f'/rest/v2/datasets/{cr["id"]}?include_user_metadata', cr, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) -
self.assertEqual(type in response.data['research_dataset'], False, response.data) + self.assertEqual(type in response.data["research_dataset"], False, response.data) - cr['research_dataset'].pop(type) + cr["research_dataset"].pop(type) def test_directory_entries_are_processed_in_order(self): """ Directory entries should be executed in the order they are given in the request body. """ # excluding should do nothing, since "add directory" entry is later - self._exclude_directory(self.cr_test_data, '/TestExperiment/Directory_1/Group_1') - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_1') - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self._exclude_directory(self.cr_test_data, "/TestExperiment/Directory_1/Group_1") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_1") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assert_file_count(response.data, 6) - self.cr_test_data['research_dataset'].pop('directories') + self.cr_test_data["research_dataset"].pop("directories") # exclusion should now have effect, since it is last - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_1') - self._exclude_directory(self.cr_test_data, '/TestExperiment/Directory_1/Group_1') - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_1") + self._exclude_directory(self.cr_test_data, "/TestExperiment/Directory_1/Group_1") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assert_file_count(response.data, 4) def test_allow_file_changes_only_on_drafts(self): - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/Group_1/file_01.txt') - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/Group_1/file_01.txt") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) file_changes = {} - self._add_file(file_changes, '/TestExperiment/Directory_1/Group_1/file_02.txt') - response = self.client.post('/rest/v2/datasets/%d/files' % response.data['id'], file_changes, format="json") + self._add_file(file_changes, "/TestExperiment/Directory_1/Group_1/file_02.txt") + response = self.client.post( + "/rest/v2/datasets/%d/files" % response.data["id"], + file_changes, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual('Changing files of a published dataset' in response.data['detail'][0], True, response.data) + self.assertEqual( + "Changing files of a published dataset" in response.data["detail"][0], + True, + response.data, + ) class CatalogRecordUserMetadata(CatalogRecordApiWriteAssignFilesCommonV2): @@ -764,16 +825,18 @@ def test_retrieve_file_metadata_only(self): cr_id = 11 # retrieve all "user metadata" of a dataset - response = self.client.get('/rest/v2/datasets/%d/files/user_metadata' % cr_id, format="json") + response = self.client.get( + "/rest/v2/datasets/%d/files/user_metadata" % cr_id, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - cr = CR.objects.only('research_dataset').get(pk=cr_id)
+ cr = CR.objects.only("research_dataset").get(pk=cr_id) - for object_type in ('files', 'directories'): + for object_type in ("files", "directories"): self.assertEqual( len(cr.research_dataset[object_type]), len(response.data[object_type]), - response.data + response.data, ) files_and_dirs = response.data @@ -781,234 +844,307 @@ def test_retrieve_file_metadata_only(self): params = [ { # a pid of single file - 'pid': 'pid:urn:2', - 'directory': 'false', - 'expect_to_find': True, + "pid": "pid:urn:2", + "directory": "false", + "expect_to_find": True, }, { # a pid of a directory - 'pid': 'pid:urn:dir:2', - 'directory': 'true', - 'expect_to_find': True, + "pid": "pid:urn:dir:2", + "directory": "true", + "expect_to_find": True, }, { # a pid of a directory not part of this dataset - 'pid': 'should not be found', - 'directory': 'true', - 'expect_to_find': False, - } + "pid": "should not be found", + "directory": "true", + "expect_to_find": False, + }, ] for p in params: # retrieve a single metadata entry response = self.client.get( - '/rest/v2/datasets/%d/files/%s/user_metadata?directory=%s' % (cr_id, p['pid'], p['directory']), - format="json" + "/rest/v2/datasets/%d/files/%s/user_metadata?directory=%s" + % (cr_id, p["pid"], p["directory"]), + format="json", ) - if p['expect_to_find'] is True: + if p["expect_to_find"] is True: self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('identifier' in response.data, True, response.data) + self.assertEqual("identifier" in response.data, True, response.data) else: self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.data) continue - if p['directory'] == 'true': - object_type = 'directories' + if p["directory"] == "true": + object_type = "directories" else: - object_type = 'files' + object_type = "files" for obj in files_and_dirs[object_type]: - if obj['identifier'] == response.data['identifier']: - if p['expect_to_find'] is True: + if obj["identifier"] == response.data["identifier"]: + if p["expect_to_find"] is True: self.assertEqual(obj, response.data, response.data) break else: - self.fail('pid %s should not have been found' % p['pid']) + self.fail("pid %s should not have been found" % p["pid"]) else: - if p['expect_to_find'] is True: - self.fail('Retrieved object %s was not found in research_dataset file data?' % p['pid']) + if p["expect_to_find"] is True: + self.fail( + "Retrieved object %s was not found in research_dataset file data?" + % p["pid"] + ) def test_dataset_files_schema(self): """ Ensure new schema file dataset_files_schema.json is used. 
""" - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['id'] + cr_id = response.data["id"] # use a non-schema field file_changes = {} - self._add_file(file_changes, '/TestExperiment/Directory_1/Group_1/file_02.txt', with_metadata=True) - file_changes['files'][0]['some_unexpected_file'] = 'should raise error' + self._add_file( + file_changes, + "/TestExperiment/Directory_1/Group_1/file_02.txt", + with_metadata=True, + ) + file_changes["files"][0]["some_unexpected_file"] = "should raise error" - response = self.client.post('/rest/v2/datasets/%d/files' % cr_id, file_changes, format="json") + response = self.client.post( + "/rest/v2/datasets/%d/files" % cr_id, file_changes, format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual('is not valid' in str(response.data['detail'][0]), True, response.data) + self.assertEqual("is not valid" in str(response.data["detail"][0]), True, response.data) # various mandatory fields missing - for mandatory_field in ('identifier', 'use_category'): + for mandatory_field in ("identifier", "use_category"): file_changes = {} - self._add_file(file_changes, '/TestExperiment/Directory_1/Group_1/file_02.txt', with_metadata=True) - file_changes['files'][0].pop(mandatory_field) + self._add_file( + file_changes, + "/TestExperiment/Directory_1/Group_1/file_02.txt", + with_metadata=True, + ) + file_changes["files"][0].pop(mandatory_field) - response = self.client.post('/rest/v2/datasets/%d/files' % cr_id, file_changes, format="json") + response = self.client.post( + "/rest/v2/datasets/%d/files" % cr_id, file_changes, format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual('is not valid' in str(response.data['detail'][0]), True, response.data) + self.assertEqual("is not valid" in str(response.data["detail"][0]), True, response.data) def test_update_metadata_only(self): - self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/Group_1/file_02.txt') - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/Group_1/file_02.txt") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('files' in response.data['research_dataset'], False, response.data) + self.assertEqual("files" in response.data["research_dataset"], False, response.data) - cr_id = response.data['id'] + cr_id = response.data["id"] # add metadata for one file file_changes = {} - self._add_file(file_changes, '/TestExperiment/Directory_1/Group_1/file_02.txt', with_metadata=True) - file_changes['files'][0]['title'] = 'New title' + self._add_file( + file_changes, + "/TestExperiment/Directory_1/Group_1/file_02.txt", + with_metadata=True, + ) + file_changes["files"][0]["title"] = "New title" - response = self.client.put('/rest/v2/datasets/%d/files/user_metadata' % cr_id, file_changes, format="json") + response = self.client.put( + "/rest/v2/datasets/%d/files/user_metadata" % cr_id, + file_changes, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual( - 
CR.objects.only('research_dataset').get(pk=cr_id).research_dataset['files'][0]['title'], - file_changes['files'][0]['title'] + CR.objects.only("research_dataset").get(pk=cr_id).research_dataset["files"][0]["title"], + file_changes["files"][0]["title"], ) # add metadata for two directories file_changes = {} - self._add_directory(file_changes, '/TestExperiment', with_metadata=True) - self._add_directory(file_changes, '/TestExperiment/Directory_1', with_metadata=True) - file_changes['directories'][0]['title'] = 'New dir title' - file_changes['directories'][0]['description'] = 'New dir description' - file_changes['directories'][1]['title'] = 'New dir title 2' - file_changes['directories'][1]['description'] = 'New dir description 2' + self._add_directory(file_changes, "/TestExperiment", with_metadata=True) + self._add_directory(file_changes, "/TestExperiment/Directory_1", with_metadata=True) + file_changes["directories"][0]["title"] = "New dir title" + file_changes["directories"][0]["description"] = "New dir description" + file_changes["directories"][1]["title"] = "New dir title 2" + file_changes["directories"][1]["description"] = "New dir description 2" file_count_before = CR.objects.get(pk=cr_id).files.count() - response = self.client.put('/rest/v2/datasets/%d/files/user_metadata' % cr_id, file_changes, format="json") + response = self.client.put( + "/rest/v2/datasets/%d/files/user_metadata" % cr_id, + file_changes, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - cr = CR.objects.only('research_dataset').get(pk=cr_id) + cr = CR.objects.only("research_dataset").get(pk=cr_id) self.assertEqual( - cr.files.count(), file_count_before, 'operation should only update metadata, but not add files' + cr.files.count(), + file_count_before, + "operation should only update metadata, but not add files", ) for index in (0, 1): - for field in ('title', 'description'): + for field in ("title", "description"): self.assertEqual( - cr.research_dataset['directories'][index][field], - file_changes['directories'][index][field] + cr.research_dataset["directories"][index][field], + file_changes["directories"][index][field], ) # update only one field using patch file_changes = {} - self._add_directory(file_changes, '/TestExperiment', with_metadata=True) - file_changes['directories'][0] = { - 'identifier': file_changes['directories'][0]['identifier'], - 'title': 'Changed dir title' + self._add_directory(file_changes, "/TestExperiment", with_metadata=True) + file_changes["directories"][0] = { + "identifier": file_changes["directories"][0]["identifier"], + "title": "Changed dir title", } - response = self.client.patch('/rest/v2/datasets/%d/files/user_metadata' % cr_id, file_changes, format="json") + response = self.client.patch( + "/rest/v2/datasets/%d/files/user_metadata" % cr_id, + file_changes, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual( - CR.objects.only('research_dataset').get(pk=cr_id).research_dataset['directories'][0]['title'], - file_changes['directories'][0]['title'] + CR.objects.only("research_dataset") + .get(pk=cr_id) + .research_dataset["directories"][0]["title"], + file_changes["directories"][0]["title"], ) # remove metadata entry. it should be ok that there are normal metadata-addition entries included # in the request body too. 
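For orientation, the payload shape these tests drive at: a single request to the user_metadata endpoint can mix metadata additions, partial updates and deletions. A minimal sketch of a deletion call, assuming an authenticated rest_framework.test.APIClient named `client` and a dataset pk `cr_id`; the directory pid is hypothetical, borrowed from the fixtures used above:

    file_changes = {
        "directories": [
            # an identifier already present in research_dataset, flagged for removal
            {"identifier": "pid:urn:dir:2", "delete": True},
        ]
    }
    response = client.put(
        "/rest/v2/datasets/%d/files/user_metadata" % cr_id,
        file_changes,
        format="json",
    )
    assert response.status_code == 200

The next hunk verifies exactly this behaviour against the stored research_dataset.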
         file_changes = {}
-        self._add_directory(file_changes, '/TestExperiment/Directory_1/Group_1', with_metadata=True)
-        self._add_directory(file_changes, '/TestExperiment')
-        file_changes['directories'][-1]['delete'] = True
+        self._add_directory(file_changes, "/TestExperiment/Directory_1/Group_1", with_metadata=True)
+        self._add_directory(file_changes, "/TestExperiment")
+        file_changes["directories"][-1]["delete"] = True

-        entry_to_delete = file_changes['directories'][-1]['identifier']
+        entry_to_delete = file_changes["directories"][-1]["identifier"]

-        response = self.client.put('/rest/v2/datasets/%d/files/user_metadata' % cr_id, file_changes, format="json")
+        response = self.client.put(
+            "/rest/v2/datasets/%d/files/user_metadata" % cr_id,
+            file_changes,
+            format="json",
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

-        cr = CR.objects.only('research_dataset').get(pk=cr_id)
+        cr = CR.objects.only("research_dataset").get(pk=cr_id)
         self.assertEqual(
-            entry_to_delete in [ dr['identifier'] for dr in cr.research_dataset['directories'] ],
-            False
+            entry_to_delete in [dr["identifier"] for dr in cr.research_dataset["directories"]],
+            False,
         )

         # don't allow adding metadata entries for files that are not actually included in the dataset
         file_changes = {}
-        self._add_file(file_changes, '/TestExperiment/Directory_1/Group_2/file_03.txt', with_metadata=True)
-        non_existing_file = file_changes['files'][-1]['identifier']
+        self._add_file(
+            file_changes,
+            "/TestExperiment/Directory_1/Group_2/file_03.txt",
+            with_metadata=True,
+        )
+        non_existing_file = file_changes["files"][-1]["identifier"]

-        response = self.client.put('/rest/v2/datasets/%d/files/user_metadata' % cr_id, file_changes, format="json")
+        response = self.client.put(
+            "/rest/v2/datasets/%d/files/user_metadata" % cr_id,
+            file_changes,
+            format="json",
+        )
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
-        self.assertEqual('are not included' in response.data['detail'][0], True, response.data)
-        self.assertEqual(non_existing_file in response.data['data'], True, response.data)
+        self.assertEqual("are not included" in response.data["detail"][0], True, response.data)
+        self.assertEqual(non_existing_file in response.data["data"], True, response.data)

     def test_delete_all_file_meta_data(self):
         # create dataset with file
-        self._add_file(self.cr_test_data, '/TestExperiment/Directory_1/Group_1/file_02.txt')
-        response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json")
+        self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/Group_1/file_02.txt")
+        response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual('files' in response.data['research_dataset'], False, response.data)
+        self.assertEqual("files" in response.data["research_dataset"], False, response.data)

-        cr_id = response.data['id']
+        cr_id = response.data["id"]

         # add metadata for one file
         file_changes = {}
-        self._add_file(file_changes, '/TestExperiment/Directory_1/Group_1/file_02.txt', with_metadata=True)
-        response = self.client.put('/rest/v2/datasets/%d/files/user_metadata' % cr_id, file_changes, format="json")
+        self._add_file(
+            file_changes,
+            "/TestExperiment/Directory_1/Group_1/file_02.txt",
+            with_metadata=True,
+        )
+        response = self.client.put(
+            "/rest/v2/datasets/%d/files/user_metadata" % cr_id,
+            file_changes,
+            format="json",
+        )
         self.assertEqual(response.status_code,
status.HTTP_200_OK, response.data) - file_data = CR.objects.only('research_dataset').get(pk=cr_id).research_dataset['files'][0] - file_id = file_data['identifier'] + file_data = CR.objects.only("research_dataset").get(pk=cr_id).research_dataset["files"][0] + file_id = file_data["identifier"] # delete data of all files - file_changes = {'files': []} - file_changes['files'].append({'delete': True}) - file_changes['files'][0]['identifier'] = file_id - - response = self.client.put('/rest/v2/datasets/%d/files/user_metadata' % cr_id, file_changes, format="json") + file_changes = {"files": []} + file_changes["files"].append({"delete": True}) + file_changes["files"][0]["identifier"] = file_id + + response = self.client.put( + "/rest/v2/datasets/%d/files/user_metadata" % cr_id, + file_changes, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - cr = CR.objects.only('research_dataset').get(pk=cr_id) + cr = CR.objects.only("research_dataset").get(pk=cr_id) - self.assertFalse(cr.research_dataset.get('files', False), 'file metadata must not be there') + self.assertFalse(cr.research_dataset.get("files", False), "file metadata must not be there") # add metadata for one directory file_changes = {} - self._add_directory(file_changes, '/TestExperiment', with_metadata=True) - response = self.client.put('/rest/v2/datasets/%d/files/user_metadata' % cr_id, file_changes, format="json") + self._add_directory(file_changes, "/TestExperiment", with_metadata=True) + response = self.client.put( + "/rest/v2/datasets/%d/files/user_metadata" % cr_id, + file_changes, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - dir_data = CR.objects.only('research_dataset').get(pk=cr_id).research_dataset['directories'][0] + dir_data = ( + CR.objects.only("research_dataset").get(pk=cr_id).research_dataset["directories"][0] + ) - dir_id = dir_data['identifier'] + dir_id = dir_data["identifier"] # delete data of all directories - file_changes = {'directories': []} - file_changes['directories'].append({'delete': True}) - file_changes['directories'][0]['identifier'] = dir_id - - response = self.client.put('/rest/v2/datasets/%d/files/user_metadata' % cr_id, file_changes, format="json") + file_changes = {"directories": []} + file_changes["directories"].append({"delete": True}) + file_changes["directories"][0]["identifier"] = dir_id + + response = self.client.put( + "/rest/v2/datasets/%d/files/user_metadata" % cr_id, + file_changes, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - cr = CR.objects.only('research_dataset').get(pk=cr_id) + cr = CR.objects.only("research_dataset").get(pk=cr_id) - self.assertFalse(cr.research_dataset.get('directories', False), 'directory metadata must not be there') - self.assertFalse(cr.research_dataset.get('files', False), 'file metadata must not be there') + self.assertFalse( + cr.research_dataset.get("directories", False), + "directory metadata must not be there", + ) + self.assertFalse(cr.research_dataset.get("files", False), "file metadata must not be there") class CatalogRecordFileHandlingCumulativeDatasets(CatalogRecordApiWriteAssignFilesCommonV2): @@ -1020,23 +1156,25 @@ class CatalogRecordFileHandlingCumulativeDatasets(CatalogRecordApiWriteAssignFil def setUp(self): super().setUp() - self.cr_test_data.pop('files', None) - self.cr_test_data.pop('directories', None) - self.cr_test_data['cumulative_state'] = CR.CUMULATIVE_STATE_YES - self._add_file(self.cr_test_data, 
'/TestExperiment/Directory_1/file_05.txt') + self.cr_test_data.pop("files", None) + self.cr_test_data.pop("directories", None) + self.cr_test_data["cumulative_state"] = CR.CUMULATIVE_STATE_YES + self._add_file(self.cr_test_data, "/TestExperiment/Directory_1/file_05.txt") - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.cr_id = response.data['id'] + self.cr_id = response.data["id"] def test_add_files_to_cumulative_dataset(self): """ Adding files to an existing cumulative dataset should be ok. """ file_data = {} - self._add_file(file_data, '/TestExperiment/Directory_1/file_06.txt') + self._add_file(file_data, "/TestExperiment/Directory_1/file_06.txt") - response = self.client.post('/rest/v2/datasets/%d/files' % self.cr_id, file_data, format="json") + response = self.client.post( + "/rest/v2/datasets/%d/files" % self.cr_id, file_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) cr = CR.objects.get(pk=self.cr_id) self.assertEqual(cr.files.count(), 2) @@ -1047,26 +1185,34 @@ def test_exclude_files_from_cumulative_dataset(self): Excluding files from an existing cumulative dataset should be prevented. """ file_data = {} - self._exclude_file(file_data, '/TestExperiment/Directory_1/file_05.txt') + self._exclude_file(file_data, "/TestExperiment/Directory_1/file_05.txt") - response = self.client.post('/rest/v2/datasets/%d/files' % self.cr_id, file_data, format="json") + response = self.client.post( + "/rest/v2/datasets/%d/files" % self.cr_id, file_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual('Excluding files from a cumulative' in response.data['detail'][0], True, response.data) + self.assertEqual( + "Excluding files from a cumulative" in response.data["detail"][0], + True, + response.data, + ) self.assertEqual(CR.objects.get(pk=self.cr_id).files.count(), 1) def test_change_cumulative_states_on_draft(self): """ Ensure changing cumulative states on a new cr draft works as expected. 
""" - response = self.client.post('/rest/v2/datasets?draft=true', self.cr_test_data, format="json") + response = self.client.post( + "/rest/v2/datasets?draft=true", self.cr_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['id'] + cr_id = response.data["id"] # set to NO -> should remove all trace of ever being cumulative response = self.client.post( - f'/rpc/v2/datasets/change_cumulative_state?identifier={cr_id}&cumulative_state={CR.CUMULATIVE_STATE_NO}', - format="json" + f"/rpc/v2/datasets/change_cumulative_state?identifier={cr_id}&cumulative_state={CR.CUMULATIVE_STATE_NO}", + format="json", ) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) @@ -1078,8 +1224,8 @@ def test_change_cumulative_states_on_draft(self): # set back to YES response = self.client.post( - f'/rpc/v2/datasets/change_cumulative_state?identifier={cr_id}&cumulative_state={CR.CUMULATIVE_STATE_YES}', - format="json" + f"/rpc/v2/datasets/change_cumulative_state?identifier={cr_id}&cumulative_state={CR.CUMULATIVE_STATE_YES}", + format="json", ) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) @@ -1092,13 +1238,15 @@ def test_change_cumulative_states_on_draft(self): # set back to CLOSED -> does not make sense on a draft # note: too long url... yes its ugly response = self.client.post( - '/rpc/v2/datasets/change_cumulative_state' - f'?identifier={cr_id}&cumulative_state={CR.CUMULATIVE_STATE_CLOSED}', - format="json" + "/rpc/v2/datasets/change_cumulative_state" + f"?identifier={cr_id}&cumulative_state={CR.CUMULATIVE_STATE_CLOSED}", + format="json", ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) self.assertEqual( - 'For a new dataset, cumulative_state must be' in response.data['detail'][0], True, response.data + "For a new dataset, cumulative_state must be" in response.data["detail"][0], + True, + response.data, ) def test_change_cumulative_states_on_published_draft(self): @@ -1108,40 +1256,45 @@ def test_change_cumulative_states_on_published_draft(self): """ # create draft of the cumulative dataset - response = self.client.post(f'/rpc/v2/datasets/create_draft?identifier={self.cr_id}', format="json") + response = self.client.post( + f"/rpc/v2/datasets/create_draft?identifier={self.cr_id}", format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - draft_id = response.data['id'] + draft_id = response.data["id"] # set to NO -> should be prevented response = self.client.post( - '/rpc/v2/datasets/change_cumulative_state' - f'?identifier={draft_id}&cumulative_state={CR.CUMULATIVE_STATE_NO}', - format="json" + "/rpc/v2/datasets/change_cumulative_state" + f"?identifier={draft_id}&cumulative_state={CR.CUMULATIVE_STATE_NO}", + format="json", ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) self.assertTrue( - 'Cumulative dataset cannot be set to non-cumulative' in response.data['detail'][0], response.data + "Cumulative dataset cannot be set to non-cumulative" in response.data["detail"][0], + response.data, ) # set to YES -> should do nothing, since is already cumulative response = self.client.post( - '/rpc/v2/datasets/change_cumulative_state' - f'?identifier={draft_id}&cumulative_state={CR.CUMULATIVE_STATE_YES}', - format="json" + "/rpc/v2/datasets/change_cumulative_state" + f"?identifier={draft_id}&cumulative_state={CR.CUMULATIVE_STATE_YES}", + format="json", ) 
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) # set to CLOSED -> should work response = self.client.post( - '/rpc/v2/datasets/change_cumulative_state' - f'?identifier={draft_id}&cumulative_state={CR.CUMULATIVE_STATE_CLOSED}', - format="json" + "/rpc/v2/datasets/change_cumulative_state" + f"?identifier={draft_id}&cumulative_state={CR.CUMULATIVE_STATE_CLOSED}", + format="json", ) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) # merge draft changes - response = self.client.post(f'/rpc/v2/datasets/merge_draft?identifier={draft_id}', format="json") + response = self.client.post( + f"/rpc/v2/datasets/merge_draft?identifier={draft_id}", format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # check updates were applied @@ -1157,25 +1310,31 @@ def test_add_files_to_cumulative_published_draft_dataset(self): num_files_added = 1 # create draft of the cumulative dataset - response = self.client.post('/rpc/v2/datasets/create_draft?identifier=%d' % self.cr_id, format="json") + response = self.client.post( + "/rpc/v2/datasets/create_draft?identifier=%d" % self.cr_id, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - draft_id = response.data['id'] + draft_id = response.data["id"] # add files to draft file_data = {} - self._add_file(file_data, '/TestExperiment/Directory_1/Group_1/file_01.txt') - self._add_file(file_data, '/TestExperiment/Directory_1/Group_1/file_02.txt') + self._add_file(file_data, "/TestExperiment/Directory_1/Group_1/file_01.txt") + self._add_file(file_data, "/TestExperiment/Directory_1/Group_1/file_02.txt") - num_files_added += len(file_data['files']) + num_files_added += len(file_data["files"]) - response = self.client.post('/rest/v2/datasets/%d/files' % draft_id, file_data, format="json") + response = self.client.post( + "/rest/v2/datasets/%d/files" % draft_id, file_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # ensure excluding is prevented file_data = {} - self._exclude_file(file_data, '/TestExperiment/Directory_1/Group_1/file_02.txt') - response = self.client.post('/rest/v2/datasets/%d/files' % draft_id, file_data, format="json") + self._exclude_file(file_data, "/TestExperiment/Directory_1/Group_1/file_02.txt") + response = self.client.post( + "/rest/v2/datasets/%d/files" % draft_id, file_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) # ensure files were added to draft @@ -1187,7 +1346,9 @@ def test_add_files_to_cumulative_published_draft_dataset(self): self.assertEqual(cr.files.count(), 1) # merge draft changes - response = self.client.post('/rpc/v2/datasets/merge_draft?identifier=%d' % draft_id, format="json") + response = self.client.post( + "/rpc/v2/datasets/merge_draft?identifier=%d" % draft_id, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # published dataset should now have files added @@ -1195,21 +1356,25 @@ def test_add_files_to_cumulative_published_draft_dataset(self): self.assertEqual(cr.files.count(), num_files_added) # create another draft - response = self.client.post('/rpc/v2/datasets/create_draft?identifier=%d' % self.cr_id, format="json") + response = self.client.post( + "/rpc/v2/datasets/create_draft?identifier=%d" % self.cr_id, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - draft_id = 
response.data['id'] + draft_id = response.data["id"] # close cumulative period on draft response = self.client.post( - '/rpc/v2/datasets/change_cumulative_state' - f'?identifier={draft_id}&cumulative_state={CR.CUMULATIVE_STATE_CLOSED}', - format="json" + "/rpc/v2/datasets/change_cumulative_state" + f"?identifier={draft_id}&cumulative_state={CR.CUMULATIVE_STATE_CLOSED}", + format="json", ) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) # merge draft changes again - response = self.client.post('/rpc/v2/datasets/merge_draft?identifier=%d' % draft_id, format="json") + response = self.client.post( + "/rpc/v2/datasets/merge_draft?identifier=%d" % draft_id, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # published dataset should now have cumulativity closed @@ -1220,6 +1385,6 @@ def test_change_preservation_state(self): """ PAS process should not be started while cumulative period is open. """ - cr = { 'preservation_state': 10 } - response = self.client.patch('/rest/v2/datasets/%s' % self.cr_id, cr, format="json") + cr = {"preservation_state": 10} + response = self.client.patch("/rest/v2/datasets/%s" % self.cr_id, cr, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/pas.py b/src/metax_api/tests/api/rest/v2/views/datasets/pas.py index 17f23ab3..fbbabc18 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/pas.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/pas.py @@ -24,49 +24,57 @@ class CatalogRecordApiReadPreservationStateTests(CatalogRecordApiReadCommon): """ def test_read_catalog_record_search_by_preservation_state(self): - ''' + """ Various simple filtering requests - ''' - response = self.client.get('/rest/v2/datasets?preservation_state=0') + """ + response = self.client.get("/rest/v2/datasets?preservation_state=0") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']) > 2, True, - 'There should have been multiple results for preservation_state=0 request') + self.assertEqual( + len(response.data["results"]) > 2, + True, + "There should have been multiple results for preservation_state=0 request", + ) - response = self.client.get('/rest/v2/datasets?preservation_state=10') + response = self.client.get("/rest/v2/datasets?preservation_state=10") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 2) + self.assertEqual(len(response.data["results"]), 2) - response = self.client.get('/rest/v2/datasets?preservation_state=40') + response = self.client.get("/rest/v2/datasets?preservation_state=40") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 1) + self.assertEqual(len(response.data["results"]), 1) def test_read_catalog_record_search_by_preservation_state_666(self): - response = self.client.get('/rest/v2/datasets?preservation_state=666') + response = self.client.get("/rest/v2/datasets?preservation_state=666") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 0, 'should return empty list') + self.assertEqual(len(response.data["results"]), 0, "should return empty list") def test_read_catalog_record_search_by_preservation_state_many(self): - response = self.client.get('/rest/v2/datasets?preservation_state=10,40') + response = 
self.client.get("/rest/v2/datasets?preservation_state=10,40") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 3) - self.assertEqual(response.data['results'][0]['preservation_state'], 10) - self.assertEqual(response.data['results'][1]['preservation_state'], 10) - self.assertEqual(response.data['results'][2]['preservation_state'], 40) + self.assertEqual(len(response.data["results"]), 3) + self.assertEqual(response.data["results"][0]["preservation_state"], 10) + self.assertEqual(response.data["results"][1]["preservation_state"], 10) + self.assertEqual(response.data["results"][2]["preservation_state"], 40) def test_read_catalog_record_search_by_preservation_state_invalid_value(self): - response = self.client.get('/rest/v2/datasets?preservation_state=1,a') + response = self.client.get("/rest/v2/datasets?preservation_state=1,a") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('is not an integer' in response.data['preservation_state'][0], True, - 'Error should say letter a is not an integer') + self.assertEqual( + "is not an integer" in response.data["preservation_state"][0], + True, + "Error should say letter a is not an integer", + ) - response = self.client.get('/rest/v2/datasets?preservation_state=1,a') + response = self.client.get("/rest/v2/datasets?preservation_state=1,a") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('is not an integer' in response.data['preservation_state'][0], True, - 'Error should say letter a is not an integer') + self.assertEqual( + "is not an integer" in response.data["preservation_state"][0], + True, + "Error should say letter a is not an integer", + ) class CatalogRecordApiReadPASFilter(CatalogRecordApiReadCommon): - def test_pas_filter(self): """ Test query param pas_filter which should search from various fields using the same search term. 
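Both filters exercised by the following hunk, sketched assuming an authenticated `client`: preservation_state accepts a comma-separated list of integers, and pas_filter applies one search term across several fields at once:

    response = client.get("/rest/v2/datasets?preservation_state=10,40")
    for result in response.data["results"]:
        assert result["preservation_state"] in (10, 40)

    # pas_filter matches e.g. dataset titles, curator names and contract title
    response = client.get("/rest/v2/datasets?preservation_state=10&pas_filter=agreement")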
@@ -76,52 +84,58 @@ def test_pas_filter(self):
         cr = CatalogRecordV2.objects.get(pk=1)
         cr.preservation_state = 10
         cr.contract_id = 1
-        cr.research_dataset['title']['en'] = 'Catch me if you can'
-        cr.research_dataset['title']['fi'] = 'Ota kiinni jos saat'
-        cr.research_dataset['curator'] = []
-        cr.research_dataset['curator'].append({ 'name': 'Seppo Hovi' })
-        cr.research_dataset['curator'].append({ 'name': 'Esa Nieminen' })
-        cr.research_dataset['curator'].append({ 'name': 'Aku Ankka' })
-        cr.research_dataset['curator'].append({ 'name': 'Jaska Jokunen' })
+        cr.research_dataset["title"]["en"] = "Catch me if you can"
+        cr.research_dataset["title"]["fi"] = "Ota kiinni jos saat"
+        cr.research_dataset["curator"] = []
+        cr.research_dataset["curator"].append({"name": "Seppo Hovi"})
+        cr.research_dataset["curator"].append({"name": "Esa Nieminen"})
+        cr.research_dataset["curator"].append({"name": "Aku Ankka"})
+        cr.research_dataset["curator"].append({"name": "Jaska Jokunen"})
         cr.force_save()

         contract = Contract.objects.get(pk=1)
-        contract.contract_json['title'] = 'An Important Agreement'
+        contract.contract_json["title"] = "An Important Agreement"
         contract.save()

         metax_user = django_settings.API_METAX_USER
-        self._use_http_authorization(username=metax_user['username'], password=metax_user['password'])
+        self._use_http_authorization(
+            username=metax_user["username"], password=metax_user["password"]
+        )

         # begin testing
-        response = self.client.get('/rest/v2/datasets?preservation_state=10&pas_filter=if you')
+        response = self.client.get("/rest/v2/datasets?preservation_state=10&pas_filter=if you")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['results']), 1)
+        self.assertEqual(len(response.data["results"]), 1)

-        response = self.client.get('/rest/v2/datasets?preservation_state=10&pas_filter=kiinni jos')
-        self.assertEqual(len(response.data['results']), 1)
+        response = self.client.get("/rest/v2/datasets?preservation_state=10&pas_filter=kiinni jos")
+        self.assertEqual(len(response.data["results"]), 1)

-        response = self.client.get('/rest/v2/datasets?preservation_state=10&pas_filter=niemine')
-        self.assertEqual(len(response.data['results']), 1)
+        response = self.client.get("/rest/v2/datasets?preservation_state=10&pas_filter=niemine")
+        self.assertEqual(len(response.data["results"]), 1)

         # more than 3 curators, requires typing exact case-sensitive name...
see comments in related code - response = self.client.get('/rest/v2/datasets?preservation_state=10&pas_filter=jokunen') - self.assertEqual(len(response.data['results']), 0) - response = self.client.get('/rest/v2/datasets?preservation_state=10&pas_filter=Jaska Jokunen') - self.assertEqual(len(response.data['results']), 1) + response = self.client.get("/rest/v2/datasets?preservation_state=10&pas_filter=jokunen") + self.assertEqual(len(response.data["results"]), 0) + response = self.client.get( + "/rest/v2/datasets?preservation_state=10&pas_filter=Jaska Jokunen" + ) + self.assertEqual(len(response.data["results"]), 1) # contract_id 1 has several other associated test datasets - response = self.client.get('/rest/v2/datasets?preservation_state=10&pas_filter=agreement') - self.assertEqual(len(response.data['results']), 3) + response = self.client.get("/rest/v2/datasets?preservation_state=10&pas_filter=agreement") + self.assertEqual(len(response.data["results"]), 3) - response = self.client.get('/rest/v2/datasets?preservation_state=10&pas_filter=does not exist') - self.assertEqual(len(response.data['results']), 0) + response = self.client.get( + "/rest/v2/datasets?preservation_state=10&pas_filter=does not exist" + ) + self.assertEqual(len(response.data["results"]), 0) def test_pas_filter_is_restricted(self): """ Query param is permitted to users metax and tpas. """ - response = self.client.get('/rest/v2/datasets?preservation_state=10&pas_filter=hmmm') + response = self.client.get("/rest/v2/datasets?preservation_state=10&pas_filter=hmmm") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) @@ -136,12 +150,12 @@ def _create_pas_dataset_from_id(self, id): Helper method to create a pas dataset by updating the given dataset's preservation_state to 80. 
""" - cr_data = self.client.get('/rest/v2/datasets/%d' % id, format="json").data - self.assertEqual(cr_data['preservation_state'], 0) + cr_data = self.client.get("/rest/v2/datasets/%d" % id, format="json").data + self.assertEqual(cr_data["preservation_state"], 0) # update state to "accepted to pas" -> should create pas version - cr_data['preservation_state'] = 80 - response = self.client.put('/rest/v2/datasets/%d' % id, cr_data, format="json") + cr_data["preservation_state"] = 80 + response = self.client.put("/rest/v2/datasets/%d" % id, cr_data, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) return response.data @@ -149,24 +163,28 @@ def setUp(self): super().setUp() dc = DataCatalog.objects.get(pk=1) catalog_json = dc.catalog_json - catalog_json['identifier'] = django_settings.PAS_DATA_CATALOG_IDENTIFIER - catalog_json['dataset_versioning'] = False + catalog_json["identifier"] = django_settings.PAS_DATA_CATALOG_IDENTIFIER + catalog_json["dataset_versioning"] = False dc = DataCatalog.objects.create( catalog_json=catalog_json, date_created=get_tz_aware_now_without_micros(), - catalog_record_services_create='testuser,api_auth_user,metax', - catalog_record_services_edit='testuser,api_auth_user,metax' + catalog_record_services_create="testuser,api_auth_user,metax", + catalog_record_services_edit="testuser,api_auth_user,metax", ) def test_update_catalog_record_pas_state_allowed_value(self): - cr = self.client.get('/rest/v2/datasets/1').data - cr['preservation_state'] = 30 - response = self.client.put('/rest/v2/datasets/1', cr, format="json") + cr = self.client.get("/rest/v2/datasets/1").data + cr["preservation_state"] = 30 + response = self.client.put("/rest/v2/datasets/1", cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) cr = CatalogRecordV2.objects.get(pk=1) - self.assertEqual(cr.preservation_state_modified >= get_tz_aware_now_without_micros() - timedelta(seconds=5), - True, 'Timestamp should have been updated during object update') + self.assertEqual( + cr.preservation_state_modified + >= get_tz_aware_now_without_micros() - timedelta(seconds=5), + True, + "Timestamp should have been updated during object update", + ) def test_update_pas_state_to_needs_revalidation(self): """ @@ -181,14 +199,14 @@ def test_update_pas_state_to_needs_revalidation(self): cr.save() # retrieve record and ensure testing state was set correctly... 
- cr_data = self.client.get('/rest/v2/datasets/1', format="json").data - self.assertEqual(cr_data['preservation_state'], preservation_state_value) + cr_data = self.client.get("/rest/v2/datasets/1", format="json").data + self.assertEqual(cr_data["preservation_state"], preservation_state_value) # strike and verify - cr_data['research_dataset']['title']['en'] = 'Metadata has been updated on loop %d' % i - response = self.client.put('/rest/v2/datasets/1', cr_data, format="json") + cr_data["research_dataset"]["title"]["en"] = "Metadata has been updated on loop %d" % i + response = self.client.put("/rest/v2/datasets/1", cr_data, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['preservation_state'], 60) + self.assertEqual(response.data["preservation_state"], 60) def test_prevent_file_changes_when_record_in_pas_process(self): """ @@ -198,44 +216,48 @@ def test_prevent_file_changes_when_record_in_pas_process(self): cr.preservation_state = 10 cr.save() - file_changes = { - 'files': [{ 'identifier': 'pid:urn:3' }] - } + file_changes = {"files": [{"identifier": "pid:urn:3"}]} - response = self.client.post('/rest/v2/datasets/1/files', file_changes, format="json") + response = self.client.post("/rest/v2/datasets/1/files", file_changes, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual('Changing files of a published' in response.data['detail'][0], True, response.data) + self.assertEqual( + "Changing files of a published" in response.data["detail"][0], + True, + response.data, + ) def test_non_pas_dataset_unallowed_preservation_state_values(self): # update non-pas dataset - cr = self.client.get('/rest/v2/datasets/1').data + cr = self.client.get("/rest/v2/datasets/1").data values = [ - 11, # not one of known values - 90, # value not allowed for non-pas datasets + 11, # not one of known values + 90, # value not allowed for non-pas datasets ] for invalid_value in values: - cr['preservation_state'] = invalid_value - response = self.client.put('/rest/v2/datasets/1', cr, format="json") + cr["preservation_state"] = invalid_value + response = self.client.put("/rest/v2/datasets/1", cr, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) def test_pas_dataset_unallowed_preservation_state_values(self): # create pas dataset and update with invalid values - cr = self.client.get('/rest/v2/datasets/1').data - cr['preservation_state'] = 80 - response = self.client.put('/rest/v2/datasets/1', cr, format="json") - cr = self.client.get('/rest/v2/datasets/%d' % response.data['preservation_dataset_version']['id']).data + cr = self.client.get("/rest/v2/datasets/1").data + cr["preservation_state"] = 80 + response = self.client.put("/rest/v2/datasets/1", cr, format="json") + cr = self.client.get( + "/rest/v2/datasets/%d" % response.data["preservation_dataset_version"]["id"] + ).data values = [ 70, # value not allowed for non-pas datasets - 111, # not one of known values - 150 # not one of known values + 111, # not one of known values + 150, # not one of known values ] for invalid_value in values: - cr['preservation_state'] = invalid_value - response = self.client.put('/rest/v2/datasets/1', cr, format="json") + cr["preservation_state"] = invalid_value + response = self.client.put("/rest/v2/datasets/1", cr, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) def 
test_pas_version_is_created_on_preservation_state_80(self): @@ -243,40 +265,57 @@ def test_pas_version_is_created_on_preservation_state_80(self): When preservation_state is updated to 'accepted to pas', a copy should be created into designated PAS catalog. """ - cr_data = self.client.get('/rest/v2/datasets/1', format="json").data - self.assertEqual(cr_data['preservation_state'], 0) + cr_data = self.client.get("/rest/v2/datasets/1", format="json").data + self.assertEqual(cr_data["preservation_state"], 0) origin_dataset = self._create_pas_dataset_from_id(1) - self.assertEqual(origin_dataset['preservation_state'], 0) - self.assertEqual('new_version_created' in origin_dataset, True) - self.assertEqual(origin_dataset['new_version_created']['version_type'], 'pas') - self.assertEqual('preservation_dataset_version' in origin_dataset, True) - self.assertEqual('other_identifier' in origin_dataset['research_dataset'], True) - self.assertEqual(origin_dataset['research_dataset']['other_identifier'][0]['notation'].startswith('doi'), True) + self.assertEqual(origin_dataset["preservation_state"], 0) + self.assertEqual("new_version_created" in origin_dataset, True) + self.assertEqual(origin_dataset["new_version_created"]["version_type"], "pas") + self.assertEqual("preservation_dataset_version" in origin_dataset, True) + self.assertEqual("other_identifier" in origin_dataset["research_dataset"], True) + self.assertEqual( + origin_dataset["research_dataset"]["other_identifier"][0]["notation"].startswith("doi"), + True, + ) # get pas version and verify links and other signature values are there pas_dataset = self.client.get( - '/rest/v2/datasets/%d' % origin_dataset['preservation_dataset_version']['id'], format="json" + "/rest/v2/datasets/%d" % origin_dataset["preservation_dataset_version"]["id"], + format="json", ).data - self.assertEqual(pas_dataset['data_catalog']['identifier'], django_settings.PAS_DATA_CATALOG_IDENTIFIER) - self.assertEqual(pas_dataset['preservation_state'], 80) - self.assertEqual(pas_dataset['preservation_dataset_origin_version']['id'], origin_dataset['id']) self.assertEqual( - pas_dataset['preservation_dataset_origin_version']['preferred_identifier'], - origin_dataset['research_dataset']['preferred_identifier'] + pas_dataset["data_catalog"]["identifier"], + django_settings.PAS_DATA_CATALOG_IDENTIFIER, + ) + self.assertEqual(pas_dataset["preservation_state"], 80) + self.assertEqual( + pas_dataset["preservation_dataset_origin_version"]["id"], + origin_dataset["id"], + ) + self.assertEqual( + pas_dataset["preservation_dataset_origin_version"]["preferred_identifier"], + origin_dataset["research_dataset"]["preferred_identifier"], + ) + self.assertEqual("deprecated" in pas_dataset["preservation_dataset_origin_version"], True) + self.assertEqual("other_identifier" in pas_dataset["research_dataset"], True) + self.assertEqual( + pas_dataset["research_dataset"]["other_identifier"][0]["notation"].startswith("urn"), + True, ) - self.assertEqual('deprecated' in pas_dataset['preservation_dataset_origin_version'], True) - self.assertEqual('other_identifier' in pas_dataset['research_dataset'], True) - self.assertEqual(pas_dataset['research_dataset']['other_identifier'][0]['notation'].startswith('urn'), True) # when pas copy is created, origin_dataset preservation_state should have been set back to 0 - cr_data = self.client.get('/rest/v2/datasets/1', format="json").data - self.assertEqual(cr_data['preservation_state'], 0) + cr_data = self.client.get("/rest/v2/datasets/1", format="json").data + 
self.assertEqual(cr_data["preservation_state"], 0) # ensure files match between original and pas cr cr = CatalogRecordV2.objects.get(pk=1) - cr_files = cr.files.filter().order_by('id').values_list('id', flat=True) - cr_pas_files = cr.preservation_dataset_version.files.filter().order_by('id').values_list('id', flat=True) + cr_files = cr.files.filter().order_by("id").values_list("id", flat=True) + cr_pas_files = ( + cr.preservation_dataset_version.files.filter() + .order_by("id") + .values_list("id", flat=True) + ) # note: trying to assert querysets will result in failure. must evaluate the querysets first by iterating them self.assertEqual([f for f in cr_files], [f for f in cr_pas_files]) @@ -288,35 +327,43 @@ def test_origin_dataset_cant_have_multiple_pas_versions(self): """ self._create_pas_dataset_from_id(1) - cr_data = { 'preservation_state': 80 } - response = self.client.patch('/rest/v2/datasets/1', cr_data, format="json") + cr_data = {"preservation_state": 80} + response = self.client.patch("/rest/v2/datasets/1", cr_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual('already has a PAS version' in response.data['detail'][0], True, response.data) + self.assertEqual( + "already has a PAS version" in response.data["detail"][0], + True, + response.data, + ) def test_dataset_can_be_created_directly_into_pas_catalog(self): """ Datasets that are created directly into PAS catalog should not have any enforced rules about changing preservation_state value. """ - self.cr_test_data['data_catalog'] = django_settings.PAS_DATA_CATALOG_IDENTIFIER - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self.cr_test_data["data_catalog"] = django_settings.PAS_DATA_CATALOG_IDENTIFIER + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assertEqual( - response.data['research_dataset']['preferred_identifier'].startswith('doi'), + response.data["research_dataset"]["preferred_identifier"].startswith("doi"), True, - response.data['research_dataset']['preferred_identifier'] + response.data["research_dataset"]["preferred_identifier"], ) # when created directly into pas catalog, preservation_state can be updated # to whatever, whenever - ps_values = [ v[0] for v in CatalogRecordV2.PRESERVATION_STATE_CHOICES ] + ps_values = [v[0] for v in CatalogRecordV2.PRESERVATION_STATE_CHOICES] for ps in ps_values: - cr_data = { 'preservation_state': ps } - response = self.client.patch('/rest/v2/datasets/%d' % response.data['id'], cr_data, format="json") + cr_data = {"preservation_state": ps} + response = self.client.patch( + "/rest/v2/datasets/%d" % response.data["id"], cr_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - cr_data = { 'preservation_state': 0 } - response = self.client.patch('/rest/v2/datasets/%d' % response.data['id'], cr_data, format="json") + cr_data = {"preservation_state": 0} + response = self.client.patch( + "/rest/v2/datasets/%d" % response.data["id"], cr_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_unfreezing_files_does_not_deprecate_pas_dataset(self): @@ -326,9 +373,12 @@ def test_unfreezing_files_does_not_deprecate_pas_dataset(self): been stored in PAS. 
""" cr = self._create_pas_dataset_from_id(1) - response = self.client.delete('/rest/v2/files/1', format="json") + response = self.client.delete("/rest/v2/files/1", format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get('/rest/v2/datasets/%d' % cr['preservation_dataset_version']['id'], format="json") + response = self.client.get( + "/rest/v2/datasets/%d" % cr["preservation_dataset_version"]["id"], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['deprecated'], False) + self.assertEqual(response.data["deprecated"], False) diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/read.py b/src/metax_api/tests/api/rest/v2/views/datasets/read.py index 6c75abe7..8f5ad4a6 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/read.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/read.py @@ -5,8 +5,8 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT -from copy import deepcopy import urllib.parse +from copy import deepcopy from datetime import timedelta import responses @@ -27,28 +27,32 @@ def setUpClass(cls): """ Loaded only once for test cases inside this class. """ - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(CatalogRecordApiReadCommon, cls).setUpClass() def setUp(self): - self.cr_from_test_data = self._get_object_from_test_data('catalogrecord', requested_index=0) - self.pk = self.cr_from_test_data['id'] - self.metadata_version_identifier = self.cr_from_test_data['research_dataset']['metadata_version_identifier'] - self.preferred_identifier = self.cr_from_test_data['research_dataset']['preferred_identifier'] - self.identifier = self.cr_from_test_data['identifier'] + self.cr_from_test_data = self._get_object_from_test_data("catalogrecord", requested_index=0) + self.pk = self.cr_from_test_data["id"] + self.metadata_version_identifier = self.cr_from_test_data["research_dataset"][ + "metadata_version_identifier" + ] + self.preferred_identifier = self.cr_from_test_data["research_dataset"][ + "preferred_identifier" + ] + self.identifier = self.cr_from_test_data["identifier"] self._use_http_authorization() def create_legacy_dataset(self): cr = deepcopy(self.cr_from_test_data) - cr['data_catalog'] = settings.LEGACY_CATALOGS[0] - cr.pop('identifier') - cr['research_dataset']['preferred_identifier'] = 'ldhkrfdwam' - cr['research_dataset'].pop('files') - cr['research_dataset'].pop('total_files_byte_size') - response = self.client.post('/rest/v2/datasets', cr, format="json") + cr["data_catalog"] = settings.LEGACY_CATALOGS[0] + cr.pop("identifier") + cr["research_dataset"]["preferred_identifier"] = "ldhkrfdwam" + cr["research_dataset"].pop("files") + cr["research_dataset"].pop("total_files_byte_size") + response = self.client.post("/rest/v2/datasets", cr, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - return response.data['id'] + return response.data["id"] class CatalogRecordApiReadBasicTests(CatalogRecordApiReadCommon): @@ -58,7 +62,7 @@ class CatalogRecordApiReadBasicTests(CatalogRecordApiReadCommon): """ def test_read_catalog_record_list(self): - response = self.client.get('/rest/v2/datasets') + response = self.client.get("/rest/v2/datasets") self.assertEqual(response.status_code, status.HTTP_200_OK) def test_read_catalog_record_list_by_ids(self): @@ -67,99 +71,109 @@ def 
test_read_catalog_record_list_by_ids(self): cr_id_lists = [ [1, 4, 6], [1, 4, 6, 777], - ['cr955e904-e3dd-4d7e-99f1-3fed446f96d7', - 'cr955e904-e3dd-4d7e-99f1-3fed446f96d6', - 'cr955e904-e3dd-4d7e-99f1-3fed446f96d5'], - ['cr955e904-e3dd-4d7e-99f1-3fed446f96d7', - 'cr955e904-e3dd-4d7e-99f1-3fed446f96d6', - 'cr955e904-e3dd-4d7e-99f1-3fed446f96d5', - 'something'] + [ + "cr955e904-e3dd-4d7e-99f1-3fed446f96d7", + "cr955e904-e3dd-4d7e-99f1-3fed446f96d6", + "cr955e904-e3dd-4d7e-99f1-3fed446f96d5", + ], + [ + "cr955e904-e3dd-4d7e-99f1-3fed446f96d7", + "cr955e904-e3dd-4d7e-99f1-3fed446f96d6", + "cr955e904-e3dd-4d7e-99f1-3fed446f96d5", + "something", + ], ] for id_list in cr_id_lists: - response = self.client.post('/rest/v2/datasets/list', id_list, format="json") + response = self.client.post("/rest/v2/datasets/list", id_list, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 3) + self.assertEqual(len(response.data["results"]), 3) # check that fields parameter works - response = self.client.post('/rest/v2/datasets/list?fields=id', id_list, format="json") + response = self.client.post("/rest/v2/datasets/list?fields=id", id_list, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 3) - self.assertEqual(len(response.data['results'][0].keys()), 1) - self.assertEqual(list(response.data['results'][0].keys()), ['id']) + self.assertEqual(len(response.data["results"]), 3) + self.assertEqual(len(response.data["results"][0].keys()), 1) + self.assertEqual(list(response.data["results"][0].keys()), ["id"]) # Failing/empty tests - cr_bad_lists = [ - ['something'], - [999, 777] - ] + cr_bad_lists = [["something"], [999, 777]] for bad_list in cr_bad_lists: - response = self.client.post('/rest/v2/datasets/list', bad_list, format="json") + response = self.client.post("/rest/v2/datasets/list", bad_list, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['results'], []) + self.assertEqual(response.data["results"], []) - response = self.client.post('/rest/v2/datasets/list', [], format="json") + response = self.client.post("/rest/v2/datasets/list", [], format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue('Received empty list of identifiers' in response.data['detail']) + self.assertTrue("Received empty list of identifiers" in response.data["detail"]) def test_read_catalog_record_details_by_pk(self): - response = self.client.get('/rest/v2/datasets/%s' % self.pk) + response = self.client.get("/rest/v2/datasets/%s" % self.pk) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['identifier'], self.identifier) - self.assertEqual('identifier' in response.data['data_catalog'], True) + self.assertEqual(response.data["identifier"], self.identifier) + self.assertEqual("identifier" in response.data["data_catalog"], True) def test_read_catalog_record_details_by_identifier(self): - response = self.client.get('/rest/v2/datasets/%s' % self.identifier) + response = self.client.get("/rest/v2/datasets/%s" % self.identifier) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['identifier'], - self.identifier) + self.assertEqual(response.data["identifier"], self.identifier) def test_get_by_preferred_identifier(self): cr = CatalogRecordV2.objects.get(pk=1) - cr.research_dataset['preferred_identifier'] = 
'%s-/uhoh/special.chars?all&around' % cr.preferred_identifier + cr.research_dataset["preferred_identifier"] = ( + "%s-/uhoh/special.chars?all&around" % cr.preferred_identifier + ) cr.force_save() - response = self.client.get('/rest/v2/datasets?preferred_identifier=%s' % - urllib.parse.quote(cr.preferred_identifier)) + response = self.client.get( + "/rest/v2/datasets?preferred_identifier=%s" + % urllib.parse.quote(cr.preferred_identifier) + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['research_dataset']['preferred_identifier'], cr.preferred_identifier) + self.assertEqual( + response.data["research_dataset"]["preferred_identifier"], + cr.preferred_identifier, + ) def test_get_removed_by_preferred_identifier(self): self._use_http_authorization() - response = self.client.delete('/rest/v2/datasets/%s' % self.identifier) + response = self.client.delete("/rest/v2/datasets/%s" % self.identifier) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) - response = self.client.get('/rest/v2/datasets?preferred_identifier=%s&removed=true' % - urllib.parse.quote(self.preferred_identifier)) + response = self.client.get( + "/rest/v2/datasets?preferred_identifier=%s&removed=true" + % urllib.parse.quote(self.preferred_identifier) + ) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_by_preferred_identifier_search_prefers_oldest_data_catalog(self): - ''' + """ Search by preferred_identifier should prefer hits from oldest created catalogs which are assumed to be att/fairdata catalogs. - ''' + """ # get a cr that has alternate records - cr = self._get_object_from_test_data('catalogrecord', requested_index=9) - pid = cr['research_dataset']['preferred_identifier'] + cr = self._get_object_from_test_data("catalogrecord", requested_index=9) + pid = cr["research_dataset"]["preferred_identifier"] # verify there are more than one record with same pid! 
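Because preferred identifiers may contain characters that are reserved in URLs, lookups have to percent-encode them, roughly as below (the pid value is hypothetical):

    import urllib.parse

    pid = "pid:urn:with/special.chars?all&around"  # hypothetical
    response = client.get(
        "/rest/v2/datasets?preferred_identifier=%s" % urllib.parse.quote(pid)
    )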
count = CatalogRecordV2.objects.filter(research_dataset__preferred_identifier=pid).count() - self.assertEqual(count > 1, True, 'makes no sense to test with a pid that exists only once') + self.assertEqual(count > 1, True, "makes no sense to test with a pid that exists only once") # the retrieved record should be the one that is in catalog 1 - response = self.client.get('/rest/v2/datasets?preferred_identifier=%s' % - urllib.parse.quote(pid)) - self.assertEqual('alternate_record_set' in response.data, True) - self.assertEqual(response.data['data_catalog']['id'], cr['data_catalog']) + response = self.client.get( + "/rest/v2/datasets?preferred_identifier=%s" % urllib.parse.quote(pid) + ) + self.assertEqual("alternate_record_set" in response.data, True) + self.assertEqual(response.data["data_catalog"]["id"], cr["data_catalog"]) def test_read_catalog_record_details_not_found(self): - response = self.client.get('/rest/v2/datasets/shouldnotexist') + response = self.client.get("/rest/v2/datasets/shouldnotexist") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_read_catalog_record_metadata_version_identifiers(self): - response = self.client.get('/rest/v2/datasets/metadata_version_identifiers') + response = self.client.get("/rest/v2/datasets/metadata_version_identifiers") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(isinstance(response.data, list)) self.assertTrue(len(response.data) > 0) @@ -168,7 +182,7 @@ def test_get_unique_preferred_identifiers(self): """ Get all unique preferred_identifiers, no matter if they are the latest dataset version or not. """ - response = self.client.get('/rest/v2/datasets/unique_preferred_identifiers') + response = self.client.get("/rest/v2/datasets/unique_preferred_identifiers") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(isinstance(response.data, list)) self.assertTrue(len(response.data) > 0) @@ -178,14 +192,14 @@ def test_get_unique_preferred_identifiers(self): self._create_new_ds() self._create_new_ds() - response = self.client.get('/rest/v2/datasets/unique_preferred_identifiers') - self.assertEqual(len(response.data) - ids_len, 2, 'should be two new PIDs') + response = self.client.get("/rest/v2/datasets/unique_preferred_identifiers") + self.assertEqual(len(response.data) - ids_len, 2, "should be two new PIDs") def test_get_latest_unique_preferred_identifiers(self): """ Get all unique preferred_identifiers, but only from the latest dataset versions. 
""" - response = self.client.get('/rest/v2/datasets/unique_preferred_identifiers?latest') + response = self.client.get("/rest/v2/datasets/unique_preferred_identifiers?latest") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(isinstance(response.data, list)) self.assertTrue(len(response.data) > 0) @@ -195,70 +209,84 @@ def test_get_latest_unique_preferred_identifiers(self): # files change cr = CatalogRecordV2.objects.get(pk=1) - new_file_id = cr.files.all().order_by('-id').first().id + 1 - file_from_testdata = self._get_object_from_test_data('file', requested_index=new_file_id) + new_file_id = cr.files.all().order_by("-id").first().id + 1 + file_from_testdata = self._get_object_from_test_data("file", requested_index=new_file_id) # warning, this is actual file metadata, would not pass schema validation if sent through api - cr.research_dataset['files'] = [file_from_testdata] + cr.research_dataset["files"] = [file_from_testdata] cr.save() - response = self.client.get('/rest/v2/datasets/unique_preferred_identifiers?latest') - self.assertEqual(ids_len, len(response.data), 'count should stay the same') + response = self.client.get("/rest/v2/datasets/unique_preferred_identifiers?latest") + self.assertEqual(ids_len, len(response.data), "count should stay the same") # create new self._create_new_ds() self._create_new_ds() - response = self.client.get('/rest/v2/datasets/unique_preferred_identifiers?latest') - self.assertEqual(len(response.data) - ids_len, 2, 'should be two new PIDs') + response = self.client.get("/rest/v2/datasets/unique_preferred_identifiers?latest") + self.assertEqual(len(response.data) - ids_len, 2, "should be two new PIDs") def test_expand_relations(self): cr = CatalogRecordV2.objects.get(pk=1) cr.contract_id = 1 cr.force_save() - response = self.client.get('/rest/v2/datasets/1?expand_relation=data_catalog,contract') + response = self.client.get("/rest/v2/datasets/1?expand_relation=data_catalog,contract") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('catalog_json' in response.data['data_catalog'], True, response.data['data_catalog']) - self.assertEqual('contract_json' in response.data['contract'], True, response.data['contract']) + self.assertEqual( + "catalog_json" in response.data["data_catalog"], + True, + response.data["data_catalog"], + ) + self.assertEqual( + "contract_json" in response.data["contract"], + True, + response.data["contract"], + ) def test_strip_sensitive_fields(self): """ Strip fields not intended for general public """ + def _check_fields(obj): - for sensitive_field in ['email', 'telephone', 'phone']: - self.assertEqual(sensitive_field not in obj['research_dataset']['curator'][0], True, - 'field %s should have been stripped' % sensitive_field) + for sensitive_field in ["email", "telephone", "phone"]: + self.assertEqual( + sensitive_field not in obj["research_dataset"]["curator"][0], + True, + "field %s should have been stripped" % sensitive_field, + ) for cr in CatalogRecordV2.objects.filter(pk__in=(1, 2, 3)): - cr.research_dataset['curator'][0].update({ - 'email': 'email@mail.com', - 'phone': '123124', - 'telephone': '123124', - }) + cr.research_dataset["curator"][0].update( + { + "email": "email@mail.com", + "phone": "123124", + "telephone": "123124", + } + ) cr.force_save() self.client._credentials = {} - response = self.client.get('/rest/v2/datasets/1') + response = self.client.get("/rest/v2/datasets/1") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) 
_check_fields(response.data) - response = self.client.get('/rest/v2/datasets') + response = self.client.get("/rest/v2/datasets") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - for obj in response.data['results']: + for obj in response.data["results"]: _check_fields(obj) - response = self.client.get('/rest/datasets?pagination=false') + response = self.client.get("/rest/datasets?pagination=false") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) for obj in response.data: _check_fields(obj) def _create_new_ds(self): - new_cr = self.client.get('/rest/v2/datasets/2').data - new_cr.pop('id') - new_cr['research_dataset'].pop('preferred_identifier') - new_cr.pop('identifier') + new_cr = self.client.get("/rest/v2/datasets/2").data + new_cr.pop("id") + new_cr["research_dataset"].pop("preferred_identifier") + new_cr.pop("identifier") self._use_http_authorization() - response = self.client.post('/rest/v2/datasets', new_cr, format='json') + response = self.client.post("/rest/v2/datasets", new_cr, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) @@ -268,207 +296,252 @@ class CatalogRecordApiReadBasicAuthorizationTests(CatalogRecordApiReadCommon): Basic read operations from authorization perspective """ - api_version = 'v2' + api_version = "v2" # THE OK TESTS - def test_returns_all_file_dir_info_for_open_catalog_record_if_no_authorization(self): + def test_returns_all_file_dir_info_for_open_catalog_record_if_no_authorization( + self, + ): open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() # Verify all file and dir details info is returned for open cr /rest/v2/datasets/ without # authorization - self._assert_ok(open_cr_json, 'no') + self._assert_ok(open_cr_json, "no") - def test_returns_all_file_dir_info_for_login_catalog_record_if_no_authorization(self): - login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(use_login_access_type=True) + def test_returns_all_file_dir_info_for_login_catalog_record_if_no_authorization( + self, + ): + login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details( + use_login_access_type=True + ) # Verify all file and dir details info is returned for login cr /rest/v2/datasets/ without authorization - self._assert_ok(login_cr_json, 'no') + self._assert_ok(login_cr_json, "no") @responses.activate - def test_returns_all_file_dir_info_for_open_catalog_record_if_owner_authorization(self): + def test_returns_all_file_dir_info_for_open_catalog_record_if_owner_authorization( + self, + ): self.create_end_user_data_catalogs() open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(True) # Verify all file and dir details info is returned for open owner-owned cr /rest/v2/datasets/ with # owner authorization - self._assert_ok(open_cr_json, 'owner') + self._assert_ok(open_cr_json, "owner") @responses.activate - def test_returns_all_file_dir_info_for_login_catalog_record_if_owner_authorization(self): + def test_returns_all_file_dir_info_for_login_catalog_record_if_owner_authorization( + self, + ): self.create_end_user_data_catalogs() - login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(set_owner=True, - use_login_access_type=True) + login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details( + set_owner=True, use_login_access_type=True + ) # Verify all file and dir details info is returned for login owner-owned cr /rest/v2/datasets/ with # owner 
authorization - self._assert_ok(login_cr_json, 'owner') + self._assert_ok(login_cr_json, "owner") - def test_returns_all_file_dir_info_for_restricted_catalog_record_if_service_authorization(self): + def test_returns_all_file_dir_info_for_restricted_catalog_record_if_service_authorization( + self, + ): restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() # Verify all file and dir details info is returned for restricted cr /rest/v2/datasets/ with # service authorization - self._assert_ok(restricted_cr_json, 'service') + self._assert_ok(restricted_cr_json, "service") @responses.activate - def test_returns_all_file_dir_info_for_restricted_catalog_record_if_owner_authorization(self): + def test_returns_all_file_dir_info_for_restricted_catalog_record_if_owner_authorization( + self, + ): self.create_end_user_data_catalogs() - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details(True) + restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details( + True + ) # Verify all file and dir details info is returned for restricted owner-owned cr /rest/v2/datasets/ with # owner authorization - self._assert_ok(restricted_cr_json, 'owner') + self._assert_ok(restricted_cr_json, "owner") - def test_returns_all_file_dir_info_for_embargoed_catalog_record_if_available_reached_and_no_authorization(self): - available_embargoed_cr_json = self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) + def test_returns_all_file_dir_info_for_embargoed_catalog_record_if_available_reached_and_no_authorization( + self, + ): + available_embargoed_cr_json = ( + self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) + ) # Verify all file and dir details info is returned for embargoed cr /rest/v2/datasets/ when # embargo date has been reached without authorization - self._assert_ok(available_embargoed_cr_json, 'no') + self._assert_ok(available_embargoed_cr_json, "no") # THE FORBIDDEN TESTS - def test_returns_limited_file_dir_info_for_restricted_catalog_record_if_no_authorization(self): + def test_returns_limited_file_dir_info_for_restricted_catalog_record_if_no_authorization( + self, + ): restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() # Verify limited file and dir info for restricted cr /rest/v2/datasets/ without authorization - self._assert_limited_or_no_file_dir_info(restricted_cr_json, 'no') + self._assert_limited_or_no_file_dir_info(restricted_cr_json, "no") - def test_no_file_dir_info_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization(self): - not_available_embargoed_cr_json = self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details( - False) + def test_no_file_dir_info_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization( + self, + ): + not_available_embargoed_cr_json = ( + self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(False) + ) # Verify no file and dir info for embargoed cr /rest/v2/datasets/ when embargo date has not # been reached without authorization - self._assert_limited_or_no_file_dir_info(not_available_embargoed_cr_json, 'no') + self._assert_limited_or_no_file_dir_info(not_available_embargoed_cr_json, "no") def _assert_limited_or_no_file_dir_info(self, cr_json, credentials_type): self._set_http_authorization(credentials_type) - file_amt = len(cr_json['research_dataset']['files']) - dir_amt = len(cr_json['research_dataset']['directories']) - pk = 
cr_json['id'] + file_amt = len(cr_json["research_dataset"]["files"]) + dir_amt = len(cr_json["research_dataset"]["directories"]) + pk = cr_json["id"] - response = self.client.get('/rest/v2/datasets/{0}?include_user_metadata&file_details'.format(pk)) + response = self.client.get( + "/rest/v2/datasets/{0}?include_user_metadata&file_details".format(pk) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['research_dataset']['files']), file_amt) - self.assertEqual(len(response.data['research_dataset']['directories']), dir_amt) + self.assertEqual(len(response.data["research_dataset"]["files"]), file_amt) + self.assertEqual(len(response.data["research_dataset"]["directories"]), dir_amt) - for f in response.data['research_dataset']['files']: - self.assertTrue('details' in f) + for f in response.data["research_dataset"]["files"]: + self.assertTrue("details" in f) # The below assert is a bit arbitrary - self.assertFalse('identifier' in f) - for d in response.data['research_dataset']['directories']: - self.assertTrue('details' in d) + self.assertFalse("identifier" in f) + for d in response.data["research_dataset"]["directories"]: + self.assertTrue("details" in d) # The below assert is a bit arbitrary - self.assertFalse('identifier' in d) + self.assertFalse("identifier" in d) def _assert_ok(self, cr_json, credentials_type): self._set_http_authorization(credentials_type) - file_amt = len(cr_json['research_dataset']['files']) - dir_amt = len(cr_json['research_dataset']['directories']) - pk = cr_json['id'] + file_amt = len(cr_json["research_dataset"]["files"]) + dir_amt = len(cr_json["research_dataset"]["directories"]) + pk = cr_json["id"] - response = self.client.get('/rest/v2/datasets/{0}?include_user_metadata&file_details'.format(pk)) + response = self.client.get( + "/rest/v2/datasets/{0}?include_user_metadata&file_details".format(pk) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['research_dataset']['files']), file_amt) - self.assertEqual(len(response.data['research_dataset']['directories']), dir_amt) + self.assertEqual(len(response.data["research_dataset"]["files"]), file_amt) + self.assertEqual(len(response.data["research_dataset"]["directories"]), dir_amt) - for f in response.data['research_dataset']['files']: - self.assertTrue('details' in f) + for f in response.data["research_dataset"]["files"]: + self.assertTrue("details" in f) # The below assert is a bit arbitrary - self.assertTrue('identifier' in f) - for d in response.data['research_dataset']['directories']: - self.assertTrue('details' in d) + self.assertTrue("identifier" in f) + for d in response.data["research_dataset"]["directories"]: + self.assertTrue("details" in d) # The below assert is a bit arbitrary - self.assertTrue('identifier' in d) + self.assertTrue("identifier" in d) class CatalogRecordApiReadActorFilter(CatalogRecordApiReadCommon): - def test_agents_and_actors(self): # set test conditions cr = CatalogRecordV2.objects.get(pk=11) - cr.research_dataset['curator'] = [] - cr.research_dataset['curator'].append({ - '@type': 'Person', - 'name': 'Tarmo Termiitti', - 'member_of': { - 'identifier': 'org_identifier', - 'name': { - 'en': 'Unique Organization' - } + cr.research_dataset["curator"] = [] + cr.research_dataset["curator"].append( + { + "@type": "Person", + "name": "Tarmo Termiitti", + "member_of": { + "identifier": "org_identifier", + "name": {"en": "Unique Organization"}, + }, } - }) - 
cr.research_dataset['curator'].append({ '@type': 'Person', 'name': 'Keijo Kottarainen' }) - cr.research_dataset['curator'].append({ '@type': 'Person', 'name': 'Janus JƤrvinen' }) - cr.research_dataset['curator'].append({ '@type': 'Person', 'name': 'Laina Sakkonen' }) - cr.research_dataset['curator'].append({ - '@type': 'Person', - 'name': 'Kaisa Kuraattori', - 'member_of': { - 'identifier': 'org_identifier', - 'name': { - 'en': 'Happy Organization' - } + ) + cr.research_dataset["curator"].append({"@type": "Person", "name": "Keijo Kottarainen"}) + cr.research_dataset["curator"].append({"@type": "Person", "name": "Janus JƤrvinen"}) + cr.research_dataset["curator"].append({"@type": "Person", "name": "Laina Sakkonen"}) + cr.research_dataset["curator"].append( + { + "@type": "Person", + "name": "Kaisa Kuraattori", + "member_of": { + "identifier": "org_identifier", + "name": {"en": "Happy Organization"}, + }, } - }) - cr.research_dataset['creator'] = [] - cr.research_dataset['creator'].append({ '@type': 'Organization', 'name': { 'en': 'Unique Organization'} }) - cr.research_dataset['creator'].append({ '@type': 'Organization', 'name': { 'en': 'Happy Organization'} }) - cr.research_dataset['creator'].append({ '@type': 'Organization', 'name': { 'en': 'Sad Organization'} }) - cr.research_dataset['creator'].append({ '@type': 'Organization', 'name': { 'en': 'Brilliant Organization'} }) - cr.research_dataset['creator'].append({ '@type': 'Organization', 'name': {'en': 'Wonderful Organization'} }) - cr.research_dataset['publisher']['name'] = {} - cr.research_dataset['publisher']['name'] = { 'fi': 'Originaali Organisaatio' } + ) + cr.research_dataset["creator"] = [] + cr.research_dataset["creator"].append( + {"@type": "Organization", "name": {"en": "Unique Organization"}} + ) + cr.research_dataset["creator"].append( + {"@type": "Organization", "name": {"en": "Happy Organization"}} + ) + cr.research_dataset["creator"].append( + {"@type": "Organization", "name": {"en": "Sad Organization"}} + ) + cr.research_dataset["creator"].append( + {"@type": "Organization", "name": {"en": "Brilliant Organization"}} + ) + cr.research_dataset["creator"].append( + {"@type": "Organization", "name": {"en": "Wonderful Organization"}} + ) + cr.research_dataset["publisher"]["name"] = {} + cr.research_dataset["publisher"]["name"] = {"fi": "Originaali Organisaatio"} cr.force_save() - response = self.client.get('/rest/v2/datasets?creator_organization=happy') - self.assertEqual(len(response.data['results']), 1, response.data) + response = self.client.get("/rest/v2/datasets?creator_organization=happy") + self.assertEqual(len(response.data["results"]), 1, response.data) - response = self.client.get('/rest/v2/datasets?creator_organization=Brilliant Organization') - self.assertEqual(len(response.data['results']), 1, response.data) + response = self.client.get("/rest/v2/datasets?creator_organization=Brilliant Organization") + self.assertEqual(len(response.data["results"]), 1, response.data) - response = self.client.get('/rest/v2/datasets?curator_person=termiitti') - self.assertEqual(len(response.data['results']), 1, response.data) + response = self.client.get("/rest/v2/datasets?curator_person=termiitti") + self.assertEqual(len(response.data["results"]), 1, response.data) - response = self.client.get('/rest/v2/datasets?curator_person=Laina Sakkonen') - self.assertEqual(len(response.data['results']), 1, response.data) + response = self.client.get("/rest/v2/datasets?curator_person=Laina Sakkonen") + 
self.assertEqual(len(response.data["results"]), 1, response.data)

-        response = self.client.get('/rest/v2/datasets?curator_organization=uniqu')
-        self.assertEqual(len(response.data['results']), 1, response.data)
+        response = self.client.get("/rest/v2/datasets?curator_organization=uniqu")
+        self.assertEqual(len(response.data["results"]), 1, response.data)

-        response = self.client.get('/rest/v2/datasets?curator_organization=Happy Organization')
-        self.assertEqual(len(response.data['results']), 1, response.data)
+        response = self.client.get("/rest/v2/datasets?curator_organization=Happy Organization")
+        self.assertEqual(len(response.data["results"]), 1, response.data)

-        response = self.client.get('/rest/v2/datasets?publisher_organization=originaali Organisaatio')
-        self.assertEqual(len(response.data['results']), 1, response.data)
+        response = self.client.get(
+            "/rest/v2/datasets?publisher_organization=originaali Organisaatio"
+        )
+        self.assertEqual(len(response.data["results"]), 1, response.data)

-        query = 'curator_person=notfound&creator_organization=sad organ&condition_separator=AND'
-        response = self.client.get('/rest/v2/datasets?%s' % query)
-        self.assertEqual(len(response.data['results']), 0, response.data)
+        query = "curator_person=notfound&creator_organization=sad organ&condition_separator=AND"
+        response = self.client.get("/rest/v2/datasets?%s" % query)
+        self.assertEqual(len(response.data["results"]), 0, response.data)

-        query = 'curator_person=notfound&creator_organization=sad organ&condition_separator=OR'
-        response = self.client.get('/rest/v2/datasets?%s' % query)
-        self.assertEqual(len(response.data['results']), 1, response.data)
+        query = "curator_person=notfound&creator_organization=sad organ&condition_separator=OR"
+        response = self.client.get("/rest/v2/datasets?%s" % query)
+        self.assertEqual(len(response.data["results"]), 1, response.data)

        # test filter with pas filter
        """
        Both organization and pas filters internally use Q-filters, which are supposed to be
        AND'ed together.
""" metax_user = settings.API_METAX_USER - self._use_http_authorization(username=metax_user['username'], password=metax_user['password']) + self._use_http_authorization( + username=metax_user["username"], password=metax_user["password"] + ) - response = self.client.get('/rest/v2/datasets?pas_filter=janus&creator_organization=sad organization') - self.assertEqual(len(response.data['results']), 1) + response = self.client.get( + "/rest/v2/datasets?pas_filter=janus&creator_organization=sad organization" + ) + self.assertEqual(len(response.data["results"]), 1) response = self.client.get( - '/rest/v2/datasets?preservation_state=10&pas_filter=kaisa&creator_organization=notfound' + "/rest/v2/datasets?preservation_state=10&pas_filter=kaisa&creator_organization=notfound" ) - self.assertEqual(len(response.data['results']), 0) + self.assertEqual(len(response.data["results"]), 0) class CatalogRecordApiReadQueryParamsTests(CatalogRecordApiReadCommon): @@ -478,126 +551,154 @@ class CatalogRecordApiReadQueryParamsTests(CatalogRecordApiReadCommon): """ def test_read_catalog_record_search_by_curator_1(self): - response = self.client.get('/rest/v2/datasets?curator=id:of:curator:rahikainen') + response = self.client.get("/rest/v2/datasets?curator=id:of:curator:rahikainen") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 10) - self.assertEqual(response.data['results'][0]['research_dataset']['curator'][0]['name'], 'Rahikainen', - 'Curator name is not matching') - self.assertEqual(response.data['results'][4]['research_dataset']['curator'][0]['name'], 'Rahikainen', - 'Curator name is not matching') + self.assertEqual(len(response.data["results"]), 10) + self.assertEqual( + response.data["results"][0]["research_dataset"]["curator"][0]["name"], + "Rahikainen", + "Curator name is not matching", + ) + self.assertEqual( + response.data["results"][4]["research_dataset"]["curator"][0]["name"], + "Rahikainen", + "Curator name is not matching", + ) def test_read_catalog_record_search_by_curator_2(self): - response = self.client.get('/rest/v2/datasets?curator=id:of:curator:jarski') + response = self.client.get("/rest/v2/datasets?curator=id:of:curator:jarski") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data), 4) - self.assertEqual(response.data['results'][0]['research_dataset']['curator'][0]['name'], 'Jarski', - 'Curator name is not matching') - self.assertEqual(response.data['results'][3]['research_dataset']['curator'][0]['name'], 'Jarski', - 'Curator name is not matching') + self.assertEqual( + response.data["results"][0]["research_dataset"]["curator"][0]["name"], + "Jarski", + "Curator name is not matching", + ) + self.assertEqual( + response.data["results"][3]["research_dataset"]["curator"][0]["name"], + "Jarski", + "Curator name is not matching", + ) def test_read_catalog_record_search_by_curator_not_found_1(self): - response = self.client.get('/rest/v2/datasets?curator=Not Found') + response = self.client.get("/rest/v2/datasets?curator=Not Found") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 0) + self.assertEqual(len(response.data["results"]), 0) def test_read_catalog_record_search_by_curator_not_found_case_sensitivity(self): - response = self.client.get('/rest/v2/datasets?curator=id:of:curator:Rahikainen') + response = self.client.get("/rest/v2/datasets?curator=id:of:curator:Rahikainen") self.assertEqual(response.status_code, status.HTTP_200_OK) - 
self.assertEqual(len(response.data['results']), 0) + self.assertEqual(len(response.data["results"]), 0) def test_read_catalog_record_search_by_curator_and_state_1(self): - response = self.client.get('/rest/v2/datasets?curator=id:of:curator:rahikainen&preservation_state=10') + response = self.client.get( + "/rest/v2/datasets?curator=id:of:curator:rahikainen&preservation_state=10" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 2) - self.assertEqual(response.data['results'][0]['id'], 2) - self.assertEqual(response.data['results'][0]['preservation_state'], 10) - self.assertEqual(response.data['results'][0]['research_dataset']['curator'][0]['name'], 'Rahikainen', - 'Curator name is not matching') + self.assertEqual(len(response.data["results"]), 2) + self.assertEqual(response.data["results"][0]["id"], 2) + self.assertEqual(response.data["results"][0]["preservation_state"], 10) + self.assertEqual( + response.data["results"][0]["research_dataset"]["curator"][0]["name"], + "Rahikainen", + "Curator name is not matching", + ) def test_read_catalog_record_search_by_curator_and_state_2(self): - response = self.client.get('/rest/v2/datasets?curator=id:of:curator:rahikainen&preservation_state=40') + response = self.client.get( + "/rest/v2/datasets?curator=id:of:curator:rahikainen&preservation_state=40" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 1) - self.assertEqual(response.data['results'][0]['id'], 4) - self.assertEqual(response.data['results'][0]['preservation_state'], 40) - self.assertEqual(response.data['results'][0]['research_dataset']['curator'][0]['name'], 'Rahikainen', - 'Curator name is not matching') + self.assertEqual(len(response.data["results"]), 1) + self.assertEqual(response.data["results"][0]["id"], 4) + self.assertEqual(response.data["results"][0]["preservation_state"], 40) + self.assertEqual( + response.data["results"][0]["research_dataset"]["curator"][0]["name"], + "Rahikainen", + "Curator name is not matching", + ) def test_read_catalog_record_search_by_curator_and_state_not_found(self): - response = self.client.get('/rest/v2/datasets?curator=id:of:curator:rahikainen&preservation_state=55') + response = self.client.get( + "/rest/v2/datasets?curator=id:of:curator:rahikainen&preservation_state=55" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 0) + self.assertEqual(len(response.data["results"]), 0) def test_read_catalog_record_search_by_creator_id(self): cr = CatalogRecordV2.objects.get(pk=1) - cr.user_created = '123' + cr.user_created = "123" cr.force_save() - response = self.client.get('/rest/v2/datasets?user_created=123') + response = self.client.get("/rest/v2/datasets?user_created=123") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 1) - self.assertEqual(response.data['results'][0]['user_created'], '123') + self.assertEqual(len(response.data["results"]), 1) + self.assertEqual(response.data["results"][0]["user_created"], "123") def test_read_catalog_record_search_by_metadata_provider_user(self): - response = self.client.get('/rest/v2/datasets?metadata_provider_user=123') + response = self.client.get("/rest/v2/datasets?metadata_provider_user=123") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['count'], 0) + self.assertEqual(response.data["count"], 0) cr = 
CatalogRecordV2.objects.get(pk=1) - cr.metadata_provider_user = '123' + cr.metadata_provider_user = "123" cr.force_save() - response = self.client.get('/rest/v2/datasets?metadata_provider_user=123') + response = self.client.get("/rest/v2/datasets?metadata_provider_user=123") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['count'], 1) + self.assertEqual(response.data["count"], 1) def test_read_catalog_record_search_by_metadata_owner_org(self): - owner_org = 'org_id' + owner_org = "org_id" for cr in CatalogRecordV2.objects.filter(pk__in=[1, 2, 3]): cr.metadata_owner_org = owner_org cr.force_save() - owner_org_2 = 'org_id_2' + owner_org_2 = "org_id_2" for cr in CatalogRecordV2.objects.filter(pk__in=[4, 5, 6]): cr.metadata_owner_org = owner_org_2 cr.force_save() - response = self.client.get('/rest/v2/datasets?metadata_owner_org=%s' % owner_org) + response = self.client.get("/rest/v2/datasets?metadata_owner_org=%s" % owner_org) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 3) + self.assertEqual(len(response.data["results"]), 3) - response = self.client.get('/rest/v2/datasets?metadata_owner_org=%s,%s' % (owner_org, owner_org_2)) + response = self.client.get( + "/rest/v2/datasets?metadata_owner_org=%s,%s" % (owner_org, owner_org_2) + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 6) + self.assertEqual(len(response.data["results"]), 6) def test_filter_by_contract_org_identifier(self): """ Test filtering by contract_org_identifier, which matches using iregex """ metax_user = settings.API_METAX_USER - self._use_http_authorization(username=metax_user['username'], password=metax_user['password']) + self._use_http_authorization( + username=metax_user["username"], password=metax_user["password"] + ) - response = self.client.get('/rest/v2/datasets?contract_org_identifier=2345') + response = self.client.get("/rest/v2/datasets?contract_org_identifier=2345") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 10) + self.assertEqual(len(response.data["results"]), 10) - response = self.client.get('/rest/v2/datasets?contract_org_identifier=1234567-1') - self.assertEqual(len(response.data['results']), 10) + response = self.client.get("/rest/v2/datasets?contract_org_identifier=1234567-1") + self.assertEqual(len(response.data["results"]), 10) - response = self.client.get('/rest/v2/datasets?contract_org_identifier=1234567-123') - self.assertEqual(len(response.data['results']), 0) + response = self.client.get("/rest/v2/datasets?contract_org_identifier=1234567-123") + self.assertEqual(len(response.data["results"]), 0) def test_filter_by_contract_org_identifier_is_restricted(self): - response = self.client.get('/rest/v2/datasets?contract_org_identifier=1234') + response = self.client.get("/rest/v2/datasets?contract_org_identifier=1234") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_read_catalog_record_search_by_data_catalog_id(self): from metax_api.models.data_catalog import DataCatalog # Create a new data catalog - dc = self._get_object_from_test_data('datacatalog', requested_index=0) - dc_id = 'original_dc_identifier' - dc['catalog_json']['identifier'] = dc_id - self.client.post('/rest/v2/datacatalogs', dc, format="json") + dc = self._get_object_from_test_data("datacatalog", requested_index=0) + dc_id = "original_dc_identifier" + dc["catalog_json"]["identifier"] 
= dc_id + self.client.post("/rest/v2/datacatalogs", dc, format="json") # Set the new data catalog for a catalog record and store the catalog record cr = CatalogRecordV2.objects.get(pk=1) @@ -605,47 +706,47 @@ def test_read_catalog_record_search_by_data_catalog_id(self): cr.force_save() # Verify - response = self.client.get('/rest/v2/datasets?data_catalog={0}'.format(dc_id)) + response = self.client.get("/rest/v2/datasets?data_catalog={0}".format(dc_id)) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 1) - self.assertEqual(response.data['results'][0]['data_catalog']['identifier'], dc_id) + self.assertEqual(len(response.data["results"]), 1) + self.assertEqual(response.data["results"][0]["data_catalog"]["identifier"], dc_id) def test_filter_by_deprecated(self): cr = CatalogRecordV2.objects.get(pk=1) cr.deprecated = True cr.force_save() - response = self.client.get('/rest/v2/datasets?deprecated=true') + response = self.client.get("/rest/v2/datasets?deprecated=true") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 1, response.data['results']) - self.assertTrue(response.data['results'][0]['deprecated'], response.data) + self.assertEqual(len(response.data["results"]), 1, response.data["results"]) + self.assertTrue(response.data["results"][0]["deprecated"], response.data) - response = self.client.get('/rest/v2/datasets?deprecated=false') + response = self.client.get("/rest/v2/datasets?deprecated=false") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['results'][0]['id'], 2, response.data) - self.assertFalse(response.data['results'][0]['deprecated'], response.data) + self.assertEqual(response.data["results"][0]["id"], 2, response.data) + self.assertFalse(response.data["results"][0]["deprecated"], response.data) - response = self.client.get('/rest/v2/datasets?deprecated=badbool') + response = self.client.get("/rest/v2/datasets?deprecated=badbool") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_filter_by_api_version(self): # update one dataset to v2 so that we get non-zero values for that as well - response = self.client.put('/rest/v2/datasets', [self.cr_from_test_data], format='json') + response = self.client.put("/rest/v2/datasets", [self.cr_from_test_data], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) # both models return correct count because v2 model is just a proxy - v1_count = CatalogRecordV2.objects.filter(api_meta__contains={'version': 1}).count() + v1_count = CatalogRecordV2.objects.filter(api_meta__contains={"version": 1}).count() - response = self.client.get('/rest/v2/datasets?api_version=1') + response = self.client.get("/rest/v2/datasets?api_version=1") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(v1_count, response.data['count'], response.data) + self.assertEqual(v1_count, response.data["count"], response.data) - v2_count = CatalogRecordV2.objects.filter(api_meta__contains={'version': 2}).count() + v2_count = CatalogRecordV2.objects.filter(api_meta__contains={"version": 2}).count() - response = self.client.get('/rest/v2/datasets?api_version=2') + response = self.client.get("/rest/v2/datasets?api_version=2") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(v2_count, response.data['count'], response.data) + self.assertEqual(v2_count, response.data["count"], response.data) def test_filter_by_legacy(self): 
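
The `api_meta__contains={"version": ...}` lookups in the api_version test above rely on JSONB containment. A minimal standalone sketch of that query style follows; the `Record` model here is hypothetical, standing in for CatalogRecordV2:

    # Hypothetical model illustrating the JSONB containment lookup assumed above;
    # Metax's real model is CatalogRecordV2 with an api_meta JSON field.
    from django.db import models

    class Record(models.Model):
        api_meta = models.JSONField(default=dict)

    # On PostgreSQL, __contains does superset matching: a row whose api_meta is
    # {"version": 1, "version_changed": "..."} also matches this filter.
    v1_records = Record.objects.filter(api_meta__contains={"version": 1})
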
self.create_legacy_data_catalogs() @@ -655,15 +756,16 @@ def test_filter_by_legacy(self): non_legacy_count = CatalogRecordV2.objects.exclude( data_catalog__catalog_json__identifier__in=settings.LEGACY_CATALOGS ).count() - response = self.client.get('/rest/v2/datasets') + response = self.client.get("/rest/v2/datasets") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(non_legacy_count, response.data['count'], response.data) + self.assertEqual(non_legacy_count, response.data["count"], response.data) # legacy datasets can be included with a parameter count_all = CatalogRecordV2.objects.count() - response = self.client.get('/rest/v2/datasets?include_legacy') + response = self.client.get("/rest/v2/datasets?include_legacy") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(count_all, response.data['count'], response.data) + self.assertEqual(count_all, response.data["count"], response.data) + class CatalogRecordApiReadXMLTransformationTests(CatalogRecordApiReadCommon): @@ -673,52 +775,63 @@ class CatalogRecordApiReadXMLTransformationTests(CatalogRecordApiReadCommon): def _create_dataset_with_doi(self): # Create ida data catalog - dc = self._get_object_from_test_data('datacatalog', requested_index=0) + dc = self._get_object_from_test_data("datacatalog", requested_index=0) dc_id = settings.IDA_DATA_CATALOG_IDENTIFIER - dc['catalog_json']['identifier'] = dc_id - self.client.post('/rest/v2/datacatalogs', dc, format="json") + dc["catalog_json"]["identifier"] = dc_id + self.client.post("/rest/v2/datacatalogs", dc, format="json") # Create new cr by requesting a doi identifier - cr_json = self.client.get('/rest/v2/datasets/1').data - cr_json.pop('preservation_identifier', None) - cr_json.pop('identifier') - cr_json['research_dataset'].pop('preferred_identifier', None) - cr_json['research_dataset']['publisher'] = {'@type': 'Organization', 'name': {'und': 'Testaaja'}} - cr_json['research_dataset']['issued'] = '2010-01-01' - cr_json['data_catalog'] = dc_id - response = self.client.post('/rest/v2/datasets?pid_type=doi', cr_json, format="json") + cr_json = self.client.get("/rest/v2/datasets/1").data + cr_json.pop("preservation_identifier", None) + cr_json.pop("identifier") + cr_json["research_dataset"].pop("preferred_identifier", None) + cr_json["research_dataset"]["publisher"] = { + "@type": "Organization", + "name": {"und": "Testaaja"}, + } + cr_json["research_dataset"]["issued"] = "2010-01-01" + cr_json["data_catalog"] = dc_id + response = self.client.post("/rest/v2/datasets?pid_type=doi", cr_json, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) return response.data def test_read_dataset_xml_format_metax(self): - response = self.client.get('/rest/v2/datasets/1?dataset_format=metax') - self._check_dataset_xml_format_response(response, '%s' % doi[len('doi:'):] in response.data, - True, response.data) + self.assertEqual( + '%s' % doi[len("doi:") :] in response.data, + True, + response.data, + ) def test_read_dataset_format_datacite_odd_lang_abbrevation(self): cr = CatalogRecordV2.objects.get(pk=1) - cr.research_dataset['publisher'] = {'@type': 'Organization', 'name': {'zk': 'Testiorganisaatio'}} + cr.research_dataset["publisher"] = { + "@type": "Organization", + "name": {"zk": "Testiorganisaatio"}, + } cr.force_save() - response = self.client.get('/rest/v2/datasets/1?dataset_format=fairdata_datacite') + response = 
self.client.get("/rest/v2/datasets/1?dataset_format=fairdata_datacite") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_read_dataset_format_dummy_datacite_doi(self): @@ -727,28 +840,46 @@ def test_read_dataset_format_dummy_datacite_doi(self): and identifier value prefixed with 10.0/. If a real DOI is available in the dataset, then dummy should NOT be returned. """ - pid = self.client.get('/rest/v2/datasets/12').data['research_dataset']['preferred_identifier'] - self.assertEqual(pid.startswith('doi:'), False, pid) + pid = self.client.get("/rest/v2/datasets/12").data["research_dataset"][ + "preferred_identifier" + ] + self.assertEqual(pid.startswith("doi:"), False, pid) - for dataset_format in ['datacite', 'fairdata_datacite']: - response = self.client.get('/rest/v2/datasets/12?dataset_format=%s&dummy_doi=true' % dataset_format) + for dataset_format in ["datacite", "fairdata_datacite"]: + response = self.client.get( + "/rest/v2/datasets/12?dataset_format=%s&dummy_doi=true" % dataset_format + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('' in response.data, True, response.data) - self.assertEqual('10.0/%s' % pid in response.data, True, response.data) + self.assertEqual( + '' in response.data, + True, + response.data, + ) + self.assertEqual("10.0/%s" % pid in response.data, True, response.data) # ensure if a real doi exists, then dummy should never be returned cr = self._create_dataset_with_doi() - response = self.client.get('/rest/v2/datasets/%d?dataset_format=datacite&dummy_doi=true' % cr['id']) + response = self.client.get( + "/rest/v2/datasets/%d?dataset_format=datacite&dummy_doi=true" % cr["id"] + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual('' in response.data, True, response.data) - self.assertEqual(cr['preservation_identifier'][len('doi:'):] in response.data, True, response.data) - self.assertEqual('10.0/%s' % pid in response.data, False, response.data) + self.assertEqual( + cr["preservation_identifier"][len("doi:") :] in response.data, + True, + response.data, + ) + self.assertEqual("10.0/%s" % pid in response.data, False, response.data) def _check_dataset_xml_format_response(self, response, element_name): self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('content-type' in response._headers, True, response._headers) - self.assertEqual('application/xml' in response._headers['content-type'][1], True, response._headers) - self.assertEqual(' 6) @@ -801,35 +934,64 @@ class CatalogRecordApiReadPopulateFileInfoTests(CatalogRecordApiReadCommon): def test_file_details_populated(self): # without the flag nothing should happen - response = self.client.get('/rest/v2/datasets/1?include_user_metadata') - self.assertEqual(all('details' not in f for f in response.data['research_dataset']['files']), True) + response = self.client.get("/rest/v2/datasets/1?include_user_metadata") + self.assertEqual( + all("details" not in f for f in response.data["research_dataset"]["files"]), + True, + ) - response = self.client.get('/rest/v2/datasets/1?include_user_metadata&file_details') + response = self.client.get("/rest/v2/datasets/1?include_user_metadata&file_details") self.assertEqual(response.status_code, status.HTTP_200_OK) # check all fiels have the extra key 'details', and all details have the key 'identifier'. # presumably the details were then filled in. 
- self.assertEqual(all('details' in f for f in response.data['research_dataset']['files']), True) - self.assertEqual(all('identifier' in f['details'] for f in response.data['research_dataset']['files']), True) + self.assertEqual( + all("details" in f for f in response.data["research_dataset"]["files"]), + True, + ) + self.assertEqual( + all("identifier" in f["details"] for f in response.data["research_dataset"]["files"]), + True, + ) def test_directory_details_populated(self): # id 11 is one of the example datasets with full details. they should have a couple # of directories attached. CatalogRecordV2.objects.get(pk=11).calculate_directory_byte_sizes_and_file_counts() - response = self.client.get('/rest/v2/datasets/11?include_user_metadata&file_details') + response = self.client.get("/rest/v2/datasets/11?include_user_metadata&file_details") self.assertEqual(response.status_code, status.HTTP_200_OK) # check all dirs have the extra key 'details', and all details have the key 'identifier'. # presumably the details were then filled in. - self.assertEqual(all('details' in f for f in response.data['research_dataset']['directories']), True) - self.assertEqual(all('identifier' in f['details'] for f in response.data['research_dataset']['directories']), - True) + self.assertEqual( + all("details" in f for f in response.data["research_dataset"]["directories"]), + True, + ) + self.assertEqual( + all( + "identifier" in f["details"] + for f in response.data["research_dataset"]["directories"] + ), + True, + ) # additionally check that file counts and total byte sizes are as expected - self.assertEqual(response.data['research_dataset']['directories'][0]['details']['byte_size'], 21000) - self.assertEqual(response.data['research_dataset']['directories'][1]['details']['byte_size'], 21000) - self.assertEqual(response.data['research_dataset']['directories'][0]['details']['file_count'], 20) - self.assertEqual(response.data['research_dataset']['directories'][1]['details']['file_count'], 20) + self.assertEqual( + response.data["research_dataset"]["directories"][0]["details"]["byte_size"], + 21000, + ) + self.assertEqual( + response.data["research_dataset"]["directories"][1]["details"]["byte_size"], + 21000, + ) + self.assertEqual( + response.data["research_dataset"]["directories"][0]["details"]["file_count"], + 20, + ) + self.assertEqual( + response.data["research_dataset"]["directories"][1]["details"]["file_count"], + 20, + ) def test_file_details_for_deprecated_datasets(self): """ @@ -842,12 +1004,15 @@ def test_file_details_for_deprecated_datasets(self): cr = CatalogRecordV2.objects.get(pk=11) file_identifiers = File.objects.filter( - project_identifier=cr.files.all()[0].project_identifier).values_list('identifier', flat=True) + project_identifier=cr.files.all()[0].project_identifier + ).values_list("identifier", flat=True) - response = self.client.delete('/rest/v2/files', data=file_identifiers, format='json') + response = self.client.delete("/rest/v2/files", data=file_identifiers, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) - response = self.client.get('/rest/v2/datasets/11?include_user_metadata&file_details', format='json') + response = self.client.get( + "/rest/v2/datasets/11?include_user_metadata&file_details", format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) @@ -864,21 +1029,21 @@ def test_returns_all_details_for_open_catalog_record_if_no_authorization(self): # Verify all file and dir details info is returned for open cr 
/rest/v2/datasets/?file_details without # authorization - self._assert_ok(open_cr_json, 'no') + self._assert_ok(open_cr_json, "no") def test_returns_all_details_for_login_catalog_record_if_no_authorization(self): open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() # Verify all file and dir details info is returned for open cr /rest/v2/datasets/?file_details without # authorization - self._assert_ok(open_cr_json, 'no') + self._assert_ok(open_cr_json, "no") def test_returns_all_details_for_open_catalog_record_if_service_authorization(self): open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() # Verify all file and dir details info is returned for open cr /rest/v2/datasets/?file_details with # service authorization - self._assert_ok(open_cr_json, 'service') + self._assert_ok(open_cr_json, "service") @responses.activate def test_returns_all_details_for_open_catalog_record_if_owner_authorization(self): @@ -887,88 +1052,108 @@ def test_returns_all_details_for_open_catalog_record_if_owner_authorization(self # Verify all file and dir details info is returned for open owner-owned # cr /rest/v2/datasets/?file_details with owner authorization - self._assert_ok(open_cr_json, 'owner') + self._assert_ok(open_cr_json, "owner") - def test_returns_all_details_for_restricted_catalog_record_if_service_authorization(self): + def test_returns_all_details_for_restricted_catalog_record_if_service_authorization( + self, + ): restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() # Verify all file and dir details info is returned for restricted cr /rest/v2/datasets/?file_details with # service authorization - self._assert_ok(restricted_cr_json, 'service') + self._assert_ok(restricted_cr_json, "service") @responses.activate - def test_returns_all_details_for_restricted_catalog_record_if_owner_authorization(self): + def test_returns_all_details_for_restricted_catalog_record_if_owner_authorization( + self, + ): self.create_end_user_data_catalogs() - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details(True) + restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details( + True + ) # Verify all file and dir details info is returned for restricted owner-owned cr # /rest/v2/datasets/?file_details with owner authorization - self._assert_ok(restricted_cr_json, 'owner') + self._assert_ok(restricted_cr_json, "owner") - def test_returns_all_details_for_embargoed_catalog_record_if_available_reached_and_no_authorization(self): - available_embargoed_cr_json = self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) + def test_returns_all_details_for_embargoed_catalog_record_if_available_reached_and_no_authorization( + self, + ): + available_embargoed_cr_json = ( + self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) + ) # Verify all file and dir details info is returned for embargoed cr /rest/v2/datasets/?file_details when # embargo date has been reached without authorization - self._assert_ok(available_embargoed_cr_json, 'no') + self._assert_ok(available_embargoed_cr_json, "no") # THE FORBIDDEN TESTS - def test_returns_limited_info_for_restricted_catalog_record_if_no_authorization(self): + def test_returns_limited_info_for_restricted_catalog_record_if_no_authorization( + self, + ): restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() # Verify limited file and dir info for restricted cr 
/rest/v2/datasets/?file_details without authorization - self._assert_limited_or_no_file_dir_info(restricted_cr_json, 'no') + self._assert_limited_or_no_file_dir_info(restricted_cr_json, "no") - def test_returns_limited_info_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization(self): - not_available_embargoed_cr_json = self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(False) + def test_returns_limited_info_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization( + self, + ): + not_available_embargoed_cr_json = ( + self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(False) + ) # Verify limited file and dir info for embargoed cr /rest/v2/datasets/?file_details when embargo date has # not been reached without authorization - self._assert_limited_or_no_file_dir_info(not_available_embargoed_cr_json, 'no') + self._assert_limited_or_no_file_dir_info(not_available_embargoed_cr_json, "no") def _assert_limited_or_no_file_dir_info(self, cr_json, credentials_type): self._set_http_authorization(credentials_type) - file_amt = len(cr_json['research_dataset']['files']) - dir_amt = len(cr_json['research_dataset']['directories']) - pk = cr_json['id'] + file_amt = len(cr_json["research_dataset"]["files"]) + dir_amt = len(cr_json["research_dataset"]["directories"]) + pk = cr_json["id"] - response = self.client.get('/rest/v2/datasets/{0}?include_user_metadata&file_details'.format(pk)) + response = self.client.get( + "/rest/v2/datasets/{0}?include_user_metadata&file_details".format(pk) + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['files']), file_amt) - self.assertEqual(len(response.data['research_dataset']['directories']), dir_amt) + self.assertEqual(len(response.data["research_dataset"]["files"]), file_amt) + self.assertEqual(len(response.data["research_dataset"]["directories"]), dir_amt) - for f in response.data['research_dataset']['files']: - self.assertTrue('details' in f) + for f in response.data["research_dataset"]["files"]: + self.assertTrue("details" in f) # The below assert is a bit arbitrary - self.assertTrue(len(f['details'].keys()) < 5) - for d in response.data['research_dataset']['directories']: - self.assertTrue('details' in d) + self.assertTrue(len(f["details"].keys()) < 5) + for d in response.data["research_dataset"]["directories"]: + self.assertTrue("details" in d) # The below assert is a bit arbitrary - self.assertTrue(len(d['details'].keys()) < 5) + self.assertTrue(len(d["details"].keys()) < 5) def _assert_ok(self, cr_json, credentials_type): self._set_http_authorization(credentials_type) - file_amt = len(cr_json['research_dataset']['files']) - dir_amt = len(cr_json['research_dataset']['directories']) - pk = cr_json['id'] + file_amt = len(cr_json["research_dataset"]["files"]) + dir_amt = len(cr_json["research_dataset"]["directories"]) + pk = cr_json["id"] - response = self.client.get('/rest/v2/datasets/{0}?include_user_metadata&file_details'.format(pk)) + response = self.client.get( + "/rest/v2/datasets/{0}?include_user_metadata&file_details".format(pk) + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['files']), file_amt) - self.assertEqual(len(response.data['research_dataset']['directories']), dir_amt) + self.assertEqual(len(response.data["research_dataset"]["files"]), file_amt) + self.assertEqual(len(response.data["research_dataset"]["directories"]), dir_amt) - for f in 
response.data['research_dataset']['files']: - self.assertTrue('details' in f) + for f in response.data["research_dataset"]["files"]: + self.assertTrue("details" in f) # The below assert is a bit arbitrary - self.assertTrue(len(f['details'].keys()) > 5) - for d in response.data['research_dataset']['directories']: - self.assertTrue('details' in d) + self.assertTrue(len(f["details"].keys()) > 5) + for d in response.data["research_dataset"]["directories"]: + self.assertTrue("details" in d) # The below assert is a bit arbitrary - self.assertTrue(len(d['details'].keys()) > 5) + self.assertTrue(len(d["details"].keys()) > 5) class CatalogRecordApiReadFiles(CatalogRecordApiReadCommon): @@ -979,7 +1164,7 @@ class CatalogRecordApiReadFiles(CatalogRecordApiReadCommon): def test_get_files(self): file_count = CatalogRecordV2.objects.get(pk=1).files.count() - response = self.client.get('/rest/v2/datasets/1/files') + response = self.client.get("/rest/v2/datasets/1/files") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data), file_count) @@ -987,19 +1172,19 @@ def test_get_files_specified_fields_only(self): """ Test use of query parameter ?file_fields=x,y,z """ - response = self.client.get('/rest/v2/datasets/1/files?file_fields=identifier,file_path') + response = self.client.get("/rest/v2/datasets/1/files?file_fields=identifier,file_path") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data[0].keys()), 2) - self.assertEqual('identifier' in response.data[0], True) - self.assertEqual('file_path' in response.data[0], True) + self.assertEqual("identifier" in response.data[0], True) + self.assertEqual("file_path" in response.data[0], True) def test_removed_query_param(self): """ Test use of query parameter removed_files=bool in /datasets/pid/files, which should return only deleted files. 
""" - response = self.client.get('/rest/v2/datasets/1/files') - file_ids_before = set([ f['id'] for f in response.data ]) + response = self.client.get("/rest/v2/datasets/1/files") + file_ids_before = set([f["id"] for f in response.data]) obj = File.objects.get(pk=1) obj.removed = True obj.force_save() @@ -1007,14 +1192,14 @@ def test_removed_query_param(self): obj2.removed = True obj2.force_save() - response = self.client.get('/rest/v2/datasets/1/files') + response = self.client.get("/rest/v2/datasets/1/files") self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) self.assertEqual(len(response.data), 0) - response = self.client.get('/rest/v2/datasets/1/files?removed_files=true') + response = self.client.get("/rest/v2/datasets/1/files?removed_files=true") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual(len(response.data), len(file_ids_before)) - self.assertEqual(file_ids_before, set([ f['id'] for f in response.data ])) + self.assertEqual(file_ids_before, set([f["id"] for f in response.data])) class CatalogRecordApiReadFilesAuthorization(CatalogRecordApiReadCommon): @@ -1029,13 +1214,13 @@ def test_returns_ok_for_open_catalog_record_if_no_authorization(self): open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() # Verify open dataset /rest/v2/datasets//files returns all the files even without authorization - self._assert_ok(open_cr_json, 'no') + self._assert_ok(open_cr_json, "no") def test_returns_ok_for_open_catalog_record_if_service_authorization(self): open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() # Verify open dataset /rest/v2/datasets//files returns all the files with service authorization - self._assert_ok(open_cr_json, 'service') + self._assert_ok(open_cr_json, "service") @responses.activate def test_returns_ok_for_open_catalog_record_if_owner_authorization(self): @@ -1043,29 +1228,35 @@ def test_returns_ok_for_open_catalog_record_if_owner_authorization(self): open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(True) # Verify open owner-owned dataset /rest/v2/datasets//files returns all the files with owner authorization - self._assert_ok(open_cr_json, 'owner') + self._assert_ok(open_cr_json, "owner") def test_returns_ok_for_restricted_catalog_record_if_service_authorization(self): restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() # Verify restricted dataset /rest/v2/datasets//files returns all the files with service authorization - self._assert_ok(restricted_cr_json, 'service') + self._assert_ok(restricted_cr_json, "service") @responses.activate def test_returns_ok_for_restricted_catalog_record_if_owner_authorization(self): self.create_end_user_data_catalogs() - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details(True) + restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details( + True + ) # Verify restricted owner-owned dataset /rest/v2/datasets//files returns all the files with # owner authorization - self._assert_ok(restricted_cr_json, 'owner') + self._assert_ok(restricted_cr_json, "owner") - def test_returns_ok_for_embargoed_catalog_record_if_available_reached_and_no_authorization(self): - available_embargoed_cr_json = self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) + def test_returns_ok_for_embargoed_catalog_record_if_available_reached_and_no_authorization( + self, + ): + 
available_embargoed_cr_json = ( + self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) + ) # Verify restricted dataset /rest/v2/datasets//files returns ok when embargo date has been reached without # authorization - self._assert_ok(available_embargoed_cr_json, 'no') + self._assert_ok(available_embargoed_cr_json, "no") # THE FORBIDDEN TESTS @@ -1073,26 +1264,32 @@ def test_returns_forbidden_for_restricted_catalog_record_if_no_authorization(sel restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() # Verify restricted dataset /rest/v2/datasets//files returns forbidden without authorization - self._assert_forbidden(restricted_cr_json, 'no') + self._assert_forbidden(restricted_cr_json, "no") - def test_returns_forbidden_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization(self): - not_available_embargoed_cr_json = self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(False) + def test_returns_forbidden_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization( + self, + ): + not_available_embargoed_cr_json = ( + self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(False) + ) # Verify restricted dataset /rest/v2/datasets//files returns forbidden when embargo # date has not been reached - self._assert_forbidden(not_available_embargoed_cr_json, 'no') + self._assert_forbidden(not_available_embargoed_cr_json, "no") def _assert_forbidden(self, cr_json, credentials_type): - pk = cr_json['id'] + pk = cr_json["id"] self._set_http_authorization(credentials_type) - response = self.client.get('/rest/v2/datasets/{0}/files'.format(pk)) + response = self.client.get("/rest/v2/datasets/{0}/files".format(pk)) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def _assert_ok(self, cr_json, credentials_type): - pk = cr_json['id'] + pk = cr_json["id"] self._set_http_authorization(credentials_type) - rd = cr_json['research_dataset'] - file_amt = len(rd['files']) + sum(int(d['details']['file_count']) for d in rd['directories']) - response = self.client.get('/rest/v2/datasets/{0}/files'.format(pk)) + rd = cr_json["research_dataset"] + file_amt = len(rd["files"]) + sum( + int(d["details"]["file_count"]) for d in rd["directories"] + ) + response = self.client.get("/rest/v2/datasets/{0}/files".format(pk)) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data), file_amt) diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/referencedata.py b/src/metax_api/tests/api/rest/v2/views/datasets/referencedata.py index 5032cb2b..6233d399 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/referencedata.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/referencedata.py @@ -33,26 +33,28 @@ def test_organization_name_is_required(self): # simple case cr = deepcopy(self.cr_full_ida_test_data) - cr['research_dataset']['curator'] = [{ - '@type': 'Organization', - 'identifier': 'not found!', - # no name! - }] - response = self.client.post('/rest/v2/datasets', cr, format="json") + cr["research_dataset"]["curator"] = [ + { + "@type": "Organization", + "identifier": "not found!", + # no name! + } + ] + response = self.client.post("/rest/v2/datasets", cr, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # a more complex case. 
ensure organizations are found from deep structures cr = deepcopy(self.cr_full_ida_test_data) - org = cr['research_dataset']['provenance'][0]['was_associated_with'][0] - del org['name'] # should cause the error - org['@type'] = 'Organization' - org['identifier'] = 'not found!' - response = self.client.post('/rest/v2/datasets', cr, format="json") + org = cr["research_dataset"]["provenance"][0]["was_associated_with"][0] + del org["name"] # should cause the error + org["@type"] = "Organization" + org["identifier"] = "not found!" + response = self.client.post("/rest/v2/datasets", cr, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # try again. should be ok - org['identifier'] = 'http://uri.suomi.fi/codelist/fairdata/organization/code/10076' - response = self.client.post('/rest/v2/datasets', cr, format="json") + org["identifier"] = "http://uri.suomi.fi/codelist/fairdata/organization/code/10076" + response = self.client.post("/rest/v2/datasets", cr, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def test_catalog_record_reference_data_missing_ok(self): @@ -61,11 +63,14 @@ def test_catalog_record_reference_data_missing_ok(self): cache for whatever reason, and successfully finish the request """ cache = RedisClient() - cache.delete('reference_data') - self.assertEqual(cache.get('reference_data', master=True), None, - 'cache ref data should be missing after cache.delete()') + cache.delete("reference_data") + self.assertEqual( + cache.get("reference_data", master=True), + None, + "cache ref data should be missing after cache.delete()", + ) - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def test_missing_license_identifier_ok(self): @@ -73,66 +78,80 @@ def test_missing_license_identifier_ok(self): Missing license identifier is ok if url is provided. Works on att and ida datasets """ - rd_ida = self.cr_full_ida_test_data['research_dataset'] - rd_ida['access_rights']['license'] = [{ - 'license': "http://a.very.nice.custom/url" - }] - response = self.client.post('/rest/v2/datasets', self.cr_full_ida_test_data, format="json") + rd_ida = self.cr_full_ida_test_data["research_dataset"] + rd_ida["access_rights"]["license"] = [{"license": "http://a.very.nice.custom/url"}] + response = self.client.post("/rest/v2/datasets", self.cr_full_ida_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(len(response.data['research_dataset']['access_rights']['license'][0]), 1, response.data) + self.assertEqual( + len(response.data["research_dataset"]["access_rights"]["license"][0]), + 1, + response.data, + ) - rd_att = self.cr_full_att_test_data['research_dataset'] - rd_att['access_rights']['license'] = [{ - 'license': "http://also.fine.custom/uri", - 'description': { - 'en': "This is very informative description of this custom license." + rd_att = self.cr_full_att_test_data["research_dataset"] + rd_att["access_rights"]["license"] = [ + { + "license": "http://also.fine.custom/uri", + "description": { + "en": "This is very informative description of this custom license." + }, } - }] - rd_att['remote_resources'][0]['license'] = [{ - 'license': "http://cool.remote.uri", - 'description': { - 'en': "Proof that also remote licenses can be used with custom urls." 
@@ -73,66 +78,80 @@ def test_missing_license_identifier_ok(self):
         """
         Missing license identifier is ok if url is provided. Works on att and ida datasets
         """
-        rd_ida = self.cr_full_ida_test_data['research_dataset']
-        rd_ida['access_rights']['license'] = [{
-            'license': "http://a.very.nice.custom/url"
-        }]
-        response = self.client.post('/rest/v2/datasets', self.cr_full_ida_test_data, format="json")
+        rd_ida = self.cr_full_ida_test_data["research_dataset"]
+        rd_ida["access_rights"]["license"] = [{"license": "http://a.very.nice.custom/url"}]
+        response = self.client.post("/rest/v2/datasets", self.cr_full_ida_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual(len(response.data['research_dataset']['access_rights']['license'][0]), 1, response.data)
+        self.assertEqual(
+            len(response.data["research_dataset"]["access_rights"]["license"][0]),
+            1,
+            response.data,
+        )
 
-        rd_att = self.cr_full_att_test_data['research_dataset']
-        rd_att['access_rights']['license'] = [{
-            'license': "http://also.fine.custom/uri",
-            'description': {
-                'en': "This is very informative description of this custom license."
+        rd_att = self.cr_full_att_test_data["research_dataset"]
+        rd_att["access_rights"]["license"] = [
+            {
+                "license": "http://also.fine.custom/uri",
+                "description": {
+                    "en": "This is very informative description of this custom license."
+                },
             }
-        }]
-        rd_att['remote_resources'][0]['license'] = [{
-            'license': "http://cool.remote.uri",
-            'description': {
-                'en': "Proof that also remote licenses can be used with custom urls."
+        ]
+        rd_att["remote_resources"][0]["license"] = [
+            {
+                "license": "http://cool.remote.uri",
+                "description": {
+                    "en": "Proof that also remote licenses can be used with custom urls."
+                },
             }
-        }]
-        response = self.client.post('/rest/v2/datasets', self.cr_full_att_test_data, format="json")
+        ]
+        response = self.client.post("/rest/v2/datasets", self.cr_full_att_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual(len(response.data['research_dataset']['access_rights']['license'][0]), 2, response.data)
-        self.assertEqual(len(response.data['research_dataset']['remote_resources'][0]['license'][0]), 2, response.data)
+        self.assertEqual(
+            len(response.data["research_dataset"]["access_rights"]["license"][0]),
+            2,
+            response.data,
+        )
+        self.assertEqual(
+            len(response.data["research_dataset"]["remote_resources"][0]["license"][0]),
+            2,
+            response.data,
+        )
rd_ida["relation"][0]["entity"]["type"]["identifier"] = "nonexisting" + rd_ida["provenance"][0]["lifecycle_event"]["identifier"] = "nonexisting" + rd_ida["provenance"][1]["preservation_event"]["identifier"] = "nonexisting" + rd_ida["provenance"][0]["event_outcome"]["identifier"] = "nonexisting" + response = self.client.post("/rest/v2/datasets", self.cr_full_ida_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('research_dataset' in response.data.keys(), True) - self.assertEqual(len(response.data['research_dataset']), 19) - - rd_att = self.cr_full_att_test_data['research_dataset'] - rd_att['remote_resources'][0]['license'][0]['identifier'] = 'nonexisting' - rd_att['remote_resources'][1]['resource_type']['identifier'] = 'nonexisting' - rd_att['remote_resources'][0]['use_category']['identifier'] = 'nonexisting' - response = self.client.post('/rest/v2/datasets', self.cr_full_att_test_data, format="json") + self.assertEqual("research_dataset" in response.data.keys(), True) + self.assertEqual(len(response.data["research_dataset"]), 19) + + rd_att = self.cr_full_att_test_data["research_dataset"] + rd_att["remote_resources"][0]["license"][0]["identifier"] = "nonexisting" + rd_att["remote_resources"][1]["resource_type"]["identifier"] = "nonexisting" + rd_att["remote_resources"][0]["use_category"]["identifier"] = "nonexisting" + response = self.client.post("/rest/v2/datasets", self.cr_full_att_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('research_dataset' in response.data.keys(), True) - self.assertEqual(len(response.data['research_dataset']), 3) + self.assertEqual("research_dataset" in response.data.keys(), True) + self.assertEqual(len(response.data["research_dataset"]), 3) def test_create_catalog_record_populate_fields_from_reference_data(self): """ @@ -144,101 +163,115 @@ def test_create_catalog_record_populate_fields_from_reference_data(self): """ cache = RedisClient() rf = RDM.get_reference_data(cache) - refdata = rf['reference_data'] - orgdata = rf['organization_data'] + refdata = rf["reference_data"] + orgdata = rf["organization_data"] refs = {} data_types = [ - 'access_type', - 'restriction_grounds', - 'field_of_science', - 'identifier_type', - 'keyword', - 'language', - 'license', - 'location', - 'resource_type', - 'file_type', - 'use_category', - 'research_infra', - 'contributor_role', - 'contributor_type', - 'funder_type', - 'relation_type', - 'lifecycle_event', - 'preservation_event', - 'event_outcome' + "access_type", + "restriction_grounds", + "field_of_science", + "identifier_type", + "keyword", + "language", + "license", + "location", + "resource_type", + "file_type", + "use_category", + "research_infra", + "contributor_role", + "contributor_type", + "funder_type", + "relation_type", + "lifecycle_event", + "preservation_event", + "event_outcome", ] # the values in these selected entries will be used throghout the rest of the test case for dtype in data_types: - if dtype == 'location': - entry = next((obj for obj in refdata[dtype] if obj.get('wkt', False)), None) + if dtype == "location": + entry = next((obj for obj in refdata[dtype] if obj.get("wkt", False)), None) self.assertTrue(entry is not None) else: entry = refdata[dtype][1] refs[dtype] = { - 'code': entry['code'], - 'uri': entry['uri'], - 'label': entry.get('label', None), - 'wkt': entry.get('wkt', None), - 'scheme': entry.get('scheme', None) + "code": entry["code"], + "uri": entry["uri"], + 
"label": entry.get("label", None), + "wkt": entry.get("wkt", None), + "scheme": entry.get("scheme", None), } - refs['organization'] = { - 'uri': orgdata['organization'][0]['uri'], - 'code': orgdata['organization'][0]['code'], - 'label': orgdata['organization'][0]['label'] + refs["organization"] = { + "uri": orgdata["organization"][0]["uri"], + "code": orgdata["organization"][0]["code"], + "label": orgdata["organization"][0]["label"], } # replace the relations with objects that have only the identifier set with code as value, # to easily check that label was populated (= that it appeared in the dataset after create) # without knowing its original value from the generated test data - rd_ida = self.cr_full_ida_test_data['research_dataset'] - rd_ida['theme'][0] = {'identifier': refs['keyword']['code']} - rd_ida['field_of_science'][0] = {'identifier': refs['field_of_science']['code']} - rd_ida['language'][0] = {'identifier': refs['language']['code']} - rd_ida['access_rights']['access_type'] = {'identifier': refs['access_type']['code']} - rd_ida['access_rights']['restriction_grounds'][0] = {'identifier': refs['restriction_grounds']['code']} - rd_ida['access_rights']['license'][0] = {'identifier': refs['license']['code']} - rd_ida['other_identifier'][0]['type'] = {'identifier': refs['identifier_type']['code']} - rd_ida['spatial'][0]['place_uri'] = {'identifier': refs['location']['code']} - rd_ida['files'][0]['file_type'] = {'identifier': refs['file_type']['code']} - rd_ida['files'][0]['use_category'] = {'identifier': refs['use_category']['code']} - rd_ida['directories'][0]['use_category'] = {'identifier': refs['use_category']['code']} - rd_ida['infrastructure'][0] = {'identifier': refs['research_infra']['code']} - rd_ida['creator'][0]['contributor_role'][0] = {'identifier': refs['contributor_role']['code']} - rd_ida['curator'][0]['contributor_type'][0] = {'identifier': refs['contributor_type']['code']} - rd_ida['is_output_of'][0]['funder_type'] = {'identifier': refs['funder_type']['code']} - rd_ida['relation'][0]['relation_type'] = {'identifier': refs['relation_type']['code']} - rd_ida['relation'][0]['entity']['type'] = {'identifier': refs['resource_type']['code']} - rd_ida['provenance'][0]['lifecycle_event'] = {'identifier': refs['lifecycle_event']['code']} - rd_ida['provenance'][1]['preservation_event'] = {'identifier': refs['preservation_event']['code']} - rd_ida['provenance'][0]['event_outcome'] = {'identifier': refs['event_outcome']['code']} + rd_ida = self.cr_full_ida_test_data["research_dataset"] + rd_ida["theme"][0] = {"identifier": refs["keyword"]["code"]} + rd_ida["field_of_science"][0] = {"identifier": refs["field_of_science"]["code"]} + rd_ida["language"][0] = {"identifier": refs["language"]["code"]} + rd_ida["access_rights"]["access_type"] = {"identifier": refs["access_type"]["code"]} + rd_ida["access_rights"]["restriction_grounds"][0] = { + "identifier": refs["restriction_grounds"]["code"] + } + rd_ida["access_rights"]["license"][0] = {"identifier": refs["license"]["code"]} + rd_ida["other_identifier"][0]["type"] = {"identifier": refs["identifier_type"]["code"]} + rd_ida["spatial"][0]["place_uri"] = {"identifier": refs["location"]["code"]} + rd_ida["files"][0]["file_type"] = {"identifier": refs["file_type"]["code"]} + rd_ida["files"][0]["use_category"] = {"identifier": refs["use_category"]["code"]} + rd_ida["directories"][0]["use_category"] = {"identifier": refs["use_category"]["code"]} + rd_ida["infrastructure"][0] = {"identifier": refs["research_infra"]["code"]} + 
rd_ida["creator"][0]["contributor_role"][0] = { + "identifier": refs["contributor_role"]["code"] + } + rd_ida["curator"][0]["contributor_type"][0] = { + "identifier": refs["contributor_type"]["code"] + } + rd_ida["is_output_of"][0]["funder_type"] = {"identifier": refs["funder_type"]["code"]} + rd_ida["relation"][0]["relation_type"] = {"identifier": refs["relation_type"]["code"]} + rd_ida["relation"][0]["entity"]["type"] = {"identifier": refs["resource_type"]["code"]} + rd_ida["provenance"][0]["lifecycle_event"] = {"identifier": refs["lifecycle_event"]["code"]} + rd_ida["provenance"][1]["preservation_event"] = { + "identifier": refs["preservation_event"]["code"] + } + rd_ida["provenance"][0]["event_outcome"] = {"identifier": refs["event_outcome"]["code"]} # these have other required fields, so only update the identifier with code - rd_ida['is_output_of'][0]['source_organization'][0]['identifier'] = refs['organization']['code'] - rd_ida['is_output_of'][0]['has_funding_agency'][0]['identifier'] = refs['organization']['code'] - rd_ida['other_identifier'][0]['provider']['identifier'] = refs['organization']['code'] - rd_ida['contributor'][0]['member_of']['identifier'] = refs['organization']['code'] - rd_ida['creator'][0]['member_of']['identifier'] = refs['organization']['code'] - rd_ida['curator'][0]['is_part_of']['identifier'] = refs['organization']['code'] - rd_ida['publisher']['is_part_of']['identifier'] = refs['organization']['code'] - rd_ida['rights_holder'][0]['is_part_of']['identifier'] = refs['organization']['code'] + rd_ida["is_output_of"][0]["source_organization"][0]["identifier"] = refs["organization"][ + "code" + ] + rd_ida["is_output_of"][0]["has_funding_agency"][0]["identifier"] = refs["organization"][ + "code" + ] + rd_ida["other_identifier"][0]["provider"]["identifier"] = refs["organization"]["code"] + rd_ida["contributor"][0]["member_of"]["identifier"] = refs["organization"]["code"] + rd_ida["creator"][0]["member_of"]["identifier"] = refs["organization"]["code"] + rd_ida["curator"][0]["is_part_of"]["identifier"] = refs["organization"]["code"] + rd_ida["publisher"]["is_part_of"]["identifier"] = refs["organization"]["code"] + rd_ida["rights_holder"][0]["is_part_of"]["identifier"] = refs["organization"]["code"] # Other type of reference data populations - orig_wkt_value = rd_ida['spatial'][0]['as_wkt'][0] - rd_ida['spatial'][0]['place_uri']['identifier'] = refs['location']['code'] - rd_ida['spatial'][1]['as_wkt'] = [] - rd_ida['spatial'][1]['place_uri']['identifier'] = refs['location']['code'] + orig_wkt_value = rd_ida["spatial"][0]["as_wkt"][0] + rd_ida["spatial"][0]["place_uri"]["identifier"] = refs["location"]["code"] + rd_ida["spatial"][1]["as_wkt"] = [] + rd_ida["spatial"][1]["place_uri"]["identifier"] = refs["location"]["code"] response = self.client.post( - '/rest/v2/datasets?include_user_metadata', self.cr_full_ida_test_data, format="json" + "/rest/v2/datasets?include_user_metadata", + self.cr_full_ida_test_data, + format="json", ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('research_dataset' in response.data.keys(), True) + self.assertEqual("research_dataset" in response.data.keys(), True) - new_rd_ida = response.data['research_dataset'] + new_rd_ida = response.data["research_dataset"] self._assert_uri_copied_to_identifier(refs, new_rd_ida) self._assert_label_copied_to_pref_label(refs, new_rd_ida) self._assert_label_copied_to_title(refs, new_rd_ida) @@ -246,167 +279,379 @@ def 
@@ -246,167 +279,379 @@ def test_create_catalog_record_populate_fields_from_reference_data(self):
 
         # Assert if spatial as_wkt field has been populated with a value from ref data which has wkt value having
         # condition that the user has not given own coordinates in the as_wkt field
-        self.assertEqual(orig_wkt_value, new_rd_ida['spatial'][0]['as_wkt'][0])
-        self.assertEqual(refs['location']['wkt'], new_rd_ida['spatial'][1]['as_wkt'][0])
+        self.assertEqual(orig_wkt_value, new_rd_ida["spatial"][0]["as_wkt"][0])
+        self.assertEqual(refs["location"]["wkt"], new_rd_ida["spatial"][1]["as_wkt"][0])
 
         # rd from att data catalog
-        rd_att = self.cr_full_att_test_data['research_dataset']
-        rd_att['remote_resources'][1]['resource_type'] = {'identifier': refs['resource_type']['code']}
-        rd_att['remote_resources'][0]['use_category'] = {'identifier': refs['use_category']['code']}
-        rd_att['remote_resources'][0]['license'][0] = {'identifier': refs['license']['code']}
+        rd_att = self.cr_full_att_test_data["research_dataset"]
+        rd_att["remote_resources"][1]["resource_type"] = {
+            "identifier": refs["resource_type"]["code"]
+        }
+        rd_att["remote_resources"][0]["use_category"] = {"identifier": refs["use_category"]["code"]}
+        rd_att["remote_resources"][0]["license"][0] = {"identifier": refs["license"]["code"]}
 
         # Assert remote resources related reference data
-        response = self.client.post('/rest/v2/datasets', self.cr_full_att_test_data, format="json")
+        response = self.client.post("/rest/v2/datasets", self.cr_full_att_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual('research_dataset' in response.data.keys(), True)
-        new_rd_att = response.data['research_dataset']
+        self.assertEqual("research_dataset" in response.data.keys(), True)
+        new_rd_att = response.data["research_dataset"]
         self._assert_att_remote_resource_items(refs, new_rd_att)
 
     def _assert_att_remote_resource_items(self, refs, new_rd):
-        self.assertEqual(refs['resource_type']['uri'], new_rd['remote_resources'][1]['resource_type']['identifier'])
-        self.assertEqual(refs['use_category']['uri'], new_rd['remote_resources'][0]['use_category']['identifier'])
-        self.assertEqual(refs['license']['uri'], new_rd['remote_resources'][0]['license'][0]['identifier'])
-        self.assertEqual(refs['resource_type']['label'],
-                         new_rd['remote_resources'][1]['resource_type'].get('pref_label', None))
-        self.assertEqual(refs['use_category']['label'],
-                         new_rd['remote_resources'][0]['use_category'].get('pref_label', None))
-        self.assertEqual(refs['license']['label'], new_rd['remote_resources'][0]['license'][0].get('title', None))
+        self.assertEqual(
+            refs["resource_type"]["uri"],
+            new_rd["remote_resources"][1]["resource_type"]["identifier"],
+        )
+        self.assertEqual(
+            refs["use_category"]["uri"],
+            new_rd["remote_resources"][0]["use_category"]["identifier"],
+        )
+        self.assertEqual(
+            refs["license"]["uri"],
+            new_rd["remote_resources"][0]["license"][0]["identifier"],
+        )
+        self.assertEqual(
+            refs["resource_type"]["label"],
+            new_rd["remote_resources"][1]["resource_type"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["use_category"]["label"],
+            new_rd["remote_resources"][0]["use_category"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["license"]["label"],
+            new_rd["remote_resources"][0]["license"][0].get("title", None),
+        )
 
     def _assert_uri_copied_to_identifier(self, refs, new_rd):
-        self.assertEqual(refs['keyword']['uri'], new_rd['theme'][0]['identifier'])
-        self.assertEqual(refs['field_of_science']['uri'], new_rd['field_of_science'][0]['identifier'])
-        self.assertEqual(refs['language']['uri'], new_rd['language'][0]['identifier'])
-        self.assertEqual(refs['access_type']['uri'], new_rd['access_rights']['access_type']['identifier'])
-        self.assertEqual(refs['restriction_grounds']['uri'],
-                         new_rd['access_rights']['restriction_grounds'][0]['identifier'])
-        self.assertEqual(refs['license']['uri'], new_rd['access_rights']['license'][0]['identifier'])
-        self.assertEqual(refs['identifier_type']['uri'], new_rd['other_identifier'][0]['type']['identifier'])
-        self.assertEqual(refs['location']['uri'], new_rd['spatial'][0]['place_uri']['identifier'])
-        self.assertEqual(refs['file_type']['uri'], new_rd['files'][0]['file_type']['identifier'])
-        self.assertEqual(refs['use_category']['uri'], new_rd['files'][0]['use_category']['identifier'])
-
-        self.assertEqual(refs['use_category']['uri'], new_rd['directories'][0]['use_category']['identifier'])
-        self.assertEqual(refs['organization']['uri'], new_rd['is_output_of'][0]['source_organization'][0]['identifier'])
-        self.assertEqual(refs['organization']['uri'], new_rd['is_output_of'][0]['has_funding_agency'][0]['identifier'])
-        self.assertEqual(refs['organization']['uri'], new_rd['other_identifier'][0]['provider']['identifier'])
-        self.assertEqual(refs['organization']['uri'], new_rd['contributor'][0]['member_of']['identifier'])
-        self.assertEqual(refs['organization']['uri'], new_rd['creator'][0]['member_of']['identifier'])
-        self.assertEqual(refs['organization']['uri'], new_rd['curator'][0]['is_part_of']['identifier'])
-        self.assertEqual(refs['organization']['uri'], new_rd['publisher']['is_part_of']['identifier'])
-        self.assertEqual(refs['organization']['uri'], new_rd['rights_holder'][0]['is_part_of']['identifier'])
-        self.assertEqual(refs['research_infra']['uri'], new_rd['infrastructure'][0]['identifier'])
-        self.assertEqual(refs['contributor_role']['uri'], new_rd['creator'][0]['contributor_role'][0]['identifier'])
-        self.assertEqual(refs['contributor_type']['uri'], new_rd['curator'][0]['contributor_type'][0]['identifier'])
-        self.assertEqual(refs['funder_type']['uri'], new_rd['is_output_of'][0]['funder_type']['identifier'])
-        self.assertEqual(refs['relation_type']['uri'], new_rd['relation'][0]['relation_type']['identifier'])
-        self.assertEqual(refs['resource_type']['uri'], new_rd['relation'][0]['entity']['type']['identifier'])
-        self.assertEqual(refs['lifecycle_event']['uri'], new_rd['provenance'][0]['lifecycle_event']['identifier'])
-        self.assertEqual(refs['preservation_event']['uri'], new_rd['provenance'][1]['preservation_event']['identifier'])
-        self.assertEqual(refs['event_outcome']['uri'], new_rd['provenance'][0]['event_outcome']['identifier'])
+        self.assertEqual(refs["keyword"]["uri"], new_rd["theme"][0]["identifier"])
+        self.assertEqual(
+            refs["field_of_science"]["uri"], new_rd["field_of_science"][0]["identifier"]
+        )
+        self.assertEqual(refs["language"]["uri"], new_rd["language"][0]["identifier"])
+        self.assertEqual(
+            refs["access_type"]["uri"],
+            new_rd["access_rights"]["access_type"]["identifier"],
+        )
+        self.assertEqual(
+            refs["restriction_grounds"]["uri"],
+            new_rd["access_rights"]["restriction_grounds"][0]["identifier"],
+        )
+        self.assertEqual(
+            refs["license"]["uri"], new_rd["access_rights"]["license"][0]["identifier"]
+        )
+        self.assertEqual(
+            refs["identifier_type"]["uri"],
+            new_rd["other_identifier"][0]["type"]["identifier"],
+        )
+        self.assertEqual(refs["location"]["uri"], new_rd["spatial"][0]["place_uri"]["identifier"])
self.assertEqual(refs["file_type"]["uri"], new_rd["files"][0]["file_type"]["identifier"]) + self.assertEqual( + refs["use_category"]["uri"], + new_rd["files"][0]["use_category"]["identifier"], + ) + + self.assertEqual( + refs["use_category"]["uri"], + new_rd["directories"][0]["use_category"]["identifier"], + ) + self.assertEqual( + refs["organization"]["uri"], + new_rd["is_output_of"][0]["source_organization"][0]["identifier"], + ) + self.assertEqual( + refs["organization"]["uri"], + new_rd["is_output_of"][0]["has_funding_agency"][0]["identifier"], + ) + self.assertEqual( + refs["organization"]["uri"], + new_rd["other_identifier"][0]["provider"]["identifier"], + ) + self.assertEqual( + refs["organization"]["uri"], + new_rd["contributor"][0]["member_of"]["identifier"], + ) + self.assertEqual( + refs["organization"]["uri"], new_rd["creator"][0]["member_of"]["identifier"] + ) + self.assertEqual( + refs["organization"]["uri"], + new_rd["curator"][0]["is_part_of"]["identifier"], + ) + self.assertEqual( + refs["organization"]["uri"], new_rd["publisher"]["is_part_of"]["identifier"] + ) + self.assertEqual( + refs["organization"]["uri"], + new_rd["rights_holder"][0]["is_part_of"]["identifier"], + ) + self.assertEqual(refs["research_infra"]["uri"], new_rd["infrastructure"][0]["identifier"]) + self.assertEqual( + refs["contributor_role"]["uri"], + new_rd["creator"][0]["contributor_role"][0]["identifier"], + ) + self.assertEqual( + refs["contributor_type"]["uri"], + new_rd["curator"][0]["contributor_type"][0]["identifier"], + ) + self.assertEqual( + refs["funder_type"]["uri"], + new_rd["is_output_of"][0]["funder_type"]["identifier"], + ) + self.assertEqual( + refs["relation_type"]["uri"], + new_rd["relation"][0]["relation_type"]["identifier"], + ) + self.assertEqual( + refs["resource_type"]["uri"], + new_rd["relation"][0]["entity"]["type"]["identifier"], + ) + self.assertEqual( + refs["lifecycle_event"]["uri"], + new_rd["provenance"][0]["lifecycle_event"]["identifier"], + ) + self.assertEqual( + refs["preservation_event"]["uri"], + new_rd["provenance"][1]["preservation_event"]["identifier"], + ) + self.assertEqual( + refs["event_outcome"]["uri"], + new_rd["provenance"][0]["event_outcome"]["identifier"], + ) def _assert_scheme_copied_to_in_scheme(self, refs, new_rd): - self.assertEqual(refs['keyword']['scheme'], new_rd['theme'][0]['in_scheme']) - self.assertEqual(refs['field_of_science']['scheme'], new_rd['field_of_science'][0]['in_scheme']) - self.assertEqual(refs['language']['scheme'], new_rd['language'][0]['in_scheme']) - self.assertEqual(refs['access_type']['scheme'], new_rd['access_rights']['access_type']['in_scheme']) - self.assertEqual(refs['restriction_grounds']['scheme'], - new_rd['access_rights']['restriction_grounds'][0]['in_scheme']) - self.assertEqual(refs['license']['scheme'], new_rd['access_rights']['license'][0]['in_scheme']) - self.assertEqual(refs['identifier_type']['scheme'], new_rd['other_identifier'][0]['type']['in_scheme']) - self.assertEqual(refs['location']['scheme'], new_rd['spatial'][0]['place_uri']['in_scheme']) - self.assertEqual(refs['file_type']['scheme'], new_rd['files'][0]['file_type']['in_scheme']) - self.assertEqual(refs['use_category']['scheme'], new_rd['files'][0]['use_category']['in_scheme']) - - self.assertEqual(refs['use_category']['scheme'], new_rd['directories'][0]['use_category']['in_scheme']) - self.assertEqual(refs['research_infra']['scheme'], new_rd['infrastructure'][0]['in_scheme']) - self.assertEqual(refs['contributor_role']['scheme'], 
 
     def _assert_scheme_copied_to_in_scheme(self, refs, new_rd):
-        self.assertEqual(refs['keyword']['scheme'], new_rd['theme'][0]['in_scheme'])
-        self.assertEqual(refs['field_of_science']['scheme'], new_rd['field_of_science'][0]['in_scheme'])
-        self.assertEqual(refs['language']['scheme'], new_rd['language'][0]['in_scheme'])
-        self.assertEqual(refs['access_type']['scheme'], new_rd['access_rights']['access_type']['in_scheme'])
-        self.assertEqual(refs['restriction_grounds']['scheme'],
-                         new_rd['access_rights']['restriction_grounds'][0]['in_scheme'])
-        self.assertEqual(refs['license']['scheme'], new_rd['access_rights']['license'][0]['in_scheme'])
-        self.assertEqual(refs['identifier_type']['scheme'], new_rd['other_identifier'][0]['type']['in_scheme'])
-        self.assertEqual(refs['location']['scheme'], new_rd['spatial'][0]['place_uri']['in_scheme'])
-        self.assertEqual(refs['file_type']['scheme'], new_rd['files'][0]['file_type']['in_scheme'])
-        self.assertEqual(refs['use_category']['scheme'], new_rd['files'][0]['use_category']['in_scheme'])
-
-        self.assertEqual(refs['use_category']['scheme'], new_rd['directories'][0]['use_category']['in_scheme'])
-        self.assertEqual(refs['research_infra']['scheme'], new_rd['infrastructure'][0]['in_scheme'])
-        self.assertEqual(refs['contributor_role']['scheme'], new_rd['creator'][0]['contributor_role']['in_scheme'])
-        self.assertEqual(refs['contributor_type']['scheme'], new_rd['curator'][0]['contributor_type']['in_scheme'])
-        self.assertEqual(refs['funder_type']['scheme'], new_rd['is_output_of'][0]['funder_type']['in_scheme'])
-        self.assertEqual(refs['relation_type']['scheme'], new_rd['relation'][0]['relation_type']['in_scheme'])
-        self.assertEqual(refs['resource_type']['scheme'], new_rd['relation'][0]['entity']['type']['in_scheme'])
-        self.assertEqual(refs['lifecycle_event']['scheme'], new_rd['provenance'][0]['lifecycle_event']['in_scheme'])
-        self.assertEqual(refs['preservation_event']['scheme'],
-                         new_rd['provenance'][1]['preservation_event']['in_scheme'])
-        self.assertEqual(refs['event_outcome']['scheme'], new_rd['provenance'][0]['event_outcome']['in_scheme'])
+        self.assertEqual(refs["keyword"]["scheme"], new_rd["theme"][0]["in_scheme"])
+        self.assertEqual(
+            refs["field_of_science"]["scheme"],
+            new_rd["field_of_science"][0]["in_scheme"],
+        )
+        self.assertEqual(refs["language"]["scheme"], new_rd["language"][0]["in_scheme"])
+        self.assertEqual(
+            refs["access_type"]["scheme"],
+            new_rd["access_rights"]["access_type"]["in_scheme"],
+        )
+        self.assertEqual(
+            refs["restriction_grounds"]["scheme"],
+            new_rd["access_rights"]["restriction_grounds"][0]["in_scheme"],
+        )
+        self.assertEqual(
+            refs["license"]["scheme"],
+            new_rd["access_rights"]["license"][0]["in_scheme"],
+        )
+        self.assertEqual(
+            refs["identifier_type"]["scheme"],
+            new_rd["other_identifier"][0]["type"]["in_scheme"],
+        )
+        self.assertEqual(refs["location"]["scheme"], new_rd["spatial"][0]["place_uri"]["in_scheme"])
+        self.assertEqual(refs["file_type"]["scheme"], new_rd["files"][0]["file_type"]["in_scheme"])
+        self.assertEqual(
+            refs["use_category"]["scheme"],
+            new_rd["files"][0]["use_category"]["in_scheme"],
+        )
+
+        self.assertEqual(
+            refs["use_category"]["scheme"],
+            new_rd["directories"][0]["use_category"]["in_scheme"],
+        )
+        self.assertEqual(refs["research_infra"]["scheme"], new_rd["infrastructure"][0]["in_scheme"])
+        self.assertEqual(
+            refs["contributor_role"]["scheme"],
+            new_rd["creator"][0]["contributor_role"]["in_scheme"],
+        )
+        self.assertEqual(
+            refs["contributor_type"]["scheme"],
+            new_rd["curator"][0]["contributor_type"]["in_scheme"],
+        )
+        self.assertEqual(
+            refs["funder_type"]["scheme"],
+            new_rd["is_output_of"][0]["funder_type"]["in_scheme"],
+        )
+        self.assertEqual(
+            refs["relation_type"]["scheme"],
+            new_rd["relation"][0]["relation_type"]["in_scheme"],
+        )
+        self.assertEqual(
+            refs["resource_type"]["scheme"],
+            new_rd["relation"][0]["entity"]["type"]["in_scheme"],
+        )
+        self.assertEqual(
+            refs["lifecycle_event"]["scheme"],
+            new_rd["provenance"][0]["lifecycle_event"]["in_scheme"],
+        )
+        self.assertEqual(
+            refs["preservation_event"]["scheme"],
+            new_rd["provenance"][1]["preservation_event"]["in_scheme"],
+        )
+        self.assertEqual(
+            refs["event_outcome"]["scheme"],
+            new_rd["provenance"][0]["event_outcome"]["in_scheme"],
+        )
 
     def _assert_label_copied_to_pref_label(self, refs, new_rd):
-        self.assertEqual(refs['keyword']['label'], new_rd['theme'][0].get('pref_label', None))
-        self.assertEqual(refs['field_of_science']['label'], new_rd['field_of_science'][0].get('pref_label', None))
-        self.assertEqual(refs['access_type']['label'], new_rd['access_rights']['access_type'].get('pref_label', None))
-        self.assertEqual(refs['restriction_grounds']['label'],
-                         new_rd['access_rights']['restriction_grounds'][0].get('pref_label', None))
-        self.assertEqual(refs['identifier_type']['label'],
-                         new_rd['other_identifier'][0]['type'].get('pref_label', None))
-        self.assertEqual(refs['location']['label'], new_rd['spatial'][0]['place_uri'].get('pref_label', None))
-        self.assertEqual(refs['file_type']['label'], new_rd['files'][0]['file_type'].get('pref_label', None))
-        self.assertEqual(refs['use_category']['label'], new_rd['files'][0]['use_category'].get('pref_label', None))
-        self.assertEqual(refs['use_category']['label'],
-                         new_rd['directories'][0]['use_category'].get('pref_label', None))
-
-        self.assertEqual(refs['research_infra']['label'], new_rd['infrastructure'][0].get('pref_label', None))
-        self.assertEqual(refs['contributor_role']['label'],
-                         new_rd['creator'][0]['contributor_role'][0].get('pref_label', None))
-        self.assertEqual(refs['contributor_type']['label'],
-                         new_rd['curator'][0]['contributor_type'][0].get('pref_label', None))
-        self.assertEqual(refs['funder_type']['label'], new_rd['is_output_of'][0]['funder_type'].get('pref_label', None))
-        self.assertEqual(refs['relation_type']['label'], new_rd['relation'][0]['relation_type'].get('pref_label', None))
-        self.assertEqual(refs['resource_type']['label'],
-                         new_rd['relation'][0]['entity']['type'].get('pref_label', None))
-        self.assertEqual(refs['lifecycle_event']['label'],
-                         new_rd['provenance'][0]['lifecycle_event'].get('pref_label', None))
-        self.assertEqual(refs['preservation_event']['label'],
-                         new_rd['provenance'][1]['preservation_event'].get('pref_label', None))
-        self.assertEqual(refs['event_outcome']['label'],
-                         new_rd['provenance'][0]['event_outcome'].get('pref_label', None))
+        self.assertEqual(refs["keyword"]["label"], new_rd["theme"][0].get("pref_label", None))
+        self.assertEqual(
+            refs["field_of_science"]["label"],
+            new_rd["field_of_science"][0].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["access_type"]["label"],
+            new_rd["access_rights"]["access_type"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["restriction_grounds"]["label"],
+            new_rd["access_rights"]["restriction_grounds"][0].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["identifier_type"]["label"],
+            new_rd["other_identifier"][0]["type"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["location"]["label"],
+            new_rd["spatial"][0]["place_uri"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["file_type"]["label"],
+            new_rd["files"][0]["file_type"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["use_category"]["label"],
+            new_rd["files"][0]["use_category"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["use_category"]["label"],
+            new_rd["directories"][0]["use_category"].get("pref_label", None),
+        )
+
+        self.assertEqual(
+            refs["research_infra"]["label"],
+            new_rd["infrastructure"][0].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["contributor_role"]["label"],
+            new_rd["creator"][0]["contributor_role"][0].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["contributor_type"]["label"],
+            new_rd["curator"][0]["contributor_type"][0].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["funder_type"]["label"],
+            new_rd["is_output_of"][0]["funder_type"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["relation_type"]["label"],
+            new_rd["relation"][0]["relation_type"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["resource_type"]["label"],
+            new_rd["relation"][0]["entity"]["type"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["lifecycle_event"]["label"],
+            new_rd["provenance"][0]["lifecycle_event"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["preservation_event"]["label"],
+            new_rd["provenance"][1]["preservation_event"].get("pref_label", None),
+        )
+        self.assertEqual(
+            refs["event_outcome"]["label"],
+            new_rd["provenance"][0]["event_outcome"].get("pref_label", None),
+        )
new_rd["provenance"][0]["lifecycle_event"].get("pref_label", None), + ) + self.assertEqual( + refs["preservation_event"]["label"], + new_rd["provenance"][1]["preservation_event"].get("pref_label", None), + ) + self.assertEqual( + refs["event_outcome"]["label"], + new_rd["provenance"][0]["event_outcome"].get("pref_label", None), + ) def _assert_label_copied_to_title(self, refs, new_rd): - required_langs = dict((lang, val) for lang, val in refs['language']['label'].items() - if lang in ['fi', 'sv', 'en', 'und']) - self.assertEqual(required_langs, new_rd['language'][0].get('title', None)) - self.assertEqual(refs['license']['label'], new_rd['access_rights']['license'][0].get('title', None)) + required_langs = dict( + (lang, val) + for lang, val in refs["language"]["label"].items() + if lang in ["fi", "sv", "en", "und"] + ) + self.assertEqual(required_langs, new_rd["language"][0].get("title", None)) + self.assertEqual( + refs["license"]["label"], + new_rd["access_rights"]["license"][0].get("title", None), + ) def _assert_label_copied_to_name(self, refs, new_rd): - self.assertEqual(refs['organization']['label'], - new_rd['is_output_of'][0]['source_organization'][0].get('name', None)) - self.assertEqual(refs['organization']['label'], - new_rd['is_output_of'][0]['has_funding_agency'][0].get('name', None)) - self.assertEqual(refs['organization']['label'], new_rd['other_identifier'][0]['provider'].get('name', None)) - self.assertEqual(refs['organization']['label'], new_rd['contributor'][0]['member_of'].get('name', None)) - self.assertEqual(refs['organization']['label'], new_rd['creator'][0]['member_of'].get('name', None)) - self.assertEqual(refs['organization']['label'], new_rd['curator'][0]['is_part_of'].get('name', None)) - self.assertEqual(refs['organization']['label'], new_rd['publisher']['is_part_of'].get('name', None)) - self.assertEqual(refs['organization']['label'], new_rd['rights_holder'][0]['is_part_of'].get('name', None)) + self.assertEqual( + refs["organization"]["label"], + new_rd["is_output_of"][0]["source_organization"][0].get("name", None), + ) + self.assertEqual( + refs["organization"]["label"], + new_rd["is_output_of"][0]["has_funding_agency"][0].get("name", None), + ) + self.assertEqual( + refs["organization"]["label"], + new_rd["other_identifier"][0]["provider"].get("name", None), + ) + self.assertEqual( + refs["organization"]["label"], + new_rd["contributor"][0]["member_of"].get("name", None), + ) + self.assertEqual( + refs["organization"]["label"], + new_rd["creator"][0]["member_of"].get("name", None), + ) + self.assertEqual( + refs["organization"]["label"], + new_rd["curator"][0]["is_part_of"].get("name", None), + ) + self.assertEqual( + refs["organization"]["label"], + new_rd["publisher"]["is_part_of"].get("name", None), + ) + self.assertEqual( + refs["organization"]["label"], + new_rd["rights_holder"][0]["is_part_of"].get("name", None), + ) def test_refdata_sub_org_main_org_population(self): # Test parent org gets populated when sub org is from ref data and user has not provided is_part_of relation - self.cr_test_data['research_dataset']['publisher'] = {'@type': 'Organization', 'identifier': '10076-A800'} - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["publisher"] = { + "@type": "Organization", + "identifier": "10076-A800", + } + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - 
-        self.assertEqual('is_part_of' in response.data['research_dataset']['publisher'], True)
-        self.assertEqual('http://uri.suomi.fi/codelist/fairdata/organization/code/10076',
-                         response.data['research_dataset']['publisher']['is_part_of']['identifier'])
-        self.assertTrue(response.data['research_dataset']['publisher']['is_part_of'].get('name', False))
+        self.assertEqual("is_part_of" in response.data["research_dataset"]["publisher"], True)
+        self.assertEqual(
+            "http://uri.suomi.fi/codelist/fairdata/organization/code/10076",
+            response.data["research_dataset"]["publisher"]["is_part_of"]["identifier"],
+        )
+        self.assertTrue(
+            response.data["research_dataset"]["publisher"]["is_part_of"].get("name", False)
+        )
 
         # Test parent org does not get populated when sub org is from ref data and user has provided is_part_of relation
-        self.cr_test_data['research_dataset']['publisher'] = {
-            '@type': 'Organization',
-            'identifier': '10076-A800',
-            'is_part_of': {
-                '@type': 'Organization',
-                'identifier': 'test_id',
-                'name': {'und': 'test_name'}
-            }}
-        response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["research_dataset"]["publisher"] = {
+            "@type": "Organization",
+            "identifier": "10076-A800",
+            "is_part_of": {
+                "@type": "Organization",
+                "identifier": "test_id",
+                "name": {"und": "test_name"},
+            },
+        }
+        response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual('is_part_of' in response.data['research_dataset']['publisher'], True)
-        self.assertEqual('test_id', response.data['research_dataset']['publisher']['is_part_of']['identifier'])
-        self.assertEqual('test_name', response.data['research_dataset']['publisher']['is_part_of']['name']['und'])
+        self.assertEqual("is_part_of" in response.data["research_dataset"]["publisher"], True)
+        self.assertEqual(
+            "test_id",
+            response.data["research_dataset"]["publisher"]["is_part_of"]["identifier"],
+        )
+        self.assertEqual(
+            "test_name",
+            response.data["research_dataset"]["publisher"]["is_part_of"]["name"]["und"],
+        )
 
         # Test nothing happens when org is a parent org
-        self.cr_test_data['research_dataset']['publisher'] = {'@type': 'Organization', 'identifier': '10076'}
-        response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["research_dataset"]["publisher"] = {
+            "@type": "Organization",
+            "identifier": "10076",
+        }
+        response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual('is_part_of' not in response.data['research_dataset']['publisher'], True)
+        self.assertEqual("is_part_of" not in response.data["research_dataset"]["publisher"], True)
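The sub-organization test above leans on the Fairdata organization code convention, where a sub-unit code appears to embed its parent's code before a dash (10076-A800 belongs to 10076). Assuming that convention holds, the parent lookup could be sketched like this (a hypothetical helper, not the actual Metax logic):

ORG_BASE = "http://uri.suomi.fi/codelist/fairdata/organization/code"

def parent_org_identifier(org_code):
    # "10076-A800" -> ".../code/10076"; a top-level code like "10076" has no parent
    main_code, _, sub_unit = org_code.partition("-")
    return f"{ORG_BASE}/{main_code}" if sub_unit else None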
diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/rems.py b/src/metax_api/tests/api/rest/v2/views/datasets/rems.py
index 5e412023..1f293ce6 100755
--- a/src/metax_api/tests/api/rest/v2/views/datasets/rems.py
+++ b/src/metax_api/tests/api/rest/v2/views/datasets/rems.py
@@ -20,72 +20,76 @@ IDA_CATALOG = django_settings.IDA_DATA_CATALOG_IDENTIFIER
 
 
-@unittest.skipIf(django_settings.REMS['ENABLED'] is not True, 'Only run if REMS is enabled')
+@unittest.skipIf(django_settings.REMS["ENABLED"] is not True, "Only run if REMS is enabled")
 class CatalogRecordApiWriteREMS(CatalogRecordApiWriteCommon):
     rf = RDM.get_reference_data(cache)
     # get by code to prevent failures if list ordering changes
-    access_permit = [type for type in rf['reference_data']['access_type'] if type['code'] == 'permit'][0]
-    access_open = [type for type in rf['reference_data']['access_type'] if type['code'] == 'open'][0]
+    access_permit = [
+        type for type in rf["reference_data"]["access_type"] if type["code"] == "permit"
+    ][0]
+    access_open = [type for type in rf["reference_data"]["access_type"] if type["code"] == "open"][
+        0
+    ]
 
     permit_rights = {
         # license type does not matter
         "license": [
             {
-                "title": rf['reference_data']['license'][0]['label'],
-                "identifier": rf['reference_data']['license'][0]['uri']
+                "title": rf["reference_data"]["license"][0]["label"],
+                "identifier": rf["reference_data"]["license"][0]["uri"],
             }
         ],
         "access_type": {
-            "in_scheme": access_permit['scheme'],
-            "identifier": access_permit['uri'],
-            "pref_label": access_permit['label']
-        }
+            "in_scheme": access_permit["scheme"],
+            "identifier": access_permit["uri"],
+            "pref_label": access_permit["label"],
+        },
     }
 
     open_rights = {
         "access_type": {
-            "in_scheme": access_open['scheme'],
-            "identifier": access_open['uri'],
-            "pref_label": access_open['label']
+            "in_scheme": access_open["scheme"],
+            "identifier": access_open["uri"],
+            "pref_label": access_open["label"],
         }
     }
 
     # any other than what is included in permit_rights is sufficient
-    other_license = rf['reference_data']['license'][1]
+    other_license = rf["reference_data"]["license"][1]
 
     def setUp(self):
        super().setUp()
 
        # Create ida data catalog
-        dc = self._get_object_from_test_data('datacatalog', requested_index=0)
+        dc = self._get_object_from_test_data("datacatalog", requested_index=0)
        dc_id = IDA_CATALOG
-        dc['catalog_json']['identifier'] = dc_id
-        self.client.post('/rest/v2/datacatalogs', dc, format="json")
+        dc["catalog_json"]["identifier"] = dc_id
+        self.client.post("/rest/v2/datacatalogs", dc, format="json")
 
        # token for end user access
        self.token = get_test_oidc_token(new_proxy=True)
 
        # mock successful rems access for creation, add fails later if needed.
        # Not using regex to allow individual access failures
-        for entity in ['user', 'workflow', 'license', 'resource', 'catalogue-item']:
-            self._mock_rems_write_access_succeeds('POST', entity, 'create')
+        for entity in ["user", "workflow", "license", "resource", "catalogue-item"]:
+            self._mock_rems_write_access_succeeds("POST", entity, "create")
 
-        self._mock_rems_read_access_succeeds('license')
+        self._mock_rems_read_access_succeeds("license")
 
        # mock successful rems access for deletion. Add fails later
-        for entity in ['catalogue-item', 'workflow', 'resource']:
-            self._mock_rems_write_access_succeeds(method='PUT', entity=entity, action='archived')
-            self._mock_rems_write_access_succeeds(method='PUT', entity=entity, action='enabled')
+        for entity in ["catalogue-item", "workflow", "resource"]:
+            self._mock_rems_write_access_succeeds(method="PUT", entity=entity, action="archived")
+            self._mock_rems_write_access_succeeds(method="PUT", entity=entity, action="enabled")
 
-        self._mock_rems_read_access_succeeds('catalogue-item')
-        self._mock_rems_read_access_succeeds('application')
-        self._mock_rems_write_access_succeeds(method='POST', entity='application', action='close')
+        self._mock_rems_read_access_succeeds("catalogue-item")
+        self._mock_rems_read_access_succeeds("application")
+        self._mock_rems_write_access_succeeds(method="POST", entity="application", action="close")
 
        responses.add(
            responses.GET,
            f"{django_settings.REMS['BASE_URL']}/health",
-            json={'healthy': True},
-            status=200
+            json={"healthy": True},
+            status=200,
        )
 
@@ -95,7 +99,7 @@ def _get_access_granter(self, malformed=False):
        access_granter = {
            "userid": "testcaseuser" if not malformed else 1234,
            "name": "Test User",
-            "email": "testcase@user.com"
+            "email": "testcase@user.com",
        }
 
        return access_granter
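setUp above pre-registers one canned response per REMS endpoint through the responses library: responses.add pins an HTTP method and URL to a JSON body, the @responses.activate decorator on each test routes requests-based calls through those registrations, and responses.replace (used by the failure helpers in the hunks below) later swaps a registration for a failing one. A condensed, self-contained example of the same pattern (the URL and payload here are made up for illustration):

import requests
import responses

@responses.activate
def demo_rems_mock():
    # register a success for POST /workflows/create, as setUp does for each entity
    responses.add(
        responses.POST,
        "https://rems.example.com/workflows/create",  # assumed base URL
        json={"success": True, "id": 6},
        status=200,
    )
    resp = requests.post("https://rems.example.com/workflows/create", json={})
    assert resp.json() == {"success": True, "id": 6}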
"https://url.to.etsin.fi" + "infourl": "https://url.to.etsin.fi", } }, "resource-id": 19, @@ -174,113 +178,97 @@ def _mock_rems_read_access_succeeds(self, entity): "id": 18, "expired": False, "end": None, - "enabled": True + "enabled": True, } ] - elif entity == 'application': + elif entity == "application": # only mock relevant data resp = [ { - 'application/workflow': { - 'workflow.dynamic/handlers': [ - { - 'userid': 'somehandler' - } - ] + "application/workflow": { + "workflow.dynamic/handlers": [{"userid": "somehandler"}] }, "application/id": 3, - 'application/applicant': { - 'userid': 'someapplicant' - }, + "application/applicant": {"userid": "someapplicant"}, "application/resources": [ { - "catalogue-item/title": { - "en": "Removal test" - }, + "catalogue-item/title": {"en": "Removal test"}, "resource/ext-id": "some:pref:id", - "catalogue-item/id": 5 + "catalogue-item/id": 5, } ], - "application/state": 'application.state/draft' + "application/state": "application.state/draft", }, { - 'application/workflow': { - 'workflow.dynamic/handlers': [ - { - 'userid': 'someid' - } - ] - }, + "application/workflow": {"workflow.dynamic/handlers": [{"userid": "someid"}]}, "application/id": 2, - 'application/applicant': { - 'userid': 'someotherapplicant' - }, + "application/applicant": {"userid": "someotherapplicant"}, "application/resources": [ { - "catalogue-item/title": { - "en": "Removal test" - }, + "catalogue-item/title": {"en": "Removal test"}, "resource/ext-id": "some:pref:id", - "catalogue-item/id": 5 + "catalogue-item/id": 5, } ], - "application/state": 'application.state/approved' + "application/state": "application.state/approved", }, { - 'application/workflow': { - 'workflow.dynamic/handlers': [ - { - 'userid': 'remsuid' - } - ] - }, + "application/workflow": {"workflow.dynamic/handlers": [{"userid": "remsuid"}]}, "application/id": 1, - 'application/applicant': { - 'userid': 'someapplicant' - }, + "application/applicant": {"userid": "someapplicant"}, "application/resources": [ { - "catalogue-item/title": { - "en": "Removal test" - }, - "resource/ext-id": 'Same:title:with:different:catalogue:item', - "catalogue-item/id": 18 + "catalogue-item/title": {"en": "Removal test"}, + "resource/ext-id": "Same:title:with:different:catalogue:item", + "catalogue-item/id": 18, } ], - "application/state": 'application.state/draft' - } + "application/state": "application.state/draft", + }, ] responses.add( responses.GET, f"{django_settings.REMS['BASE_URL']}/{entity}s", json=resp, - status=200 + status=200, ) - def _mock_rems_access_return_403(self, method, entity, action=''): + def _mock_rems_access_return_403(self, method, entity, action=""): """ Works also for GET method since failure responses from rems are identical for write and read operations """ - req_type = responses.POST if method == 'POST' else responses.PUT if method == 'PUT' else responses.GET + req_type = ( + responses.POST + if method == "POST" + else responses.PUT + if method == "PUT" + else responses.GET + ) responses.replace( req_type, f"{django_settings.REMS['BASE_URL']}/{entity}s/{action}", - status=403 # anything else than 200 is a fail + status=403, # anything else than 200 is a fail ) - def _mock_rems_access_return_error(self, method, entity, action=''): + def _mock_rems_access_return_error(self, method, entity, action=""): """ operation status is defined in the body so 200 response can also be failure. 
""" - req_type = responses.POST if method == 'POST' else responses.PUT if method == 'PUT' else responses.GET + req_type = ( + responses.POST + if method == "POST" + else responses.PUT + if method == "PUT" + else responses.GET + ) errors = [ { "type": "some kind of identifier of this error", - "somedetail": "entity identifier the error is conserning" + "somedetail": "entity identifier the error is conserning", } ] @@ -288,30 +276,38 @@ def _mock_rems_access_return_error(self, method, entity, action=''): req_type, f"{django_settings.REMS['BASE_URL']}/{entity}s/{action}", json={"success": False, "errors": errors}, - status=200 + status=200, ) - def _mock_rems_access_crashes(self, method, entity, action=''): + def _mock_rems_access_crashes(self, method, entity, action=""): """ Crash happens for example if there is a network error. Can be used for GET also """ - req_type = responses.POST if method == 'POST' else responses.PUT if method == 'PUT' else responses.GET + req_type = ( + responses.POST + if method == "POST" + else responses.PUT + if method == "PUT" + else responses.GET + ) responses.replace( req_type, f"{django_settings.REMS['BASE_URL']}/{entity}s/{action}", - body=Exception('REMS_service should catch this one also') + body=Exception("REMS_service should catch this one also"), ) def _create_new_rems_dataset(self): """ Modifies catalog record to be REMS managed and post it to Metax """ - self.cr_test_data['research_dataset']['access_rights'] = self.permit_rights - self.cr_test_data['data_catalog'] = IDA_CATALOG - self.cr_test_data['access_granter'] = self._get_access_granter() + self.cr_test_data["research_dataset"]["access_rights"] = self.permit_rights + self.cr_test_data["data_catalog"] = IDA_CATALOG + self.cr_test_data["access_granter"] = self._get_access_granter() - response = self.client.post('/rest/v2/datasets?include_user_metadata', self.cr_test_data, format="json") + response = self.client.post( + "/rest/v2/datasets?include_user_metadata", self.cr_test_data, format="json" + ) return response @@ -322,26 +318,32 @@ def test_creating_permit_dataset_creates_catalogue_item_service_succeeds(self): """ response = self._create_new_rems_dataset() self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertTrue(response.data.get('rems_identifier') is not None, 'rems_identifier should be present') - self.assertTrue(response.data.get('access_granter') is not None, 'access_granter should be present') + self.assertTrue( + response.data.get("rems_identifier") is not None, + "rems_identifier should be present", + ) + self.assertTrue( + response.data.get("access_granter") is not None, + "access_granter should be present", + ) @responses.activate def test_creating_permit_dataset_creates_catalogue_item_service_fails_1(self): """ Test unsuccessful rems access """ - self._mock_rems_access_return_403('POST', 'workflow', 'create') + self._mock_rems_access_return_403("POST", "workflow", "create") response = self._create_new_rems_dataset() self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data) - self.assertTrue('failed to publish updates' in response.data['detail'][0], response.data) + self.assertTrue("failed to publish updates" in response.data["detail"][0], response.data) @responses.activate def test_creating_permit_dataset_creates_catalogue_item_service_fails_2(self): """ Test unsuccessful rems access """ - self._mock_rems_access_return_error('POST', 'catalogue-item', 'create') + self._mock_rems_access_return_error("POST", 
"catalogue-item", "create") response = self._create_new_rems_dataset() self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data) @@ -351,11 +353,11 @@ def test_creating_permit_dataset_creates_catalogue_item_service_fails_3(self): """ Test unsuccessful rems access """ - self._mock_rems_access_crashes('POST', 'resource', 'create') + self._mock_rems_access_crashes("POST", "resource", "create") response = self._create_new_rems_dataset() self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data) - self.assertTrue('failed to publish updates' in response.data['detail'][0], response.data) + self.assertTrue("failed to publish updates" in response.data["detail"][0], response.data) @responses.activate def test_changing_dataset_to_permit_creates_new_catalogue_item_succeeds(self): @@ -364,40 +366,46 @@ def test_changing_dataset_to_permit_creates_new_catalogue_item_succeeds(self): """ # create dataset without rems managed access - self.cr_test_data['research_dataset']['access_rights'] = self.open_rights - self.cr_test_data['data_catalog'] = IDA_CATALOG + self.cr_test_data["research_dataset"]["access_rights"] = self.open_rights + self.cr_test_data["data_catalog"] = IDA_CATALOG - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) # change to rems managed cr = response.data - cr['research_dataset']['access_rights'] = self.permit_rights - cr['access_granter'] = self._get_access_granter() + cr["research_dataset"]["access_rights"] = self.permit_rights + cr["access_granter"] = self._get_access_granter() response = self.client.put(f'/rest/v2/datasets/{cr["id"]}', cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertTrue(response.data.get('rems_identifier') is not None, 'rems_identifier should be present') - self.assertTrue(response.data.get('access_granter') is not None, 'access_granter should be present') + self.assertTrue( + response.data.get("rems_identifier") is not None, + "rems_identifier should be present", + ) + self.assertTrue( + response.data.get("access_granter") is not None, + "access_granter should be present", + ) @responses.activate def test_changing_dataset_to_permit_creates_new_catalogue_item_fails(self): """ Test error handling on metax update operation """ - self._mock_rems_access_return_error('POST', 'user', 'create') + self._mock_rems_access_return_error("POST", "user", "create") # create dataset without rems managed access - self.cr_test_data['research_dataset']['access_rights'] = self.open_rights - self.cr_test_data['data_catalog'] = IDA_CATALOG + self.cr_test_data["research_dataset"]["access_rights"] = self.open_rights + self.cr_test_data["data_catalog"] = IDA_CATALOG - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) # change to rems managed cr = response.data - cr['research_dataset']['access_rights'] = self.permit_rights - cr['access_granter'] = self._get_access_granter() + cr["research_dataset"]["access_rights"] = self.permit_rights + cr["access_granter"] = self._get_access_granter() response = self.client.put(f'/rest/v2/datasets/{cr["id"]}', cr, format="json") 
        self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data)
 
@@ -408,7 +416,7 @@ def test_changing_access_type_to_other_closes_rems_entities_succeeds(self):
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
 
        cr = response.data
-        cr['research_dataset']['access_rights'] = self.open_rights
+        cr["research_dataset"]["access_rights"] = self.open_rights
 
        response = self.client.put(f'/rest/v2/datasets/{cr["id"]}', cr, format="json")
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
 
@@ -418,10 +426,10 @@ def test_changing_access_type_to_other_closes_rems_entities_fails(self):
        response = self._create_new_rems_dataset()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
 
-        self._mock_rems_access_return_error('POST', 'application', 'close')
+        self._mock_rems_access_return_error("POST", "application", "close")
 
        cr = response.data
-        cr['research_dataset']['access_rights'] = self.open_rights
+        cr["research_dataset"]["access_rights"] = self.open_rights
 
        response = self.client.put(f'/rest/v2/datasets/{cr["id"]}', cr, format="json")
        self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data)
 
@@ -437,11 +445,11 @@ def test_changing_dataset_license_updates_rems(self):
        cr_before = response.data
 
-        rems_id_before = cr_before['rems_identifier']
-        cr_before['research_dataset']['access_rights']['license'] = [
+        rems_id_before = cr_before["rems_identifier"]
+        cr_before["research_dataset"]["access_rights"]["license"] = [
            {
-                "title": self.other_license['label'],
-                "identifier": self.other_license['uri']
+                "title": self.other_license["label"],
+                "identifier": self.other_license["uri"],
            }
        ]
 
@@ -449,7 +457,11 @@ def test_changing_dataset_license_updates_rems(self):
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
 
        cr_after = response.data
-        self.assertNotEqual(rems_id_before, cr_after['rems_identifier'], 'REMS identifier should have been changed')
+        self.assertNotEqual(
+            rems_id_before,
+            cr_after["rems_identifier"],
+            "REMS identifier should have been changed",
+        )
 
    @responses.activate
    def test_changing_license_dont_allow_access_granter_changes(self):
@@ -462,18 +474,20 @@ def test_changing_license_dont_allow_access_granter_changes(self):
 
        cr_before = response.data
 
-        cr_before['access_granter']['userid'] = 'newid'
-        cr_before['research_dataset']['access_rights']['license'] = [
-            {
-                "identifier": self.other_license['uri']
-            }
+        cr_before["access_granter"]["userid"] = "newid"
+        cr_before["research_dataset"]["access_rights"]["license"] = [
+            {"identifier": self.other_license["uri"]}
        ]
 
        response = self.client.put(f'/rest/v2/datasets/{cr_before["id"]}', cr_before, format="json")
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
 
        cr_after = response.data
response.data
- self.assertTrue(cr_after.get('rems_identifier') is None, 'REMS identifier should have been deleted')
- self.assertTrue(cr_after.get('access_granter') is None, 'access_granter should have been deleted')
+ self.assertTrue(
+ cr_after.get("rems_identifier") is None,
+ "REMS identifier should have been deleted",
+ )
+ self.assertTrue(
+ cr_after.get("access_granter") is None,
+ "access_granter should have been deleted",
+ )
@responses.activate
def test_creating_permit_dataset_creates_catalogue_item_end_user(self):
@@ -500,20 +520,20 @@ def test_creating_permit_dataset_creates_catalogue_item_end_user(self):
Tests that catalogue item in REMS is created correctly on permit dataset creation.
User information is fetched from the token.
"""
- self._set_http_authorization('owner')
+ self._set_http_authorization("owner")
# modify catalog record
- self.cr_test_data['user_created'] = self.token['CSCUserName']
- self.cr_test_data['metadata_provider_user'] = self.token['CSCUserName']
- self.cr_test_data['metadata_provider_org'] = self.token['schacHomeOrganization']
- self.cr_test_data['metadata_owner_org'] = self.token['schacHomeOrganization']
- self.cr_test_data['research_dataset']['access_rights'] = self.permit_rights
- self.cr_test_data['data_catalog'] = IDA_CATALOG
+ self.cr_test_data["user_created"] = self.token["CSCUserName"]
+ self.cr_test_data["metadata_provider_user"] = self.token["CSCUserName"]
+ self.cr_test_data["metadata_provider_org"] = self.token["schacHomeOrganization"]
+ self.cr_test_data["metadata_owner_org"] = self.token["schacHomeOrganization"]
+ self.cr_test_data["research_dataset"]["access_rights"] = self.permit_rights
+ self.cr_test_data["data_catalog"] = IDA_CATALOG
# end user doesn't have permissions to the files and they are also not needed in this test
- del self.cr_test_data['research_dataset']['files']
+ del self.cr_test_data["research_dataset"]["files"]
- response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json")
+ response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
@responses.activate
@@ -521,15 +541,15 @@ def test_deleting_permit_dataset_removes_catalogue_item_succeeds(self):
response = self._create_new_rems_dataset()
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
- cr_id = response.data['id']
+ cr_id = response.data["id"]
# delete dataset
- response = self.client.delete(f'/rest/v2/datasets/{cr_id}')
+ response = self.client.delete(f"/rest/v2/datasets/{cr_id}")
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)
- cr = self.client.get(f'/rest/v2/datasets/{cr_id}?removed').data
- self.assertTrue(cr.get('rems_identifier') is None, 'rems_identifier should not be present')
- self.assertTrue(cr.get('access_granter') is None, 'access_granter should not be present')
+ cr = self.client.get(f"/rest/v2/datasets/{cr_id}?removed").data
+ self.assertTrue(cr.get("rems_identifier") is None, "rems_identifier should not be present")
+ self.assertTrue(cr.get("access_granter") is None, "access_granter should not be present")
@responses.activate
def test_deleting_permit_dataset_removes_catalogue_item_fails(self):
@@ -537,7 +557,7 @@ def test_deleting_permit_dataset_removes_catalogue_item_fails(self):
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
# delete dataset
- self._mock_rems_access_return_error('PUT', 'catalogue-item', 'enabled')
+
self._mock_rems_access_return_error("PUT", "catalogue-item", "enabled") response = self.client.delete(f'/rest/v2/datasets/{response.data["id"]}') self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data) @@ -549,12 +569,20 @@ def test_deprecating_permit_dataset_removes_catalogue_item_succeeds(self): cr_before = response.data # deprecate dataset - response = self.client.delete(f"/rest/v2/files/{cr_before['research_dataset']['files'][0]['identifier']}") + response = self.client.delete( + f"/rest/v2/files/{cr_before['research_dataset']['files'][0]['identifier']}" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) cr_after = self.client.get(f'/rest/v2/datasets/{cr_before["id"]}').data - self.assertTrue(cr_after.get('rems_identifier') is None, 'rems_identifier should not be present') - self.assertTrue(cr_after.get('access_granter') is None, 'access_granter should not be present') + self.assertTrue( + cr_after.get("rems_identifier") is None, + "rems_identifier should not be present", + ) + self.assertTrue( + cr_after.get("access_granter") is None, + "access_granter should not be present", + ) @responses.activate def test_deprecating_permit_dataset_removes_catalogue_item_fails(self): @@ -562,11 +590,13 @@ def test_deprecating_permit_dataset_removes_catalogue_item_fails(self): self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) # deprecate dataset - self._mock_rems_access_crashes('PUT', 'workflow', 'archived') + self._mock_rems_access_crashes("PUT", "workflow", "archived") - response = self.client.delete(f"/rest/v2/files/{response.data['research_dataset']['files'][0]['identifier']}") + response = self.client.delete( + f"/rest/v2/files/{response.data['research_dataset']['files'][0]['identifier']}" + ) self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data) - self.assertTrue('failed to publish' in response.data['detail'][0], response.data) + self.assertTrue("failed to publish" in response.data["detail"][0], response.data) def test_missing_access_granter(self): """ @@ -575,70 +605,78 @@ def test_missing_access_granter(self): """ # test on create - self.cr_test_data['research_dataset']['access_rights'] = self.permit_rights - self.cr_test_data['data_catalog'] = IDA_CATALOG + self.cr_test_data["research_dataset"]["access_rights"] = self.permit_rights + self.cr_test_data["data_catalog"] = IDA_CATALOG - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertTrue('access_granter' in response.data['detail'][0], response.data) + self.assertTrue("access_granter" in response.data["detail"][0], response.data) # test on update - self.cr_test_data['research_dataset']['access_rights'] = self.open_rights - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["access_rights"] = self.open_rights + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) cr = response.data - cr['research_dataset']['access_rights'] = self.permit_rights + cr["research_dataset"]["access_rights"] = self.permit_rights response = self.client.put(f'/rest/v2/datasets/{cr["id"]}', cr, format="json") self.assertEqual(response.status_code, 
status.HTTP_400_BAD_REQUEST, response.data) - self.assertTrue('access_granter' in response.data['detail'][0], response.data) + self.assertTrue("access_granter" in response.data["detail"][0], response.data) def test_bad_access_granter_parameter(self): """ Access_granter values must be strings """ - self.cr_test_data['research_dataset']['access_rights'] = self.permit_rights - self.cr_test_data['data_catalog'] = IDA_CATALOG - self.cr_test_data['access_granter'] = self._get_access_granter(malformed=True) + self.cr_test_data["research_dataset"]["access_rights"] = self.permit_rights + self.cr_test_data["data_catalog"] = IDA_CATALOG + self.cr_test_data["access_granter"] = self._get_access_granter(malformed=True) - response = self.client.post( - '/rest/v2/datasets', - self.cr_test_data, - format="json" - ) + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertTrue('must be string' in response.data['detail'][0], response.data) + self.assertTrue("must be string" in response.data["detail"][0], response.data) def test_missing_license_in_dataset(self): """ License is required when dataset is REMS managed """ - self.cr_test_data['research_dataset']['access_rights'] = deepcopy(self.permit_rights) - del self.cr_test_data['research_dataset']['access_rights']['license'] - self.cr_test_data['data_catalog'] = IDA_CATALOG + self.cr_test_data["research_dataset"]["access_rights"] = deepcopy(self.permit_rights) + del self.cr_test_data["research_dataset"]["access_rights"]["license"] + self.cr_test_data["data_catalog"] = IDA_CATALOG response = self.client.post( - f'/rest/v2/datasets?access_granter={self._get_access_granter()}', + f"/rest/v2/datasets?access_granter={self._get_access_granter()}", self.cr_test_data, - format="json" + format="json", ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertTrue('must define license' in response.data['detail'][0], response.data) + self.assertTrue("must define license" in response.data["detail"][0], response.data) @responses.activate def test_only_return_rems_info_to_privileged(self): - self._set_http_authorization('service') + self._set_http_authorization("service") response = self._create_new_rems_dataset() self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertTrue(response.data.get('rems_identifier') is not None, 'rems_identifier should be returned to owner') - self.assertTrue(response.data.get('access_granter') is not None, 'access_granter should be returned to owner') + self.assertTrue( + response.data.get("rems_identifier") is not None, + "rems_identifier should be returned to owner", + ) + self.assertTrue( + response.data.get("access_granter") is not None, + "access_granter should be returned to owner", + ) - self._set_http_authorization('no') + self._set_http_authorization("no") response = self.client.get(f'/rest/v2/datasets/{response.data["id"]}') self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertTrue(response.data.get('rems_identifier') is None, 'rems_identifier should not be returned to Anon') - self.assertTrue(response.data.get('access_granter') is None, 'access_granter should not be returned to Anon') + self.assertTrue( + response.data.get("rems_identifier") is None, + "rems_identifier should not be returned to Anon", + ) + self.assertTrue( + response.data.get("access_granter") is None, + "access_granter should not be 
returned to Anon", + ) @responses.activate def test_rems_info_cannot_be_changed(self): @@ -647,10 +685,18 @@ def test_rems_info_cannot_be_changed(self): cr = response.data - cr['rems_identifier'] = 'some:new:identifier' - cr['access_granter']['name'] = 'New Name' + cr["rems_identifier"] = "some:new:identifier" + cr["access_granter"]["name"] = "New Name" response = self.client.put(f'/rest/v2/datasets/{cr["id"]}', cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertNotEqual(response.data['rems_identifier'], 'some:new:identifier', 'rems_id should not be changed') - self.assertNotEqual(response.data['access_granter'], 'New Name', 'access_granter should not be changed') + self.assertNotEqual( + response.data["rems_identifier"], + "some:new:identifier", + "rems_id should not be changed", + ) + self.assertNotEqual( + response.data["access_granter"], + "New Name", + "access_granter should not be changed", + ) diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/write.py b/src/metax_api/tests/api/rest/v2/views/datasets/write.py index 58bce4cb..ef00e592 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/write.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/write.py @@ -38,19 +38,24 @@ def setUp(self): """ Reloaded for every test case """ - call_command('loaddata', test_data_file_path, verbosity=0) - catalog_record_from_test_data = self._get_object_from_test_data('catalogrecord') - self.preferred_identifier = catalog_record_from_test_data['research_dataset']['preferred_identifier'] - self.identifier = catalog_record_from_test_data['identifier'] - self.pk = catalog_record_from_test_data['id'] + call_command("loaddata", test_data_file_path, verbosity=0) + catalog_record_from_test_data = self._get_object_from_test_data("catalogrecord") + self.preferred_identifier = catalog_record_from_test_data["research_dataset"][ + "preferred_identifier" + ] + self.identifier = catalog_record_from_test_data["identifier"] + self.pk = catalog_record_from_test_data["id"] """ New data that is sent to the server for POST, PUT, PATCH requests. 
Modified slightly as appropriate for different purposes
"""
self.cr_test_data = self._get_new_test_cr_data()
- self.cr_test_data['research_dataset']['publisher'] = {'@type': 'Organization', 'name': {'und': 'Testaaja'}}
- self.cr_test_data['research_dataset']['issued'] = '2010-01-01'
+ self.cr_test_data["research_dataset"]["publisher"] = {
+ "@type": "Organization",
+ "name": {"und": "Testaaja"},
+ }
+ self.cr_test_data["research_dataset"]["issued"] = "2010-01-01"
self.cr_att_test_data = self._get_new_test_cr_data(cr_index=14, dc_index=5)
self.cr_test_data_new_identifier = self._get_new_test_cr_data_with_updated_identifier()
@@ -60,11 +65,13 @@ def setUp(self):
self._use_http_authorization()
def update_record(self, record):
- return self.client.put('/rest/v2/datasets/%d' % record['id'], record, format="json")
+ return self.client.put("/rest/v2/datasets/%d" % record["id"], record, format="json")
def get_next_version(self, record):
- self.assertEqual('next_dataset_version' in record, True)
- response = self.client.get('/rest/v2/datasets/%d' % record['next_dataset_version']['id'], format="json")
+ self.assertEqual("next_dataset_version" in record, True)
+ response = self.client.get(
+ "/rest/v2/datasets/%d" % record["next_dataset_version"]["id"], format="json"
+ )
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
return response.data
@@ -77,45 +84,62 @@ def get_next_version(self, record):
#
def _get_new_test_cr_data(self, cr_index=0, dc_index=0, c_index=0):
- dc = self._get_object_from_test_data('datacatalog', requested_index=dc_index)
- catalog_record_from_test_data = self._get_object_from_test_data('catalogrecord', requested_index=cr_index)
-
- if dc['catalog_json']['research_dataset_schema'] == 'ida' and \
- 'remote_resources' in catalog_record_from_test_data['research_dataset']:
- self.fail("Cannot generate the requested test catalog record since requested data catalog is indicates ida "
- "schema and the requested catalog record is having remote resources, which is not allowed")
-
- if dc['catalog_json']['research_dataset_schema'] == 'att' and \
- ('files' in catalog_record_from_test_data['research_dataset'] or
- 'directories' in catalog_record_from_test_data['research_dataset']):
- self.fail("Cannot generate the requested test catalog record since requested data catalog is indicates att "
- "schema and the requested catalog record is having files or directories, which is not allowed")
-
- catalog_record_from_test_data.update({
- "contract": self._get_object_from_test_data('contract', requested_index=c_index),
- "data_catalog": dc
- })
- catalog_record_from_test_data['research_dataset'].update({
- "creator": [{
- "@type": "Person",
- "name": "Teppo Testaaja",
- "member_of": {
- "@type": "Organization",
- "name": {"fi": "Mysterious Organization"}
- }
- }],
- "curator": [{
- "@type": "Person",
- "name": "Default Owner",
- "member_of": {
- "@type": "Organization",
- "name": {"fi": "Mysterious Organization"}
- }
- }]
- })
- catalog_record_from_test_data['research_dataset'].pop('preferred_identifier', None)
- catalog_record_from_test_data['research_dataset'].pop('metadata_version_identifier', None)
- catalog_record_from_test_data.pop('identifier', None)
+ dc = self._get_object_from_test_data("datacatalog", requested_index=dc_index)
+ catalog_record_from_test_data = self._get_object_from_test_data(
+ "catalogrecord", requested_index=cr_index
+ )
+
+ if (
+ dc["catalog_json"]["research_dataset_schema"] == "ida"
+ and "remote_resources" in
catalog_record_from_test_data["research_dataset"]
+ ):
+ self.fail(
+ "Cannot generate the requested test catalog record since the requested data catalog indicates the ida "
+ "schema and the requested catalog record has remote resources, which is not allowed"
+ )
+
+ if dc["catalog_json"]["research_dataset_schema"] == "att" and (
+ "files" in catalog_record_from_test_data["research_dataset"]
+ or "directories" in catalog_record_from_test_data["research_dataset"]
+ ):
+ self.fail(
+ "Cannot generate the requested test catalog record since the requested data catalog indicates the att "
+ "schema and the requested catalog record has files or directories, which is not allowed"
+ )
+
+ catalog_record_from_test_data.update(
+ {
+ "contract": self._get_object_from_test_data("contract", requested_index=c_index),
+ "data_catalog": dc,
+ }
+ )
+ catalog_record_from_test_data["research_dataset"].update(
+ {
+ "creator": [
+ {
+ "@type": "Person",
+ "name": "Teppo Testaaja",
+ "member_of": {
+ "@type": "Organization",
+ "name": {"fi": "Mysterious Organization"},
+ },
+ }
+ ],
+ "curator": [
+ {
+ "@type": "Person",
+ "name": "Default Owner",
+ "member_of": {
+ "@type": "Organization",
+ "name": {"fi": "Mysterious Organization"},
+ },
+ }
+ ],
+ }
+ )
+ catalog_record_from_test_data["research_dataset"].pop("preferred_identifier", None)
+ catalog_record_from_test_data["research_dataset"].pop("metadata_version_identifier", None)
+ catalog_record_from_test_data.pop("identifier", None)
return catalog_record_from_test_data
def _get_new_test_cr_data_with_updated_identifier(self):
@@ -129,26 +153,40 @@ def _get_new_full_test_ida_cr_data(self):
"""
Returns one of the fuller generated test datasets
"""
- catalog_record_from_test_data = self._get_object_from_test_data('catalogrecord', requested_index=11)
- data_catalog_from_test_data = self._get_object_from_test_data('datacatalog', requested_index=0)
- return self._get_new_full_test_cr_data(catalog_record_from_test_data, data_catalog_from_test_data)
+ catalog_record_from_test_data = self._get_object_from_test_data(
+ "catalogrecord", requested_index=11
+ )
+ data_catalog_from_test_data = self._get_object_from_test_data(
+ "datacatalog", requested_index=0
+ )
+ return self._get_new_full_test_cr_data(
+ catalog_record_from_test_data, data_catalog_from_test_data
+ )
def _get_new_full_test_att_cr_data(self):
"""
Returns one of the fuller generated test datasets
"""
- catalog_record_from_test_data = self._get_object_from_test_data('catalogrecord', requested_index=23)
- data_catalog_from_test_data = self._get_object_from_test_data('datacatalog', requested_index=5)
- return self._get_new_full_test_cr_data(catalog_record_from_test_data, data_catalog_from_test_data)
+ catalog_record_from_test_data = self._get_object_from_test_data(
+ "catalogrecord", requested_index=23
+ )
+ data_catalog_from_test_data = self._get_object_from_test_data(
+ "datacatalog", requested_index=5
+ )
+ return self._get_new_full_test_cr_data(
+ catalog_record_from_test_data, data_catalog_from_test_data
+ )
def _get_new_full_test_cr_data(self, cr_from_test_data, dc_from_test_data):
- cr_from_test_data.update({
- "contract": self._get_object_from_test_data('contract', requested_index=0),
- "data_catalog": dc_from_test_data
- })
- cr_from_test_data['research_dataset'].pop('metadata_version_identifier')
- cr_from_test_data['research_dataset'].pop('preferred_identifier')
- cr_from_test_data.pop('identifier')
+ cr_from_test_data.update(
+ {
+ "contract": self._get_object_from_test_data("contract",
requested_index=0),
+ "data_catalog": dc_from_test_data,
+ }
+ )
+ cr_from_test_data["research_dataset"].pop("metadata_version_identifier")
+ cr_from_test_data["research_dataset"].pop("preferred_identifier")
+ cr_from_test_data.pop("identifier")
return cr_from_test_data
@@ -164,49 +202,58 @@ def setUp(self):
super().setUp()
def test_issued_date_is_generated(self):
- ''' Issued date is generated for all but harvested catalogs if it doesn't exists '''
+ """ Issued date is generated for all but harvested catalogs if it doesn't exist """
dc = DataCatalog.objects.get(pk=2)
- dc.catalog_json['identifier'] = IDA_CATALOG # Test with IDA catalog
+ dc.catalog_json["identifier"] = IDA_CATALOG # Test with IDA catalog
dc.force_save()
- self.cr_test_data['data_catalog'] = dc.catalog_json
- self.cr_test_data['research_dataset'].pop('issued', None)
+ self.cr_test_data["data_catalog"] = dc.catalog_json
+ self.cr_test_data["research_dataset"].pop("issued", None)
- response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json")
+ response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
- self.assertTrue('issued' in response.data['research_dataset'], response.data)
+ self.assertTrue("issued" in response.data["research_dataset"], response.data)
def test_create_catalog_record(self):
- self.cr_test_data['research_dataset']['preferred_identifier'] = 'this_should_be_overwritten'
- response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json")
+ self.cr_test_data["research_dataset"]["preferred_identifier"] = "this_should_be_overwritten"
+ response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
- self.assertEqual('research_dataset' in response.data.keys(), True)
- self.assertEqual('metadata_version_identifier' in response.data['research_dataset'], True,
- 'metadata_version_identifier should have been generated')
- self.assertEqual('preferred_identifier' in response.data['research_dataset'], True,
- 'preferred_identifier should have been generated')
+ self.assertEqual("research_dataset" in response.data.keys(), True)
+ self.assertEqual(
+ "metadata_version_identifier" in response.data["research_dataset"],
+ True,
+ "metadata_version_identifier should have been generated",
+ )
+ self.assertEqual(
+ "preferred_identifier" in response.data["research_dataset"],
+ True,
+ "preferred_identifier should have been generated",
+ )
self.assertNotEqual(
- self.cr_test_data['research_dataset']['preferred_identifier'],
- response.data['research_dataset']['preferred_identifier'],
- 'in fairdata catalogs, user is not allowed to set preferred_identifier'
+ self.cr_test_data["research_dataset"]["preferred_identifier"],
+ response.data["research_dataset"]["preferred_identifier"],
+ "in fairdata catalogs, user is not allowed to set preferred_identifier",
)
self.assertNotEqual(
- response.data['research_dataset']['preferred_identifier'],
- response.data['research_dataset']['metadata_version_identifier'],
- 'preferred_identifier and metadata_version_identifier should be generated separately'
+ response.data["research_dataset"]["preferred_identifier"],
+ response.data["research_dataset"]["metadata_version_identifier"],
+ "preferred_identifier and metadata_version_identifier should be generated separately",
+ )
+ cr = CatalogRecordV2.objects.get(pk=response.data["id"])
+ self.assertEqual(
+
cr.date_created >= get_tz_aware_now_without_micros() - timedelta(seconds=5), + True, + "Timestamp should have been updated during object creation", ) - cr = CatalogRecordV2.objects.get(pk=response.data['id']) - self.assertEqual(cr.date_created >= get_tz_aware_now_without_micros() - timedelta(seconds=5), True, - 'Timestamp should have been updated during object creation') def test_create_catalog_record_as_harvester(self): - self.cr_test_data['research_dataset']['preferred_identifier'] = 'this_should_be_saved' - self.cr_test_data['data_catalog'] = 3 - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["preferred_identifier"] = "this_should_be_saved" + self.cr_test_data["data_catalog"] = 3 + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual( - self.cr_test_data['research_dataset']['preferred_identifier'], - response.data['research_dataset']['preferred_identifier'], - 'in harvested catalogs, user (the harvester) is allowed to set preferred_identifier' + self.cr_test_data["research_dataset"]["preferred_identifier"], + response.data["research_dataset"]["preferred_identifier"], + "in harvested catalogs, user (the harvester) is allowed to set preferred_identifier", ) def test_preferred_identifier_is_checked_also_from_deleted_records(self): @@ -218,83 +265,134 @@ def test_preferred_identifier_is_checked_also_from_deleted_records(self): # dc 3 happens to be harvested catalog, which allows setting pref id cr = CatalogRecordV2.objects.filter(data_catalog_id=3).first() - response = self.client.delete('/rest/v2/datasets/%d' % cr.id) + response = self.client.delete("/rest/v2/datasets/%d" % cr.id) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) - self.cr_test_data['research_dataset']['preferred_identifier'] = cr.preferred_identifier - self.cr_test_data['data_catalog'] = 3 - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["preferred_identifier"] = cr.preferred_identifier + self.cr_test_data["data_catalog"] = 3 + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('already exists' in response.data['research_dataset'][0], True, response.data) + self.assertEqual( + "already exists" in response.data["research_dataset"][0], + True, + response.data, + ) def test_create_catalog_contract_string_identifier(self): - contract_identifier = Contract.objects.first().contract_json['identifier'] - self.cr_test_data['contract'] = contract_identifier - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + contract_identifier = Contract.objects.first().contract_json["identifier"] + self.cr_test_data["contract"] = contract_identifier + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data['contract']['identifier'], contract_identifier, response.data) + self.assertEqual( + response.data["contract"]["identifier"], contract_identifier, response.data + ) def test_create_catalog_error_contract_string_identifier_not_found(self): - self.cr_test_data['contract'] = 'doesnotexist' - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self.cr_test_data["contract"] = "doesnotexist" + response = 
self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") # self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, 'Should have raised 404 not found') self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('contract' in response.data, True, 'Error should have been about contract not found') + self.assertEqual( + "contract" in response.data, + True, + "Error should have been about contract not found", + ) def test_create_catalog_record_json_validation_error_1(self): """ Ensure the json path of the error is returned along with other details """ - self.cr_test_data['research_dataset']["title"] = 1234456 - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["title"] = 1234456 + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual(len(response.data), 2, 'there should be two errors (error_identifier is one of them)') - self.assertEqual('research_dataset' in response.data.keys(), True, - 'The error should concern the field research_dataset') - self.assertEqual('1234456 is not of type' in response.data['research_dataset'][0], True, response.data) - self.assertEqual('Json path: [\'title\']' in response.data['research_dataset'][0], True, response.data) + self.assertEqual( + len(response.data), + 2, + "there should be two errors (error_identifier is one of them)", + ) + self.assertEqual( + "research_dataset" in response.data.keys(), + True, + "The error should concern the field research_dataset", + ) + self.assertEqual( + "1234456 is not of type" in response.data["research_dataset"][0], + True, + response.data, + ) + self.assertEqual( + "Json path: ['title']" in response.data["research_dataset"][0], + True, + response.data, + ) def test_create_catalog_record_json_validation_error_2(self): """ Ensure the json path of the error is returned along with other details also in objects that are deeply nested """ - self.cr_test_data['research_dataset']['provenance'] = [{ - 'title': {'en': 'provenance title'}, - 'was_associated_with': [ - {'@type': 'Person', 'xname': 'seppo'} - ] - }] - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["provenance"] = [ + { + "title": {"en": "provenance title"}, + "was_associated_with": [{"@type": "Person", "xname": "seppo"}], + } + ] + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual(len(response.data), 2, 'there should be two errors (error_identifier is one of them)') - self.assertEqual('research_dataset' in response.data.keys(), True, - 'The error should concern the field research_dataset') - self.assertEqual('is not valid' in response.data['research_dataset'][0], True, response.data) - self.assertEqual('was_associated_with' in response.data['research_dataset'][0], True, response.data) + self.assertEqual( + len(response.data), + 2, + "there should be two errors (error_identifier is one of them)", + ) + self.assertEqual( + "research_dataset" in response.data.keys(), + True, + "The error should concern the field research_dataset", + ) + self.assertEqual( + "is not valid" in response.data["research_dataset"][0], True, response.data + ) + self.assertEqual( + "was_associated_with" in response.data["research_dataset"][0], + True, + response.data, + 
) def test_create_catalog_record_allowed_projects_ok(self): - response = self.client.post('/rest/v2/datasets?allowed_projects=project_x', self.cr_test_data, format="json") + response = self.client.post( + "/rest/v2/datasets?allowed_projects=project_x", + self.cr_test_data, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def test_create_catalog_record_allowed_projects_fail(self): # dataset file not in allowed projects response = self.client.post( - '/rest/v2/datasets?allowed_projects=no,permission', self.cr_test_data, format="json" + "/rest/v2/datasets?allowed_projects=no,permission", + self.cr_test_data, + format="json", ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) # ensure list is properly handled (separated by comma, end result should be list) - response = self.client.post('/rest/v2/datasets?allowed_projects=no_good_project_x,another', - self.cr_test_data, format="json") + response = self.client.post( + "/rest/v2/datasets?allowed_projects=no_good_project_x,another", + self.cr_test_data, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) # handle empty value - response = self.client.post('/rest/v2/datasets?allowed_projects=', self.cr_test_data, format="json") + response = self.client.post( + "/rest/v2/datasets?allowed_projects=", self.cr_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) # Other trickery - response = self.client.post('/rest/v2/datasets?allowed_projects=,', self.cr_test_data, format="json") + response = self.client.post( + "/rest/v2/datasets?allowed_projects=,", self.cr_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) # @@ -302,19 +400,25 @@ def test_create_catalog_record_allowed_projects_fail(self): # def test_create_catalog_record_list(self): - response = self.client.post('/rest/v2/datasets', - [self.cr_test_data, self.cr_test_data_new_identifier], format="json") + response = self.client.post( + "/rest/v2/datasets", + [self.cr_test_data, self.cr_test_data_new_identifier], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual('object' in response.data['success'][0].keys(), True) - self.assertEqual(len(response.data['success']), 2) - self.assertEqual(len(response.data['failed']), 0) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual("object" in response.data["success"][0].keys(), True) + self.assertEqual(len(response.data["success"]), 2) + self.assertEqual(len(response.data["failed"]), 0) def test_create_catalog_record_list_error_one_fails(self): - self.cr_test_data['research_dataset']["title"] = 1234456 - response = self.client.post('/rest/v2/datasets', - [self.cr_test_data, self.cr_test_data_new_identifier], format="json") + self.cr_test_data["research_dataset"]["title"] = 1234456 + response = self.client.post( + "/rest/v2/datasets", + [self.cr_test_data, self.cr_test_data_new_identifier], + format="json", + ) """ List response looks like @@ -333,34 +437,41 @@ def test_create_catalog_record_list_error_one_fails(self): } """ self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertEqual('success' in response.data.keys(), True) - 
self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual('object' in response.data['failed'][0].keys(), True) - self.assertEqual('research_dataset' in response.data['failed'][0]['errors'], True, response.data) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual("object" in response.data["failed"][0].keys(), True) + self.assertEqual( + "research_dataset" in response.data["failed"][0]["errors"], + True, + response.data, + ) self.assertEqual( - '1234456 is not of type' in response.data['failed'][0]['errors']['research_dataset'][0], + "1234456 is not of type" in response.data["failed"][0]["errors"]["research_dataset"][0], True, - response.data + response.data, ) self.assertEqual( - 'Json path: [\'title\']' in response.data['failed'][0]['errors']['research_dataset'][0], + "Json path: ['title']" in response.data["failed"][0]["errors"]["research_dataset"][0], True, - response.data + response.data, ) def test_create_catalog_record_list_error_all_fail(self): # data catalog is a required field, should fail - self.cr_test_data['data_catalog'] = None - self.cr_test_data_new_identifier['data_catalog'] = None + self.cr_test_data["data_catalog"] = None + self.cr_test_data_new_identifier["data_catalog"] = None - response = self.client.post('/rest/v2/datasets', - [self.cr_test_data, self.cr_test_data_new_identifier], format="json") + response = self.client.post( + "/rest/v2/datasets", + [self.cr_test_data, self.cr_test_data_new_identifier], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual('object' in response.data['failed'][0].keys(), True) - self.assertEqual(len(response.data['success']), 0) - self.assertEqual(len(response.data['failed']), 2) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual("object" in response.data["failed"][0].keys(), True) + self.assertEqual(len(response.data["success"]), 0) + self.assertEqual(len(response.data["failed"]), 2) def test_parameter_migration_override_preferred_identifier_when_creating(self): """ @@ -368,11 +479,13 @@ def test_parameter_migration_override_preferred_identifier_when_creating(self): permitted. Using the optional query parameter ?migration_override=bool a custom preferred_identifier can be passed. """ - custom_pid = 'custom-pid-value' - self.cr_test_data['research_dataset']['preferred_identifier'] = custom_pid - response = self.client.post('/rest/v2/datasets?migration_override', self.cr_test_data, format="json") + custom_pid = "custom-pid-value" + self.cr_test_data["research_dataset"]["preferred_identifier"] = custom_pid + response = self.client.post( + "/rest/v2/datasets?migration_override", self.cr_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data['research_dataset']['preferred_identifier'], custom_pid) + self.assertEqual(response.data["research_dataset"]["preferred_identifier"], custom_pid) def test_parameter_migration_override_no_preferred_identifier_when_creating(self): """ @@ -380,47 +493,67 @@ def test_parameter_migration_override_no_preferred_identifier_when_creating(self permitted. Using the optional query parameter ?migration_override=bool a custom preferred_identifier can be passed. 
""" - self.cr_test_data['research_dataset']['preferred_identifier'] = '' - response = self.client.post('/rest/v2/datasets?migration_override', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["preferred_identifier"] = "" + response = self.client.post( + "/rest/v2/datasets?migration_override", self.cr_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertTrue(len(response.data['research_dataset']['preferred_identifier']) > 0) + self.assertTrue(len(response.data["research_dataset"]["preferred_identifier"]) > 0) - self.cr_test_data['research_dataset'].pop('preferred_identifier', None) - response = self.client.post('/rest/v2/datasets?migration_override', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"].pop("preferred_identifier", None) + response = self.client.post( + "/rest/v2/datasets?migration_override", self.cr_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertTrue(len(response.data['research_dataset']['preferred_identifier']) > 0) + self.assertTrue(len(response.data["research_dataset"]["preferred_identifier"]) > 0) def test_create_catalog_record_using_pid_type(self): # Test with pid_type = urn - self.cr_test_data['research_dataset']['preferred_identifier'] = '' - response = self.client.post('/rest/v2/datasets?pid_type=urn', self.cr_test_data, format="json") - self.assertTrue(response.data['research_dataset']['preferred_identifier'].startswith('urn:')) + self.cr_test_data["research_dataset"]["preferred_identifier"] = "" + response = self.client.post( + "/rest/v2/datasets?pid_type=urn", self.cr_test_data, format="json" + ) + self.assertTrue( + response.data["research_dataset"]["preferred_identifier"].startswith("urn:") + ) # Test with pid_type = doi AND not ida catalog - self.cr_test_data['research_dataset']['preferred_identifier'] = '' - response = self.client.post('/rest/v2/datasets?pid_type=doi', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["preferred_identifier"] = "" + response = self.client.post( + "/rest/v2/datasets?pid_type=doi", self.cr_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) # Create ida data catalog - dc = self._get_object_from_test_data('datacatalog', requested_index=0) + dc = self._get_object_from_test_data("datacatalog", requested_index=0) dc_id = IDA_CATALOG - dc['catalog_json']['identifier'] = dc_id - self.client.post('/rest/v2/datacatalogs', dc, format="json") + dc["catalog_json"]["identifier"] = dc_id + self.client.post("/rest/v2/datacatalogs", dc, format="json") # Test with pid_type = doi AND ida catalog - self.cr_test_data['research_dataset']['preferred_identifier'] = '' - self.cr_test_data['data_catalog'] = IDA_CATALOG - response = self.client.post('/rest/v2/datasets?pid_type=doi', self.cr_test_data, format="json") - self.assertTrue(response.data['research_dataset']['preferred_identifier'].startswith('doi:10.')) + self.cr_test_data["research_dataset"]["preferred_identifier"] = "" + self.cr_test_data["data_catalog"] = IDA_CATALOG + response = self.client.post( + "/rest/v2/datasets?pid_type=doi", self.cr_test_data, format="json" + ) + self.assertTrue( + response.data["research_dataset"]["preferred_identifier"].startswith("doi:10.") + ) # Test with pid_type = not_known - self.cr_test_data['research_dataset']['preferred_identifier'] = '' - response = 
self.client.post('/rest/v2/datasets?pid_type=not_known', self.cr_test_data, format="json") - self.assertTrue(response.data['research_dataset']['preferred_identifier'].startswith('urn:')) + self.cr_test_data["research_dataset"]["preferred_identifier"] = "" + response = self.client.post( + "/rest/v2/datasets?pid_type=not_known", self.cr_test_data, format="json" + ) + self.assertTrue( + response.data["research_dataset"]["preferred_identifier"].startswith("urn:") + ) # Test without pid_type - self.cr_test_data['research_dataset']['preferred_identifier'] = '' - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") - self.assertTrue(response.data['research_dataset']['preferred_identifier'].startswith('urn:')) + self.cr_test_data["research_dataset"]["preferred_identifier"] = "" + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") + self.assertTrue( + response.data["research_dataset"]["preferred_identifier"].startswith("urn:") + ) class CatalogRecordApiWriteIdentifierUniqueness(CatalogRecordApiWriteCommon): @@ -439,65 +572,91 @@ class CatalogRecordApiWriteIdentifierUniqueness(CatalogRecordApiWriteCommon): # create operations # - def test_create_catalog_record_error_preferred_identifier_cant_be_metadata_version_identifier(self): + def test_create_catalog_record_error_preferred_identifier_cant_be_metadata_version_identifier( + self, + ): """ preferred_identifier can never be the same as a metadata_version_identifier in another cr, in any catalog. """ - existing_metadata_version_identifier = CatalogRecordV2.objects.get(pk=1).metadata_version_identifier - self.cr_test_data['research_dataset']['preferred_identifier'] = existing_metadata_version_identifier + existing_metadata_version_identifier = CatalogRecordV2.objects.get( + pk=1 + ).metadata_version_identifier + self.cr_test_data["research_dataset"][ + "preferred_identifier" + ] = existing_metadata_version_identifier # setting preferred_identifier is only allowed in harvested catalogs. 
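# Editor's note: a minimal sketch of the rule the comment above describes, assuming the
# data catalog JSON carries a "harvested" flag (catalog 3 in the test data is noted
# elsewhere in these tests to be a harvested catalog). Only such catalogs accept a
# client-supplied preferred_identifier; fairdata catalogs always generate one. The
# helper name and the flag lookup are illustrative, not Metax's actual implementation.
def may_set_preferred_identifier(catalog_json):
    # harvested catalogs are the only ones where the harvester supplies the pid
    return bool(catalog_json.get("harvested", False))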
- self.cr_test_data['data_catalog'] = 3 + self.cr_test_data["data_catalog"] = 3 - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('research_dataset' in response.data.keys(), True, - 'The error should be about an error in research_dataset') + self.assertEqual( + "research_dataset" in response.data.keys(), + True, + "The error should be about an error in research_dataset", + ) # the error message should clearly state that the value of preferred_identifier appears in the # field metadata_version_identifier in another record, therefore two asserts - self.assertEqual('preferred_identifier' in response.data['research_dataset'][0], True, - 'The error should be about metadata_version_identifier existing with this identifier') - self.assertEqual('metadata_version_identifier' in response.data['research_dataset'][0], True, - 'The error should be about metadata_version_identifier existing with this identifier') + self.assertEqual( + "preferred_identifier" in response.data["research_dataset"][0], + True, + "The error should be about metadata_version_identifier existing with this identifier", + ) + self.assertEqual( + "metadata_version_identifier" in response.data["research_dataset"][0], + True, + "The error should be about metadata_version_identifier existing with this identifier", + ) - def test_create_catalog_record_error_preferred_identifier_exists_in_same_catalog(self): + def test_create_catalog_record_error_preferred_identifier_exists_in_same_catalog( + self, + ): """ preferred_identifier already existing in the same data catalog is an error """ - self.cr_test_data['research_dataset']['preferred_identifier'] = 'pid_by_harvester' - self.cr_test_data['data_catalog'] = 3 - cr_1 = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json").data + self.cr_test_data["research_dataset"]["preferred_identifier"] = "pid_by_harvester" + self.cr_test_data["data_catalog"] = 3 + cr_1 = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json").data - self.cr_test_data['research_dataset']['preferred_identifier'] = \ - cr_1['research_dataset']['preferred_identifier'] + self.cr_test_data["research_dataset"]["preferred_identifier"] = cr_1["research_dataset"][ + "preferred_identifier" + ] - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('research_dataset' in response.data.keys(), True, - 'The error should be about an error in research_dataset') - self.assertEqual('preferred_identifier' in response.data['research_dataset'][0], True, - 'The error should be about preferred_identifier already existing') + self.assertEqual( + "research_dataset" in response.data.keys(), + True, + "The error should be about an error in research_dataset", + ) + self.assertEqual( + "preferred_identifier" in response.data["research_dataset"][0], + True, + "The error should be about preferred_identifier already existing", + ) def test_create_catalog_record_preferred_identifier_exists_in_another_catalog(self): """ preferred_identifier existing in another data catalog is not an error. 
""" unique_identifier = self._set_preferred_identifier_to_record(pk=1, catalog_id=1) - self.cr_test_data['research_dataset']['preferred_identifier'] = unique_identifier + self.cr_test_data["research_dataset"]["preferred_identifier"] = unique_identifier # different catalog, should be OK (not ATT catalog, so preferred_identifier being saved # can exist in other catalogs) - self.cr_test_data['data_catalog'] = 3 + self.cr_test_data["data_catalog"] = 3 - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) # # update operations # - def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_1(self): + def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_1( + self, + ): """ preferred_identifier existing in another data catalog is not an error. @@ -510,13 +669,15 @@ def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_1( cr.data_catalog_id = 3 cr.save() - data = self.client.get('/rest/v2/datasets/3').data - data['research_dataset']['preferred_identifier'] = unique_identifier + data = self.client.get("/rest/v2/datasets/3").data + data["research_dataset"]["preferred_identifier"] = unique_identifier - response = self.client.patch('/rest/v2/datasets/3', data, format="json") + response = self.client.patch("/rest/v2/datasets/3", data, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_2(self): + def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_2( + self, + ): """ preferred_identifier existing in another data catalog is not an error. 
@@ -529,14 +690,16 @@ def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_2(
"""
unique_identifier = self._set_preferred_identifier_to_record(pk=1, catalog_id=1)
- data = self.client.get('/rest/v2/datasets/3').data
- data['research_dataset']['preferred_identifier'] = unique_identifier
- data['data_catalog'] = 3
+ data = self.client.get("/rest/v2/datasets/3").data
+ data["research_dataset"]["preferred_identifier"] = unique_identifier
+ data["data_catalog"] = 3
- response = self.client.patch('/rest/v2/datasets/3', data, format="json")
+ response = self.client.patch("/rest/v2/datasets/3", data, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK, data)
- def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_3(self):
+ def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_3(
+ self,
+ ):
"""
preferred_identifier already existing in the same data catalog is an error,
in other catalogs than ATT: Harvester or other catalogs can't contain same
@@ -553,28 +716,35 @@ def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_3(
# setup the record in db which will cause conflict
unique_identifier = self._set_preferred_identifier_to_record(pk=3, catalog_id=3)
- data = {'research_dataset': self.cr_test_data['research_dataset']}
- data['research_dataset']['preferred_identifier'] = unique_identifier
- data['data_catalog'] = 3
+ data = {"research_dataset": self.cr_test_data["research_dataset"]}
+ data["research_dataset"]["preferred_identifier"] = unique_identifier
+ data["data_catalog"] = 3
- response = self.client.patch('/rest/v2/datasets/2', data, format="json")
+ response = self.client.patch("/rest/v2/datasets/2", data, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
- self.assertEqual('preferred_identifier' in response.data['research_dataset'][0], True,
- 'The error should be about preferred_identifier already existing')
+ self.assertEqual(
+ "preferred_identifier" in response.data["research_dataset"][0],
+ True,
+ "The error should be about preferred_identifier already existing",
+ )
def test_remote_doi_dataset_is_validated_against_datacite_format(self):
# Remote input DOI ids require datasets to go through datacite validation
- cr = {'research_dataset': self.cr_test_data['research_dataset']}
- cr['research_dataset']['preferred_identifier'] = 'doi:10.5061/dryad.10188854'
- cr['data_catalog'] = 3
- cr['metadata_provider_org'] = 'metax'
- cr['metadata_provider_user'] = 'metax'
- cr['research_dataset'].pop('publisher', None)
-
- response = self.client.post('/rest/v2/datasets', cr, format="json")
+ cr = {"research_dataset": self.cr_test_data["research_dataset"]}
+ cr["research_dataset"]["preferred_identifier"] = "doi:10.5061/dryad.10188854"
+ cr["data_catalog"] = 3
+ cr["metadata_provider_org"] = "metax"
+ cr["metadata_provider_user"] = "metax"
+ cr["research_dataset"].pop("publisher", None)
+
+ response = self.client.post("/rest/v2/datasets", cr, format="json")
# Publisher value is required for datacite format, so this should return Http400
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
- self.assertEqual('a required value for datacite format' in response.data['detail'][0], True, response.data)
+ self.assertEqual(
+ "a required value for datacite format" in response.data["detail"][0],
+ True,
+ response.data,
+ )
#
# helpers
#
@@ -585,9 +755,9 @@ def _set_preferred_identifier_to_record(self, pk=None, catalog_id=None):
Set preferred_identifier
to an existing record to a value, and return that value, which will then be used by the test to create or update another record. """ - unique_identifier = 'im unique yo' + unique_identifier = "im unique yo" cr = CatalogRecordV2.objects.get(pk=pk) - cr.research_dataset['preferred_identifier'] = unique_identifier + cr.research_dataset["preferred_identifier"] = unique_identifier cr.data_catalog_id = catalog_id cr.force_save() cr._handle_preferred_identifier_changed() @@ -605,29 +775,29 @@ class CatalogRecordApiWriteDatasetSchemaSelection(CatalogRecordApiWriteCommon): def _set_data_catalog_schema_to_harvester(self): dc = DataCatalog.objects.get(pk=1) - dc.catalog_json['research_dataset_schema'] = 'harvester' + dc.catalog_json["research_dataset_schema"] = "harvester" dc.save() def setUp(self): super().setUp() self._set_data_catalog_schema_to_harvester() - self.cr_test_data['research_dataset']['preferred_identifier'] = 'unique_pid' + self.cr_test_data["research_dataset"]["preferred_identifier"] = "unique_pid" def test_catalog_record_with_not_found_json_schema_gets_default_schema(self): # catalog has dataset schema, but it is not found on the server dc = DataCatalog.objects.get(pk=1) - dc.catalog_json['research_dataset_schema'] = 'nonexisting' + dc.catalog_json["research_dataset_schema"] = "nonexisting" dc.save() - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) # catalog has no dataset schema at all dc = DataCatalog.objects.get(pk=1) - dc.catalog_json.pop('research_dataset_schema') + dc.catalog_json.pop("research_dataset_schema") dc.save() - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def test_catalog_record_create_with_other_schema(self): @@ -635,21 +805,23 @@ def test_catalog_record_create_with_other_schema(self): Ensure that dataset json schema validation works with other json schemas than the default IDA """ - self.cr_test_data['research_dataset']['remote_resources'] = [ - {'title': 'title'}, - {'title': 'title'} + self.cr_test_data["research_dataset"]["remote_resources"] = [ + {"title": "title"}, + {"title": "title"}, ] - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.cr_test_data['research_dataset']['remote_resources'] = [ - {'title': 'title'}, - {'title': 'title'}, - {'woah': 'this should give a failure, since title is a required field, and it is missing'} + self.cr_test_data["research_dataset"]["remote_resources"] = [ + {"title": "title"}, + {"title": "title"}, + { + "woah": "this should give a failure, since title is a required field, and it is missing" + }, ] - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) def test_catalog_record_draft_is_validated_with_draft_schema(self): @@ -658,23 +830,22 @@ def test_catalog_record_draft_is_validated_with_draft_schema(self): of the chosen datacatalog. 
""" cr = deepcopy(self.cr_test_data) - cr['data_catalog'] = 2 # ida catalog - response = self.client.post('/rest/v2/datasets?draft', cr, format="json") + cr["data_catalog"] = 2 # ida catalog + response = self.client.post("/rest/v2/datasets?draft", cr, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) cr = response.data - cr['research_dataset']['remote_resources'] = [ - { - 'title': 'title', - 'use_category': {'identifier': 'source'} - } + cr["research_dataset"]["remote_resources"] = [ + {"title": "title", "use_category": {"identifier": "source"}} ] response = self.client.put(f'/rest/v2/datasets/{cr["id"]}', cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # ensure that dataset is validated against correct schema when publishing - response = self.client.post(f'/rpc/v2/datasets/publish_dataset?identifier={cr["id"]}', format="json") + response = self.client.post( + f'/rpc/v2/datasets/publish_dataset?identifier={cr["id"]}', format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) def test_catalog_record_ref_data_validation_with_other_schema(self): @@ -682,21 +853,22 @@ def test_catalog_record_ref_data_validation_with_other_schema(self): Ensure that dataset reference data validation and population works with other json schemas than the default IDA. Ref data validation should be schema agnostic """ - self.cr_test_data['research_dataset']['other_identifier'] = [ + self.cr_test_data["research_dataset"]["other_identifier"] = [ { - 'notation': 'urn:1', - 'type': { - 'identifier': 'doi', - } + "notation": "urn:1", + "type": { + "identifier": "doi", + }, } ] - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assertEqual( - 'uri.suomi.fi' in response.data['research_dataset']['other_identifier'][0]['type']['identifier'], + "uri.suomi.fi" + in response.data["research_dataset"]["other_identifier"][0]["type"]["identifier"], True, - 'Identifier type should have been populated with data from ref data' + "Identifier type should have been populated with data from ref data", ) @@ -708,38 +880,52 @@ class CatalogRecordApiWriteUpdateTests(CatalogRecordApiWriteCommon): # def test_update_catalog_record(self): - cr = self.client.get('/rest/v2/datasets/1').data - cr['preservation_description'] = 'what' + cr = self.client.get("/rest/v2/datasets/1").data + cr["preservation_description"] = "what" - response = self.client.put('/rest/v2/datasets/1', cr, format="json") + response = self.client.put("/rest/v2/datasets/1", cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['preservation_description'], 'what') + self.assertEqual(response.data["preservation_description"], "what") cr = CatalogRecordV2.objects.get(pk=1) - self.assertEqual(cr.date_modified >= get_tz_aware_now_without_micros() - timedelta(seconds=5), True, - 'Timestamp should have been updated during object update') + self.assertEqual( + cr.date_modified >= get_tz_aware_now_without_micros() - timedelta(seconds=5), + True, + "Timestamp should have been updated during object update", + ) def test_update_catalog_record_error_using_preferred_identifier(self): - cr = self.client.get('/rest/v2/datasets/1').data - response = self.client.put('/rest/v2/datasets/%s' % 
cr['research_dataset']['preferred_identifier'], - { 'whatever': 123 }, format="json") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, - 'Update operation should return 404 when using preferred_identifier') + cr = self.client.get("/rest/v2/datasets/1").data + response = self.client.put( + "/rest/v2/datasets/%s" % cr["research_dataset"]["preferred_identifier"], + {"whatever": 123}, + format="json", + ) + self.assertEqual( + response.status_code, + status.HTTP_404_NOT_FOUND, + "Update operation should return 404 when using preferred_identifier", + ) def test_update_catalog_record_error_required_fields(self): """ Field 'research_dataset' is missing, which should result in an error, since PUT replaces an object and requires all 'required' fields to be present. """ - cr = self.client.get('/rest/v2/datasets/1').data - cr.pop('research_dataset') - response = self.client.put('/rest/v2/datasets/1', cr, format="json") + cr = self.client.get("/rest/v2/datasets/1").data + cr.pop("research_dataset") + response = self.client.put("/rest/v2/datasets/1", cr, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('research_dataset' in response.data.keys(), True, - 'Error for field \'research_dataset\' is missing from response.data') + self.assertEqual( + "research_dataset" in response.data.keys(), + True, + "Error for field 'research_dataset' is missing from response.data", + ) def test_update_catalog_record_not_found(self): - response = self.client.put('/rest/v2/datasets/doesnotexist', self.cr_test_data, format="json") + response = self.client.put( + "/rest/v2/datasets/doesnotexist", self.cr_test_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_update_catalog_record_contract(self): @@ -748,154 +934,165 @@ def test_update_catalog_record_contract(self): old_contract_id = cr.contract.id # update contract to any different contract - cr_1 = self.client.get('/rest/v2/datasets/%d' % cr.id).data - cr_1['contract'] = Contract.objects.all().exclude(pk=old_contract_id).first().id + cr_1 = self.client.get("/rest/v2/datasets/%d" % cr.id).data + cr_1["contract"] = Contract.objects.all().exclude(pk=old_contract_id).first().id - response = self.client.put('/rest/v2/datasets/%d' % cr.id, cr_1, format="json") + response = self.client.put("/rest/v2/datasets/%d" % cr.id, cr_1, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) new_contract_id = CatalogRecordV2.objects.get(pk=cr.id).contract.id - self.assertNotEqual(old_contract_id, new_contract_id, 'Contract should have changed') + self.assertNotEqual(old_contract_id, new_contract_id, "Contract should have changed") # # update list operations PUT # def test_catalog_record_update_list(self): - cr_1 = self.client.get('/rest/v2/datasets/1').data - cr_1['preservation_description'] = 'updated description' + cr_1 = self.client.get("/rest/v2/datasets/1").data + cr_1["preservation_description"] = "updated description" - cr_2 = self.client.get('/rest/v2/datasets/2').data - cr_2['preservation_description'] = 'second updated description' + cr_2 = self.client.get("/rest/v2/datasets/2").data + cr_2["preservation_description"] = "second updated description" - response = self.client.put('/rest/v2/datasets', [ cr_1, cr_2 ], format="json") + response = self.client.put("/rest/v2/datasets", [cr_1, cr_2], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['success']), 2) 
+ self.assertEqual(len(response.data["success"]), 2) updated_cr = CatalogRecordV2.objects.get(pk=1) - self.assertEqual(updated_cr.preservation_description, 'updated description') + self.assertEqual(updated_cr.preservation_description, "updated description") updated_cr = CatalogRecordV2.objects.get(pk=2) - self.assertEqual(updated_cr.preservation_description, 'second updated description') + self.assertEqual(updated_cr.preservation_description, "second updated description") def test_catalog_record_update_list_error_one_fails(self): - cr_1 = self.client.get('/rest/v2/datasets/1').data - cr_1['preservation_description'] = 'updated description' + cr_1 = self.client.get("/rest/v2/datasets/1").data + cr_1["preservation_description"] = "updated description" # data catalog is a required field, should therefore fail - cr_2 = self.client.get('/rest/v2/datasets/2').data - cr_2.pop('data_catalog', None) + cr_2 = self.client.get("/rest/v2/datasets/2").data + cr_2.pop("data_catalog", None) - response = self.client.put('/rest/v2/datasets', [ cr_1, cr_2 ], format="json") + response = self.client.put("/rest/v2/datasets", [cr_1, cr_2], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual(isinstance(response.data['success'], list), True, - 'return data should contain key success, which is a list') - self.assertEqual(len(response.data['success']), 1) - self.assertEqual(len(response.data['failed']), 1) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual( + isinstance(response.data["success"], list), + True, + "return data should contain key success, which is a list", + ) + self.assertEqual(len(response.data["success"]), 1) + self.assertEqual(len(response.data["failed"]), 1) updated_cr = CatalogRecordV2.objects.get(pk=1) - self.assertEqual(updated_cr.preservation_description, 'updated description') + self.assertEqual(updated_cr.preservation_description, "updated description") def test_catalog_record_update_list_error_key_not_found(self): # does not have identifier key - cr_1 = self.client.get('/rest/v2/datasets/1').data - cr_1.pop('id') - cr_1.pop('identifier') - cr_1['research_dataset'].pop('metadata_version_identifier') + cr_1 = self.client.get("/rest/v2/datasets/1").data + cr_1.pop("id") + cr_1.pop("identifier") + cr_1["research_dataset"].pop("metadata_version_identifier") - cr_2 = self.client.get('/rest/v2/datasets/2').data - cr_2['preservation_description'] = 'second updated description' + cr_2 = self.client.get("/rest/v2/datasets/2").data + cr_2["preservation_description"] = "second updated description" - response = self.client.put('/rest/v2/datasets', [ cr_1, cr_2 ], format="json") + response = self.client.put("/rest/v2/datasets", [cr_1, cr_2], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual(len(response.data['success']), 1) - self.assertEqual(len(response.data['failed']), 1) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual(len(response.data["success"]), 1) + self.assertEqual(len(response.data["failed"]), 1) def test_catalog_record_deprecated_and_date_deprecated_cannot_be_set(self): # 
Test catalog record's deprecated field cannot be set with POST, PUT or PATCH initial_deprecated = True - self.cr_test_data['deprecated'] = initial_deprecated - self.cr_test_data['date_deprecated'] = '2018-01-01T00:00:00' - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") - self.assertEqual(response.data['deprecated'], False) - self.assertTrue('date_deprecated' not in response.data) - - response_json = self.client.get('/rest/v2/datasets/1').data - initial_deprecated = response_json['deprecated'] - response_json['deprecated'] = not initial_deprecated - response_json['date_deprecated'] = '2018-01-01T00:00:00' - response = self.client.put('/rest/v2/datasets/1', response_json, format="json") + self.cr_test_data["deprecated"] = initial_deprecated + self.cr_test_data["date_deprecated"] = "2018-01-01T00:00:00" + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") + self.assertEqual(response.data["deprecated"], False) + self.assertTrue("date_deprecated" not in response.data) + + response_json = self.client.get("/rest/v2/datasets/1").data + initial_deprecated = response_json["deprecated"] + response_json["deprecated"] = not initial_deprecated + response_json["date_deprecated"] = "2018-01-01T00:00:00" + response = self.client.put("/rest/v2/datasets/1", response_json, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['deprecated'], initial_deprecated) - self.assertTrue('date_deprecated' not in response.data) + self.assertEqual(response.data["deprecated"], initial_deprecated) + self.assertTrue("date_deprecated" not in response.data) - initial_deprecated = self.client.get('/rest/v2/datasets/1').data['deprecated'] - response = self.client.patch('/rest/v2/datasets/1', { 'deprecated': not initial_deprecated }, format="json") + initial_deprecated = self.client.get("/rest/v2/datasets/1").data["deprecated"] + response = self.client.patch( + "/rest/v2/datasets/1", {"deprecated": not initial_deprecated}, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['deprecated'], initial_deprecated) - self.assertTrue('date_deprecated' not in response.data) + self.assertEqual(response.data["deprecated"], initial_deprecated) + self.assertTrue("date_deprecated" not in response.data) def test_change_datacatalog_ATT_to_IDA(self): cr = self._get_new_full_test_att_cr_data() # create ATT data catalog - dc_att = self._get_object_from_test_data('datacatalog', 4) - dc_att['catalog_json']['identifier'] = 'urn:nbn:fi:att:data-catalog-att' - dc_att = self.client.post('/rest/v2/datacatalogs', dc_att, format="json").data + dc_att = self._get_object_from_test_data("datacatalog", 4) + dc_att["catalog_json"]["identifier"] = "urn:nbn:fi:att:data-catalog-att" + dc_att = self.client.post("/rest/v2/datacatalogs", dc_att, format="json").data # create IDA data catalog - dc_ida = self._get_object_from_test_data('datacatalog') - dc_ida['catalog_json']['identifier'] = 'urn:nbn:fi:att:data-catalog-ida' - dc_ida = self.client.post('/rest/v2/datacatalogs', dc_ida, format="json").data + dc_ida = self._get_object_from_test_data("datacatalog") + dc_ida["catalog_json"]["identifier"] = "urn:nbn:fi:att:data-catalog-ida" + dc_ida = self.client.post("/rest/v2/datacatalogs", dc_ida, format="json").data # create ATT catalog record - cr['data_catalog'] = dc_att - cr_att = self.client.post('/rest/v2/datasets', cr, format="json").data + cr["data_catalog"] = 
dc_att + cr_att = self.client.post("/rest/v2/datasets", cr, format="json").data # change data catalog to IDA - cr_id = cr_att['id'] - cr_att['data_catalog']['id'] = dc_ida['id'] - cr_att['data_catalog']['identifier'] = dc_ida['catalog_json']['identifier'] - cr_ida = self.client.put('/rest/v2/datasets/%d' % cr_id, cr_att, format="json") + cr_id = cr_att["id"] + cr_att["data_catalog"]["id"] = dc_ida["id"] + cr_att["data_catalog"]["identifier"] = dc_ida["catalog_json"]["identifier"] + cr_ida = self.client.put("/rest/v2/datasets/%d" % cr_id, cr_att, format="json") self.assertEqual(cr_ida.status_code, status.HTTP_200_OK, cr_ida) - self.assertTrue(not all(item in cr_ida.data['research_dataset'].keys() for item in - ['remote_resources', 'total_remote_resources_byte_size'])) - self.assertTrue('metadata_version_identifier' in cr_ida.data['research_dataset'].keys()) + self.assertTrue( + not all( + item in cr_ida.data["research_dataset"].keys() + for item in ["remote_resources", "total_remote_resources_byte_size"] + ) + ) + self.assertTrue("metadata_version_identifier" in cr_ida.data["research_dataset"].keys()) files = { - 'files': [{ "title": "File metadata title 1", "file_type": { "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/text", "pref_label": { "en": "Text", "fi": "Teksti", "und": "Teksti" } }, "identifier": "pid:urn:1", "use_category": { "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/source", "pref_label": { "en": "Source material", "fi": "LƤhdeaineisto", "und": "LƤhdeaineisto" } + "files": [ + { + "title": "File metadata title 1", + "file_type": { + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/text", + "pref_label": {"en": "Text", "fi": "Teksti", "und": "Teksti"}, + }, + "identifier": "pid:urn:1", + "use_category": { + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/source", + "pref_label": { + "en": "Source material", + "fi": "LƤhdeaineisto", + "und": "LƤhdeaineisto", + }, + }, } - }]} + ] + } - cr_ida = self.client.post('/rest/v2/datasets/%d/files' % cr_id, files, format="json") + cr_ida = self.client.post("/rest/v2/datasets/%d/files" % cr_id, files, format="json") self.assertEqual(cr_ida.status_code, status.HTTP_200_OK, cr_ida.data) - self.assertTrue(cr_ida.data['files_added'] == 1, 'Must indicate of one file addition') + self.assertTrue(cr_ida.data["files_added"] == 1, "Must indicate one file addition") - response = self.client.get('/rest/v2/datasets/%d?include_user_metadata' % cr_id).data - self.assertTrue(len(response['research_dataset']['files']) == 1, 'Dataset must contain one file') + response = self.client.get("/rest/v2/datasets/%d?include_user_metadata" % cr_id).data + self.assertTrue( + len(response["research_dataset"]["files"]) == 1, + "Dataset must contain one file", + ) class CatalogRecordApiWritePartialUpdateTests(CatalogRecordApiWriteCommon): @@ -906,15 +1103,25 @@ class CatalogRecordApiWritePartialUpdateTests(CatalogRecordApiWriteCommon): # def test_update_catalog_record_partial(self): - new_data_catalog = self._get_object_from_test_data('datacatalog', requested_index=1)['id'] + new_data_catalog = self._get_object_from_test_data("datacatalog", requested_index=1)["id"] new_data = { 
"data_catalog": new_data_catalog, } - response = self.client.patch('/rest/v2/datasets/%s' % self.identifier, new_data, format="json") + response = self.client.patch( + "/rest/v2/datasets/%s" % self.identifier, new_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('research_dataset' in response.data.keys(), True, 'PATCH operation should return full content') - self.assertEqual(response.data['data_catalog']['id'], new_data_catalog, 'Field data_catalog was not updated') + self.assertEqual( + "research_dataset" in response.data.keys(), + True, + "PATCH operation should return full content", + ) + self.assertEqual( + response.data["data_catalog"]["id"], + new_data_catalog, + "Field data_catalog was not updated", + ) # # update list operations PATCH @@ -922,59 +1129,86 @@ def test_update_catalog_record_partial(self): def test_catalog_record_partial_update_list(self): test_data = {} - test_data['id'] = 1 - test_data['preservation_description'] = 'description' + test_data["id"] = 1 + test_data["preservation_description"] = "description" second_test_data = {} - second_test_data['id'] = 2 - second_test_data['preservation_description'] = 'description 2' + second_test_data["id"] = 2 + second_test_data["preservation_description"] = "description 2" - response = self.client.patch('/rest/v2/datasets', [test_data, second_test_data], format="json") + response = self.client.patch( + "/rest/v2/datasets", [test_data, second_test_data], format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('success' in response.data, True, 'response.data should contain list of changed objects') - self.assertEqual(len(response.data), 2, 'response.data should contain 2 changed objects') - self.assertEqual('research_dataset' in response.data['success'][0]['object'], True, - 'response.data should contain full objects') + self.assertEqual( + "success" in response.data, + True, + "response.data should contain list of changed objects", + ) + self.assertEqual(len(response.data), 2, "response.data should contain 2 changed objects") + self.assertEqual( + "research_dataset" in response.data["success"][0]["object"], + True, + "response.data should contain full objects", + ) updated_cr = CatalogRecordV2.objects.get(pk=1) - self.assertEqual(updated_cr.preservation_description, 'description') + self.assertEqual(updated_cr.preservation_description, "description") def test_catalog_record_partial_update_list_error_one_fails(self): test_data = {} - test_data['id'] = 1 - test_data['preservation_description'] = 'description' + test_data["id"] = 1 + test_data["preservation_description"] = "description" second_test_data = {} - second_test_data['preservation_state'] = 555 # value not allowed - second_test_data['id'] = 2 + second_test_data["preservation_state"] = 555 # value not allowed + second_test_data["id"] = 2 - response = self.client.patch('/rest/v2/datasets', [test_data, second_test_data], format="json") + response = self.client.patch( + "/rest/v2/datasets", [test_data, second_test_data], format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual(len(response.data['success']), 1, 'success list should contain one item') - self.assertEqual(len(response.data['failed']), 1, 'there should have been one failed element') - self.assertEqual('preservation_state' in 
response.data['failed'][0]['errors'], True, - response.data['failed'][0]['errors']) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual(len(response.data["success"]), 1, "success list should contain one item") + self.assertEqual( + len(response.data["failed"]), 1, "there should have been one failed element" + ) + self.assertEqual( + "preservation_state" in response.data["failed"][0]["errors"], + True, + response.data["failed"][0]["errors"], + ) def test_catalog_record_partial_update_list_error_key_not_found(self): # does not have identifier key test_data = {} - test_data['preservation_state'] = 10 + test_data["preservation_state"] = 10 second_test_data = {} - second_test_data['id'] = 2 - second_test_data['preservation_state'] = 20 + second_test_data["id"] = 2 + second_test_data["preservation_state"] = 20 - response = self.client.patch('/rest/v2/datasets', [test_data, second_test_data], format="json") + response = self.client.patch( + "/rest/v2/datasets", [test_data, second_test_data], format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual(len(response.data['success']), 1, 'success list should contain one item') - self.assertEqual(len(response.data['failed']), 1, 'there should have been one failed element') - self.assertEqual('detail' in response.data['failed'][0]['errors'], True, response.data['failed'][0]['errors']) - self.assertEqual('identifying key' in response.data['failed'][0]['errors']['detail'][0], True, - response.data['failed'][0]['errors']) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual(len(response.data["success"]), 1, "success list should contain one item") + self.assertEqual( + len(response.data["failed"]), 1, "there should have been one failed element" + ) + self.assertEqual( + "detail" in response.data["failed"][0]["errors"], + True, + response.data["failed"][0]["errors"], + ) + self.assertEqual( + "identifying key" in response.data["failed"][0]["errors"]["detail"][0], + True, + response.data["failed"][0]["errors"], + ) class CatalogRecordApiWriteDeleteTests(CatalogRecordApiWriteCommon): @@ -987,7 +1221,7 @@ class CatalogRecordApiWriteDeleteTests(CatalogRecordApiWriteCommon): # def test_delete_catalog_record(self): - url = '/rest/v2/datasets/%s' % self.identifier + url = "/rest/v2/datasets/%s" % self.identifier response = self.client.delete(url) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) response = self.client.get(url) @@ -995,76 +1229,101 @@ def test_delete_catalog_record(self): try: deleted_catalog_record = CatalogRecordV2.objects.get(identifier=self.identifier) - raise Exception('Deleted CatalogRecord should not be retrievable from the default objects table') + raise Exception( + "Deleted CatalogRecord should not be retrievable from the default objects table" + ) except CatalogRecordV2.DoesNotExist: # a successful test should end up here, instead of raising the exception in the try: block pass try: - deleted_catalog_record = CatalogRecordV2.objects_unfiltered.get(identifier=self.identifier) + deleted_catalog_record = CatalogRecordV2.objects_unfiltered.get( + identifier=self.identifier + ) except CatalogRecordV2.DoesNotExist: - raise Exception('Deleted CatalogRecord should not be deleted from the db, but marked 
as removed') + raise Exception( + "Deleted CatalogRecord should not be deleted from the db, but marked as removed" + ) self.assertEqual(deleted_catalog_record.removed, True) self.assertEqual(deleted_catalog_record.identifier, self.identifier) - self.assertEqual(deleted_catalog_record.date_modified, deleted_catalog_record.date_removed, - 'date_modified should be updated') + self.assertEqual( + deleted_catalog_record.date_modified, + deleted_catalog_record.date_removed, + "date_modified should be updated", + ) def test_delete_catalog_record_error_using_preferred_identifier(self): - url = '/rest/v2/datasets/%s' % self.preferred_identifier + url = "/rest/v2/datasets/%s" % self.preferred_identifier response = self.client.delete(url) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_bulk_delete_catalog_record_permissions(self): # create catalog with 'metax' edit permissions and create dataset with this catalog as 'metax' user cr = self._get_new_test_cr_data() - cr.pop('id') - catalog = self._get_object_from_test_data('datacatalog', requested_index=0) - catalog.pop('id') - catalog['catalog_json']['identifier'] = 'metax-catalog' - catalog['catalog_record_services_edit'] = 'metax' - catalog = self.client.post('/rest/v2/datacatalogs', catalog, format="json") - cr['data_catalog'] = {'id': catalog.data['id'], 'identifier': catalog.data['catalog_json']['identifier']} - - self._use_http_authorization(username='metax') - response = self.client.post('/rest/v2/datasets/', cr, format="json") - metax_cr = response.data['id'] + cr.pop("id") + catalog = self._get_object_from_test_data("datacatalog", requested_index=0) + catalog.pop("id") + catalog["catalog_json"]["identifier"] = "metax-catalog" + catalog["catalog_record_services_edit"] = "metax" + catalog = self.client.post("/rest/v2/datacatalogs", catalog, format="json") + cr["data_catalog"] = { + "id": catalog.data["id"], + "identifier": catalog.data["catalog_json"]["identifier"], + } + + self._use_http_authorization(username="metax") + response = self.client.post("/rest/v2/datasets/", cr, format="json") + metax_cr = response.data["id"] # create catalog with 'testuser' edit permissions and create dataset with this catalog as 'testuser' user cr = self._get_new_test_cr_data() - cr.pop('id') - catalog = self._get_object_from_test_data('datacatalog', requested_index=1) - catalog.pop('id') - catalog['catalog_json']['identifier'] = 'testuser-catalog' - catalog['catalog_record_services_edit'] = 'testuser' - catalog = self.client.post('/rest/v2/datacatalogs', catalog, format="json") - cr['data_catalog'] = {'id': catalog.data['id'], 'identifier': catalog.data['catalog_json']['identifier']} - - self._use_http_authorization(username='testuser', password='testuserpassword') - response = self.client.post('/rest/v2/datasets/', cr, format="json") - testuser_cr = response.data['id'] + cr.pop("id") + catalog = self._get_object_from_test_data("datacatalog", requested_index=1) + catalog.pop("id") + catalog["catalog_json"]["identifier"] = "testuser-catalog" + catalog["catalog_record_services_edit"] = "testuser" + catalog = self.client.post("/rest/v2/datacatalogs", catalog, format="json") + cr["data_catalog"] = { + "id": catalog.data["id"], + "identifier": catalog.data["catalog_json"]["identifier"], + } + + self._use_http_authorization(username="testuser", password="testuserpassword") + response = self.client.post("/rest/v2/datasets/", cr, format="json") + testuser_cr = response.data["id"] # after trying to delete as 'testuser' only one dataset is 
deleted - response = self.client.delete('/rest/v2/datasets', [metax_cr, testuser_cr], format="json") + response = self.client.delete("/rest/v2/datasets", [metax_cr, testuser_cr], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) - response = self.client.post('/rest/v2/datasets/list?pagination=false', [metax_cr, testuser_cr], format="json") + response = self.client.post( + "/rest/v2/datasets/list?pagination=false", + [metax_cr, testuser_cr], + format="json", + ) self.assertTrue(len(response.data), 1) # deleting a list that contains an inaccessible resource - response = self.client.delete('/rest/datasets', [metax_cr], format="json") + response = self.client.delete("/rest/datasets", [metax_cr], format="json") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - response = self.client.post('/rest/v2/datasets/list?pagination=false', [metax_cr, testuser_cr], format="json") + response = self.client.post( + "/rest/v2/datasets/list?pagination=false", + [metax_cr, testuser_cr], + format="json", + ) self.assertTrue(len(response.data), 1) def test_bulk_delete_catalog_record(self): ids = [1, 2, 3] - identifiers = CatalogRecordV2.objects.filter(pk__in=[4, 5, 6]).values_list('identifier', flat=True) + identifiers = CatalogRecordV2.objects.filter(pk__in=[4, 5, 6]).values_list( + "identifier", flat=True + ) for crs in [ids, identifiers]: - response = self.client.delete('/rest/datasets', crs, format="json") + response = self.client.delete("/rest/datasets", crs, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(response.data == [1, 2, 3] or response.data == [4, 5, 6]) - response = self.client.post('/rest/datasets/list?pagination=false', crs, format="json") + response = self.client.post("/rest/datasets/list?pagination=false", crs, format="json") self.assertFalse(response.data) for cr in crs: @@ -1074,21 +1333,25 @@ def test_bulk_delete_catalog_record(self): deleted = CatalogRecordV2.objects_unfiltered.get(identifier=cr) self.assertEqual(deleted.removed, True) - self.assertEqual(deleted.date_modified, deleted.date_removed, - 'date_modified should be updated') + self.assertEqual( + deleted.date_modified, + deleted.date_removed, + "date_modified should be updated", + ) # failing tests ids = [1000, 2000] - identifiers = ['1000', '2000'] + identifiers = ["1000", "2000"] for crs in [ids, identifiers]: - response = self.client.delete('/rest/datasets', ids, format="json") + response = self.client.delete("/rest/datasets", ids, format="json") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) ids = [] - response = self.client.delete('/rest/datasets', ids, format="json") + response = self.client.delete("/rest/datasets", ids, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue('Received empty list of identifiers' in response.data['detail'][0]) + self.assertTrue("Received empty list of identifiers" in response.data["detail"][0]) + class CatalogRecordApiWriteAlternateRecords(CatalogRecordApiWriteCommon): @@ -1105,8 +1368,8 @@ class CatalogRecordApiWriteAlternateRecords(CatalogRecordApiWriteCommon): def setUp(self): super(CatalogRecordApiWriteAlternateRecords, self).setUp() self.preferred_identifier = self._set_preferred_identifier_to_record(pk=1, data_catalog=1) - self.cr_test_data['research_dataset']['preferred_identifier'] = self.preferred_identifier - self.cr_test_data['data_catalog'] = None + self.cr_test_data["research_dataset"]["preferred_identifier"] = self.preferred_identifier + 
self.cr_test_data["data_catalog"] = None def test_alternate_record_set_is_created_if_it_doesnt_exist(self): """ @@ -1115,21 +1378,29 @@ def test_alternate_record_set_is_created_if_it_doesnt_exist(self): """ # new record is saved to catalog 3, which does not support versioning - self.cr_test_data['data_catalog'] = 3 + self.cr_test_data["data_catalog"] = 3 existing_records_count = CatalogRecordV2.objects.filter( - research_dataset__contains={'preferred_identifier': self.preferred_identifier}).count() - self.assertEqual(existing_records_count, 1, - 'in the beginning, there should be only one record with pref id %s' - % self.preferred_identifier) + research_dataset__contains={"preferred_identifier": self.preferred_identifier} + ).count() + self.assertEqual( + existing_records_count, + 1, + "in the beginning, there should be only one record with pref id %s" + % self.preferred_identifier, + ) - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) records = CatalogRecordV2.objects.filter( - research_dataset__contains={'preferred_identifier': self.preferred_identifier}) - self.assertEqual(len(records), 2, - 'after, there should be two records with pref id %s' % self.preferred_identifier) + research_dataset__contains={"preferred_identifier": self.preferred_identifier} + ) + self.assertEqual( + len(records), + 2, + "after, there should be two records with pref id %s" % self.preferred_identifier, + ) # both records are moved to same set ars_id = records[0].alternate_record_set.id @@ -1138,7 +1409,7 @@ def test_alternate_record_set_is_created_if_it_doesnt_exist(self): # records in the set are the ones expected self.assertEqual(records[0].id, 1) - self.assertEqual(records[1].id, response.data['id']) + self.assertEqual(records[1].id, response.data["id"]) # records in the set are indeed in different catalogs self.assertEqual(records[0].data_catalog.id, 1) @@ -1151,20 +1422,29 @@ def test_append_to_existing_alternate_record_set_if_it_exists(self): to the existing alternate_record_set. 
""" self._set_preferred_identifier_to_record(pk=2, data_catalog=2) - self.cr_test_data['data_catalog'] = 3 + self.cr_test_data["data_catalog"] = 3 existing_records_count = CatalogRecordV2.objects.filter( - research_dataset__contains={'preferred_identifier': self.preferred_identifier}).count() - self.assertEqual(existing_records_count, 2, - 'in the beginning, there should be two records with pref id %s' % self.preferred_identifier) + research_dataset__contains={"preferred_identifier": self.preferred_identifier} + ).count() + self.assertEqual( + existing_records_count, + 2, + "in the beginning, there should be two records with pref id %s" + % self.preferred_identifier, + ) - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) records = CatalogRecordV2.objects.filter( - research_dataset__contains={'preferred_identifier': self.preferred_identifier}) - self.assertEqual(len(records), 3, - 'after, there should be three records with pref id %s' % self.preferred_identifier) + research_dataset__contains={"preferred_identifier": self.preferred_identifier} + ) + self.assertEqual( + len(records), + 3, + "after, there should be three records with pref id %s" % self.preferred_identifier, + ) # all records belong to same set ars_id = records[0].alternate_record_set.id @@ -1175,7 +1455,7 @@ def test_append_to_existing_alternate_record_set_if_it_exists(self): # records in the set are the ones expected self.assertEqual(records[0].id, 1) self.assertEqual(records[1].id, 2) - self.assertEqual(records[2].id, response.data['id']) + self.assertEqual(records[2].id, response.data["id"]) # records in the set are indeed in different catalogs self.assertEqual(records[0].data_catalog.id, 1) @@ -1192,15 +1472,18 @@ def test_record_is_removed_from_alternate_record_set_when_deleted(self): # initial conditions will have 3 records in the same set. self._set_and_ensure_initial_conditions() - response = self.client.delete('/rest/v2/datasets/2', format="json") + response = self.client.delete("/rest/v2/datasets/2", format="json") self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) # check resulting conditions records = CatalogRecordV2.objects.filter( - research_dataset__contains={'preferred_identifier': self.preferred_identifier}) + research_dataset__contains={"preferred_identifier": self.preferred_identifier} + ) self.assertEqual(records[0].alternate_record_set.records.count(), 2) - def test_alternate_record_set_is_deleted_if_updating_record_with_no_versioning_and_one_record_left(self): + def test_alternate_record_set_is_deleted_if_updating_record_with_no_versioning_and_one_record_left( + self, + ): """ Same as above, but updating a record in a catalog, which does NOT support versioning. In this case, the the records itself gets updated, and removed from the old alternate_record_set. 
@@ -1217,22 +1500,28 @@ def test_alternate_record_set_is_deleted_if_updating_record_with_no_versioning_a old_ars_id = CatalogRecordV2.objects.get(pk=2).alternate_record_set.id # retrieve record id=2, and change its preferred identifier - response = self.client.get('/rest/v2/datasets/2', format="json") - data = {'research_dataset': response.data['research_dataset']} - data['research_dataset']['preferred_identifier'] = 'a:new:identifier:here' + response = self.client.get("/rest/v2/datasets/2", format="json") + data = {"research_dataset": response.data["research_dataset"]} + data["research_dataset"]["preferred_identifier"] = "a:new:identifier:here" # updating preferred_identifier - a new version is NOT created - response = self.client.patch('/rest/v2/datasets/2', data=data, format="json") + response = self.client.patch("/rest/v2/datasets/2", data=data, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) records = CatalogRecordV2.objects.filter( - research_dataset__contains={'preferred_identifier': original_preferred_identifier }) + research_dataset__contains={"preferred_identifier": original_preferred_identifier} + ) self.assertEqual(records.count(), 1) - with self.assertRaises(AlternateRecordSet.DoesNotExist, msg='alternate record set should have been deleted'): + with self.assertRaises( + AlternateRecordSet.DoesNotExist, + msg="alternate record set should have been deleted", + ): AlternateRecordSet.objects.get(pk=old_ars_id) - def test_alternate_record_set_is_deleted_if_deleting_record_and_only_one_record_left(self): + def test_alternate_record_set_is_deleted_if_deleting_record_and_only_one_record_left( + self, + ): """ Same principle as above, but through deleting a record, instead of updating a record. @@ -1241,14 +1530,18 @@ def test_alternate_record_set_is_deleted_if_deleting_record_and_only_one_record_ self._set_preferred_identifier_to_record(pk=2, data_catalog=2) old_ars_id = CatalogRecordV2.objects.get(pk=2).alternate_record_set.id - response = self.client.delete('/rest/v2/datasets/2', format="json") + response = self.client.delete("/rest/v2/datasets/2", format="json") self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) records = CatalogRecordV2.objects.filter( - research_dataset__contains={'preferred_identifier': self.preferred_identifier}) - self.assertEqual(records.count(), 1, 'should be only record with this identifier left now') + research_dataset__contains={"preferred_identifier": self.preferred_identifier} + ) + self.assertEqual(records.count(), 1, "should be the only record with this identifier left now") - with self.assertRaises(AlternateRecordSet.DoesNotExist, msg='alternate record set should have been deleted'): + with self.assertRaises( + AlternateRecordSet.DoesNotExist, + msg="alternate record set should have been deleted", + ): AlternateRecordSet.objects.get(pk=old_ars_id) def test_alternate_record_set_is_included_in_responses(self): """ Details of a dataset should contain the field alternate_record_set. For a particular record, the set should not list the record's own metadata_version_identifier. 
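For example, for a record in a two-member set one would expect roughly (identifier value hypothetical):

    response.data["alternate_record_set"] == ["<identifier of the sibling record>"]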
""" - self.cr_test_data['data_catalog'] = 3 - msg_self_should_not_be_listed = 'identifier of the record itself should not be listed' + self.cr_test_data["data_catalog"] = 3 + msg_self_should_not_be_listed = "identifier of the record itself should not be listed" - response_1 = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") - response_2 = self.client.get('/rest/v2/datasets/1', format="json") + response_1 = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") + response_2 = self.client.get("/rest/v2/datasets/1", format="json") self.assertEqual(response_1.status_code, status.HTTP_201_CREATED) - self.assertEqual('alternate_record_set' in response_1.data, True) + self.assertEqual("alternate_record_set" in response_1.data, True) self.assertEqual( - response_1.data['identifier'] - not in response_1.data['alternate_record_set'], + response_1.data["identifier"] not in response_1.data["alternate_record_set"], True, - msg_self_should_not_be_listed + msg_self_should_not_be_listed, ) self.assertEqual( - response_2.data['identifier'] - in response_1.data['alternate_record_set'], - True + response_2.data["identifier"] in response_1.data["alternate_record_set"], + True, ) - self.cr_test_data.update({'data_catalog': 4}) - response_3 = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self.cr_test_data.update({"data_catalog": 4}) + response_3 = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response_3.status_code, status.HTTP_201_CREATED) - self.assertEqual('alternate_record_set' in response_3.data, True) + self.assertEqual("alternate_record_set" in response_3.data, True) self.assertEqual( - response_1.data['identifier'] - in response_3.data['alternate_record_set'], - True + response_1.data["identifier"] in response_3.data["alternate_record_set"], + True, ) self.assertEqual( - response_2.data['identifier'] - in response_3.data['alternate_record_set'], - True + response_2.data["identifier"] in response_3.data["alternate_record_set"], + True, ) self.assertEqual( - response_3.data['identifier'] - not in response_3.data['alternate_record_set'], + response_3.data["identifier"] not in response_3.data["alternate_record_set"], True, - msg_self_should_not_be_listed + msg_self_should_not_be_listed, ) - response_2 = self.client.get('/rest/v2/datasets/1', format="json") - self.assertEqual('alternate_record_set' in response_2.data, True) + response_2 = self.client.get("/rest/v2/datasets/1", format="json") + self.assertEqual("alternate_record_set" in response_2.data, True) self.assertEqual( - response_1.data['identifier'] - in response_2.data['alternate_record_set'], - True + response_1.data["identifier"] in response_2.data["alternate_record_set"], + True, ) self.assertEqual( - response_3.data['identifier'] - in response_2.data['alternate_record_set'], - True + response_3.data["identifier"] in response_2.data["alternate_record_set"], + True, ) self.assertEqual( - response_2.data['identifier'] - not in response_2.data['alternate_record_set'], + response_2.data["identifier"] not in response_2.data["alternate_record_set"], True, - msg_self_should_not_be_listed + msg_self_should_not_be_listed, ) def _set_preferred_identifier_to_record(self, pk=1, data_catalog=1): @@ -1323,9 +1608,9 @@ def _set_preferred_identifier_to_record(self, pk=1, data_catalog=1): Note that if calling this method several times, this will also create an alternate_record_set (by calling _handle_preferred_identifier_changed()). 
""" - unique_identifier = 'im unique yo' + unique_identifier = "im unique yo" cr = CatalogRecordV2.objects.get(pk=pk) - cr.research_dataset['preferred_identifier'] = unique_identifier + cr.research_dataset["preferred_identifier"] = unique_identifier cr.data_catalog_id = data_catalog cr.force_save() cr._handle_preferred_identifier_changed() @@ -1343,9 +1628,14 @@ def _set_and_ensure_initial_conditions(self): # ensuring initial conditions... records = CatalogRecordV2.objects.filter( - research_dataset__contains={'preferred_identifier': self.preferred_identifier}) - self.assertEqual(len(records), 3, - 'in the beginning, there should be three records with pref id %s' % self.preferred_identifier) + research_dataset__contains={"preferred_identifier": self.preferred_identifier} + ) + self.assertEqual( + len(records), + 3, + "in the beginning, there should be three records with pref id %s" + % self.preferred_identifier, + ) ars_id = records[0].alternate_record_set.id self.assertEqual(records[0].alternate_record_set.id, ars_id) self.assertEqual(records[1].alternate_record_set.id, ars_id) @@ -1360,43 +1650,58 @@ def test_update_rd_title_creates_new_metadata_version(self): self._assert_metadata_version_count(response_1.data, 2) # get list of metadata versions to access contents... - response = self.client.get('/rest/v2/datasets/%d/metadata_versions' % response_1.data['id'], format="json") + response = self.client.get( + "/rest/v2/datasets/%d/metadata_versions" % response_1.data["id"], + format="json", + ) - response_2 = self.client.get('/rest/v2/datasets/%d/metadata_versions/%s' % - (self.pk, response.data[0]['metadata_version_identifier']), format="json") + response_2 = self.client.get( + "/rest/v2/datasets/%d/metadata_versions/%s" + % (self.pk, response.data[0]["metadata_version_identifier"]), + format="json", + ) self.assertEqual(response_2.status_code, status.HTTP_200_OK, response_2.data) - self.assertEqual('preferred_identifier' in response_2.data, True) + self.assertEqual("preferred_identifier" in response_2.data, True) # note! 
response_1 == cr, response_2 == rd - self.assertEqual(response_1.data['research_dataset']['preferred_identifier'], - response_2.data['preferred_identifier']) + self.assertEqual( + response_1.data["research_dataset"]["preferred_identifier"], + response_2.data["preferred_identifier"], + ) def test_dataset_version_lists_removed_records(self): # create version2 of a record - response = self.client.post('/rpc/v2/datasets/create_new_version?identifier=1', format="json") + response = self.client.post( + "/rpc/v2/datasets/create_new_version?identifier=1", format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - new_version_id = response.data['id'] + new_version_id = response.data["id"] # publish the new version - response = self.client.post(f'/rpc/v2/datasets/publish_dataset?identifier={new_version_id}', format="json") + response = self.client.post( + f"/rpc/v2/datasets/publish_dataset?identifier={new_version_id}", + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # delete version2 - response = self.client.delete(f'/rest/v2/datasets/{new_version_id}', format="json") + response = self.client.delete(f"/rest/v2/datasets/{new_version_id}", format="json") self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) # check date_removed is listed and not None in deleted version - response = self.client.get('/rest/v2/datasets/1', format="json") + response = self.client.get("/rest/v2/datasets/1", format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertTrue(response.data['dataset_version_set'][0].get('date_removed')) - self.assertTrue(response.data['dataset_version_set'][0].get('date_removed') is not None) - self.assertFalse(response.data['dataset_version_set'][1].get('date_removed')) + self.assertTrue(response.data["dataset_version_set"][0].get("date_removed")) + self.assertTrue(response.data["dataset_version_set"][0].get("date_removed") is not None) + self.assertFalse(response.data["dataset_version_set"][1].get("date_removed")) def _assert_metadata_version_count(self, record, count): - response = self.client.get('/rest/v2/datasets/%d/metadata_versions' % record['id'], format="json") + response = self.client.get( + "/rest/v2/datasets/%d/metadata_versions" % record["id"], format="json" + ) self.assertEqual(len(response.data), count) def _set_cr_to_catalog(self, pk=None, dc=None): @@ -1411,41 +1716,49 @@ def _get_and_update_title(self, pk, params=None): Should not force preferred_identifier to change. """ - data = self.client.get('/rest/v2/datasets/%d' % pk, format="json").data - data['research_dataset']['title']['en'] = 'modified title' - return self.client.put('/rest/v2/datasets/%d%s' % (pk, params or ''), data, format="json") + data = self.client.get("/rest/v2/datasets/%d" % pk, format="json").data + data["research_dataset"]["title"]["en"] = "modified title" + return self.client.put("/rest/v2/datasets/%d%s" % (pk, params or ""), data, format="json") def test_allow_metadata_changes_after_deprecation(self): """ For deprecated datasets metadata changes are still allowed. Changing user metadata for files that are marked as removed (caused the deprecation) is not possible. 
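In outline, a sketch of the calls made below:

    self.client.delete("/rest/v2/files/1")  # removing a file deprecates the dataset
    self.client.put("/rest/v2/datasets/%s" % cr["id"], cr, format="json")  # still 200
    # but /files/user_metadata updates that touch the removed file return 400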
""" - response = self.client.get('/rest/v2/datasets/1?include_user_metadata') + response = self.client.get("/rest/v2/datasets/1?include_user_metadata") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) cr = response.data - response = self.client.delete('/rest/v2/files/1') + response = self.client.delete("/rest/v2/files/1") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # after the dataset is deprecated, metadata updates should still be ok - cr['research_dataset']['description'] = { + cr["research_dataset"]["description"] = { "en": "Updating new description for deprecated dataset should not create any problems" } - response = self.client.put('/rest/v2/datasets/%s' % cr['id'], cr, format="json") + response = self.client.put("/rest/v2/datasets/%s" % cr["id"], cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertTrue('new description' in response.data['research_dataset']['description']['en'], - 'description field should be updated') + self.assertTrue( + "new description" in response.data["research_dataset"]["description"]["en"], + "description field should be updated", + ) - file_changes = { - 'files': [ cr['research_dataset']['files'][0] ] - } + file_changes = {"files": [cr["research_dataset"]["files"][0]]} - file_changes['files'][0]['title'] = 'Brand new title 1' + file_changes["files"][0]["title"] = "Brand new title 1" - response = self.client.put('/rest/v2/datasets/%s/files/user_metadata' % cr['id'], file_changes, format="json") + response = self.client.put( + "/rest/v2/datasets/%s/files/user_metadata" % cr["id"], + file_changes, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual('The following files are not' in response.data['detail'][0], True, response.data) + self.assertEqual( + "The following files are not" in response.data["detail"][0], + True, + response.data, + ) class CatalogRecordApiWriteRemoteResources(CatalogRecordApiWriteCommon): @@ -1455,15 +1768,20 @@ class CatalogRecordApiWriteRemoteResources(CatalogRecordApiWriteCommon): """ def test_calculate_total_remote_resources_byte_size(self): - cr_with_rr = self._get_object_from_test_data('catalogrecord', requested_index=14) - rr = cr_with_rr['research_dataset']['remote_resources'] - total_remote_resources_byte_size = sum(res['byte_size'] for res in rr) - self.cr_att_test_data['research_dataset']['remote_resources'] = rr - response = self.client.post('/rest/v2/datasets', self.cr_att_test_data, format="json") + cr_with_rr = self._get_object_from_test_data("catalogrecord", requested_index=14) + rr = cr_with_rr["research_dataset"]["remote_resources"] + total_remote_resources_byte_size = sum(res["byte_size"] for res in rr) + self.cr_att_test_data["research_dataset"]["remote_resources"] = rr + response = self.client.post("/rest/v2/datasets", self.cr_att_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('total_remote_resources_byte_size' in response.data['research_dataset'], True) - self.assertEqual(response.data['research_dataset']['total_remote_resources_byte_size'], - total_remote_resources_byte_size) + self.assertEqual( + "total_remote_resources_byte_size" in response.data["research_dataset"], + True, + ) + self.assertEqual( + response.data["research_dataset"]["total_remote_resources_byte_size"], + total_remote_resources_byte_size, + ) class 
CatalogRecordApiWriteLegacyDataCatalogs(CatalogRecordApiWriteCommon): @@ -1477,55 +1795,57 @@ def setUp(self): Create a test-datacatalog that plays the role of a legacy catalog. """ super().setUp() - dc = DataCatalog.objects.filter(catalog_json__research_dataset_schema='att').first() - dc.catalog_json['identifier'] = LEGACY_CATALOGS[0] + dc = DataCatalog.objects.filter(catalog_json__research_dataset_schema="att").first() + dc.catalog_json["identifier"] = LEGACY_CATALOGS[0] dc.force_save() - del self.cr_test_data['research_dataset']['files'] - del self.cr_test_data['research_dataset']['total_files_byte_size'] + del self.cr_test_data["research_dataset"]["files"] + del self.cr_test_data["research_dataset"]["total_files_byte_size"] def test_legacy_catalog_pids_are_not_unique(self): # values provided as pid values in legacy catalogs are not required to be unique # within the catalog. - self.cr_test_data['data_catalog'] = LEGACY_CATALOGS[0] - self.cr_test_data['research_dataset']['preferred_identifier'] = 'a' + self.cr_test_data["data_catalog"] = LEGACY_CATALOGS[0] + self.cr_test_data["research_dataset"]["preferred_identifier"] = "a" same_pid_ids = [] for i in range(3): - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data['research_dataset']['preferred_identifier'], 'a') - same_pid_ids.append(response.data['id']) + self.assertEqual(response.data["research_dataset"]["preferred_identifier"], "a") + same_pid_ids.append(response.data["id"]) # pid can even be same as an existing dataset's pid in an ATT catalog real_pid = CatalogRecordV2.objects.get(pk=1).preferred_identifier - self.cr_test_data['research_dataset']['preferred_identifier'] = real_pid - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["preferred_identifier"] = real_pid + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data['research_dataset']['preferred_identifier'], real_pid) + self.assertEqual(response.data["research_dataset"]["preferred_identifier"], real_pid) def test_legacy_catalog_pid_must_be_provided(self): # pid cant be empty string - self.cr_test_data['data_catalog'] = LEGACY_CATALOGS[0] - self.cr_test_data['research_dataset']['preferred_identifier'] = '' - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self.cr_test_data["data_catalog"] = LEGACY_CATALOGS[0] + self.cr_test_data["research_dataset"]["preferred_identifier"] = "" + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) # pid cant be omitted - del self.cr_test_data['research_dataset']['preferred_identifier'] - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + del self.cr_test_data["research_dataset"]["preferred_identifier"] + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) def test_legacy_catalog_pids_update(self): # test setup - self.cr_test_data['data_catalog'] = LEGACY_CATALOGS[0] - 
self.cr_test_data['research_dataset']['preferred_identifier'] = 'a' - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self.cr_test_data["data_catalog"] = LEGACY_CATALOGS[0] + self.cr_test_data["research_dataset"]["preferred_identifier"] = "a" + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) # update record. uniqueness should also not be checked during updates modify = response.data real_pid = CatalogRecordV2.objects.get(pk=1).preferred_identifier - modify['research_dataset']['preferred_identifier'] = real_pid - response = self.client.put('/rest/v2/datasets/%s?include_legacy' % modify['id'], modify, format="json") + modify["research_dataset"]["preferred_identifier"] = real_pid + response = self.client.put( + "/rest/v2/datasets/%s?include_legacy" % modify["id"], modify, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_delete_legacy_catalog_dataset(self): @@ -1535,17 +1855,17 @@ def test_delete_legacy_catalog_dataset(self): """ # test setup - self.cr_test_data['data_catalog'] = LEGACY_CATALOGS[0] - self.cr_test_data['research_dataset']['preferred_identifier'] = 'a' - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self.cr_test_data["data_catalog"] = LEGACY_CATALOGS[0] + self.cr_test_data["research_dataset"]["preferred_identifier"] = "a" + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['id'] + cr_id = response.data["id"] # delete record - response = self.client.delete('/rest/v2/datasets/%s?include_legacy' % cr_id, format="json") + response = self.client.delete("/rest/v2/datasets/%s?include_legacy" % cr_id, format="json") self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) results_count = CatalogRecordV2.objects_unfiltered.filter(pk=cr_id).count() - self.assertEqual(results_count, 0, 'record should have been deleted permantly') + self.assertEqual(results_count, 0, "record should have been deleted permanently") class CatalogRecordApiWriteOwnerFields(CatalogRecordApiWriteCommon): @@ -1564,45 +1884,49 @@ def test_metadata_owner_org_is_copied_from_metadata_provider_org(self): """ # create - cr = self.client.get('/rest/v2/datasets/1', format="json").data - cr.pop('id') - cr.pop('identifier') - cr.pop('metadata_owner_org') - cr['research_dataset'].pop('preferred_identifier') - response = self.client.post('/rest/v2/datasets', cr, format="json") + cr = self.client.get("/rest/v2/datasets/1", format="json").data + cr.pop("id") + cr.pop("identifier") + cr.pop("metadata_owner_org") + cr["research_dataset"].pop("preferred_identifier") + response = self.client.post("/rest/v2/datasets", cr, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data['metadata_owner_org'], response.data['metadata_provider_org']) + self.assertEqual( + response.data["metadata_owner_org"], response.data["metadata_provider_org"] + ) # update to null - update is prevented - cr = self.client.get('/rest/v2/datasets/1', format="json").data - original = 
cr["metadata_owner_org"] + cr["metadata_owner_org"] = None + response = self.client.put("/rest/v2/datasets/1", cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['metadata_owner_org'], original) + self.assertEqual(response.data["metadata_owner_org"], original) # update with patch, where metadata_owner_org field is absent - value is not reverted back # to metadata_provider_org - response = self.client.patch('/rest/v2/datasets/1', { 'metadata_owner_org': 'abc' }, format="json") + response = self.client.patch( + "/rest/v2/datasets/1", {"metadata_owner_org": "abc"}, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.patch('/rest/v2/datasets/1', { 'contract': 1 }, format="json") - self.assertEqual(response.data['metadata_owner_org'], 'abc') + response = self.client.patch("/rest/v2/datasets/1", {"contract": 1}, format="json") + self.assertEqual(response.data["metadata_owner_org"], "abc") def test_metadata_provider_org_is_readonly_after_creating(self): - cr = self.client.get('/rest/v2/datasets/1', format="json").data - original = cr['metadata_provider_org'] - cr['metadata_provider_org'] = 'changed' - response = self.client.put('/rest/v2/datasets/1', cr, format="json") + cr = self.client.get("/rest/v2/datasets/1", format="json").data + original = cr["metadata_provider_org"] + cr["metadata_provider_org"] = "changed" + response = self.client.put("/rest/v2/datasets/1", cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['metadata_provider_org'], original) + self.assertEqual(response.data["metadata_provider_org"], original) def test_metadata_provider_user_is_readonly_after_creating(self): - cr = self.client.get('/rest/v2/datasets/1', format="json").data - original = cr['metadata_provider_user'] - cr['metadata_provider_user'] = 'changed' - response = self.client.put('/rest/v2/datasets/1', cr, format="json") + cr = self.client.get("/rest/v2/datasets/1", format="json").data + original = cr["metadata_provider_user"] + cr["metadata_provider_user"] = "changed" + response = self.client.put("/rest/v2/datasets/1", cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['metadata_provider_user'], original) + self.assertEqual(response.data["metadata_provider_user"], original) class CatalogRecordApiEndUserAccess(CatalogRecordApiWriteCommon): @@ -1618,19 +1942,19 @@ def setUp(self): dc = DataCatalog.objects.get(pk=1) catalog_json = dc.catalog_json for identifier in END_USER_ALLOWED_DATA_CATALOGS: - catalog_json['identifier'] = identifier + catalog_json["identifier"] = identifier dc = DataCatalog.objects.create( catalog_json=catalog_json, date_created=get_tz_aware_now_without_micros(), - catalog_record_services_create='testuser,api_auth_user,metax', - catalog_record_services_edit='testuser,api_auth_user,metax' + catalog_record_services_create="testuser,api_auth_user,metax", + catalog_record_services_edit="testuser,api_auth_user,metax", ) self.token = get_test_oidc_token() # by default, use the unmodified token. 
to use a different/modified token
         # for various test scenarios, alter self.token, and call the below method again
-        self._use_http_authorization(method='bearer', token=self.token)
+        self._use_http_authorization(method="bearer", token=self.token)

         # no reason to test anything related to failed authentication, since failed
         # authentication stops the request from proceeding anywhere
@@ -1638,160 +1962,170 @@ def setUp(self):

     def _set_cr_owner_to_token_user(self, cr_id):
         cr = CatalogRecordV2.objects.get(pk=cr_id)
-        cr.user_created = self.token['CSCUserName']
-        cr.metadata_provider_user = self.token['CSCUserName']
-        cr.editor = None # pretend the record was created by user directly
+        cr.user_created = self.token["CSCUserName"]
+        cr.metadata_provider_user = self.token["CSCUserName"]
+        cr.editor = None  # pretend the record was created by the user directly
         cr.force_save()

     def _set_cr_to_permitted_catalog(self, cr_id):
         cr = CatalogRecordV2.objects.get(pk=cr_id)
-        cr.data_catalog_id = DataCatalog.objects.get(catalog_json__identifier=END_USER_ALLOWED_DATA_CATALOGS[0]).id
+        cr.data_catalog_id = DataCatalog.objects.get(
+            catalog_json__identifier=END_USER_ALLOWED_DATA_CATALOGS[0]
+        ).id
         cr.force_save()

     @responses.activate
     def test_user_can_create_dataset(self):
-        '''
+        """
        Ensure end user can create a new dataset, and required fields are automatically placed
        and the user is only able to affect allowed fields
-        '''
-        user_created = self.token['CSCUserName']
-        metadata_provider_user = self.token['CSCUserName']
-        metadata_provider_org = self.token['schacHomeOrganization']
-        metadata_owner_org = self.token['schacHomeOrganization']
-
-        self.cr_test_data['data_catalog'] = END_USER_ALLOWED_DATA_CATALOGS[0] # ida
-        self.cr_test_data['contract'] = 1
-        self.cr_test_data['editor'] = { 'nope': 'discarded by metax' }
-        self.cr_test_data['preservation_description'] = 'discarded by metax'
-        self.cr_test_data['preservation_reason_description'] = 'discarded by metax'
-        self.cr_test_data['preservation_state'] = 10
-        self.cr_test_data.pop('metadata_provider_user', None)
-        self.cr_test_data.pop('metadata_provider_org', None)
-        self.cr_test_data.pop('metadata_owner_org', None)
+        """
+        user_created = self.token["CSCUserName"]
+        metadata_provider_user = self.token["CSCUserName"]
+        metadata_provider_org = self.token["schacHomeOrganization"]
+        metadata_owner_org = self.token["schacHomeOrganization"]
+
+        self.cr_test_data["data_catalog"] = END_USER_ALLOWED_DATA_CATALOGS[0]  # ida
+        self.cr_test_data["contract"] = 1
+        self.cr_test_data["editor"] = {"nope": "discarded by metax"}
+        self.cr_test_data["preservation_description"] = "discarded by metax"
+        self.cr_test_data["preservation_reason_description"] = "discarded by metax"
+        self.cr_test_data["preservation_state"] = 10
+        self.cr_test_data.pop("metadata_provider_user", None)
+        self.cr_test_data.pop("metadata_provider_org", None)
+        self.cr_test_data.pop("metadata_owner_org", None)

         # test file permission checking in another test
-        self.cr_test_data['research_dataset'].pop('files', None)
-        self.cr_test_data['research_dataset'].pop('directories', None)
+        self.cr_test_data["research_dataset"].pop("files", None)
+        self.cr_test_data["research_dataset"].pop("directories", None)

-        response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json")
+        response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
-        self.assertEqual(response.data['user_created'], user_created)
-        self.assertEqual(response.data['metadata_provider_user'], metadata_provider_user)
-        self.assertEqual(response.data['metadata_provider_org'], metadata_provider_org)
-        self.assertEqual(response.data['metadata_owner_org'], metadata_owner_org)
-        self.assertEqual('contract' in response.data, False)
-        self.assertEqual('editor' in response.data, False)
-        self.assertEqual('preservation_description' in response.data, False)
-        self.assertEqual('preservation_reason_description' in response.data, False)
-        self.assertEqual(response.data['preservation_state'], 0)
+        self.assertEqual(response.data["user_created"], user_created)
+        self.assertEqual(response.data["metadata_provider_user"], metadata_provider_user)
+        self.assertEqual(response.data["metadata_provider_org"], metadata_provider_org)
+        self.assertEqual(response.data["metadata_owner_org"], metadata_owner_org)
+        self.assertEqual("contract" in response.data, False)
+        self.assertEqual("editor" in response.data, False)
+        self.assertEqual("preservation_description" in response.data, False)
+        self.assertEqual("preservation_reason_description" in response.data, False)
+        self.assertEqual(response.data["preservation_state"], 0)

     @responses.activate
     def test_user_can_create_datasets_only_to_limited_catalogs(self):
-        '''
+        """
         End users should not be able to create datasets in, for example, harvested data catalogs.
-        '''
+        """

         # test file permission checking in another test
-        self.cr_test_data['research_dataset'].pop('files', None)
-        self.cr_test_data['research_dataset'].pop('directories', None)
+        self.cr_test_data["research_dataset"].pop("files", None)
+        self.cr_test_data["research_dataset"].pop("directories", None)

         # should not work
-        self.cr_test_data['data_catalog'] = 1
-        response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["data_catalog"] = 1
+        response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
         # check that the error has the expected description
-        self.assertEqual('selected data catalog' in response.data['detail'][0], True, response.data)
+        self.assertEqual("selected data catalog" in response.data["detail"][0], True, response.data)

         # should work
         for identifier in END_USER_ALLOWED_DATA_CATALOGS:
             if identifier in LEGACY_CATALOGS:
-                self.cr_test_data['research_dataset']['preferred_identifier'] = 'a'
+                self.cr_test_data["research_dataset"]["preferred_identifier"] = "a"

-            params = 'draft' if identifier == DFT_CATALOG else ''
-            self.cr_test_data['data_catalog'] = identifier
-            response = self.client.post(f'/rest/v2/datasets?{params}', self.cr_test_data, format="json")
+            params = "draft" if identifier == DFT_CATALOG else ""
+            self.cr_test_data["data_catalog"] = identifier
+            response = self.client.post(
+                f"/rest/v2/datasets?{params}", self.cr_test_data, format="json"
+            )
             self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

             if identifier in LEGACY_CATALOGS:
                 # prevents the next test from crashing if a legacy catalog is not the last in the list
-                del self.cr_test_data['research_dataset']['preferred_identifier']
+                del self.cr_test_data["research_dataset"]["preferred_identifier"]

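The 403 checked above comes down to a membership test on the catalog identifier. A minimal sketch of the idea, assuming a hypothetical helper name (only END_USER_ALLOWED_DATA_CATALOGS comes from the test module; this is not Metax's actual implementation):

    def end_user_may_use_catalog(catalog_identifier):
        # End users may create datasets only in explicitly whitelisted catalogs;
        # anything else (e.g. a harvested catalog) is rejected with HTTP 403.
        return catalog_identifier in END_USER_ALLOWED_DATA_CATALOGS
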
     @responses.activate
     def test_owner_can_edit_dataset(self):
-        '''
+        """
         Ensure end users are able to edit datasets owned by them.
         Ensure end users can only edit permitted fields.
         Note: File project permissions should not be checked, since files are not changed.
-        '''
+        """

         # create test record
-        self.cr_test_data['data_catalog'] = END_USER_ALLOWED_DATA_CATALOGS[0]
-        self.cr_test_data['research_dataset'].pop('files', None) # test file permission checking in another test
-        self.cr_test_data['research_dataset'].pop('directories', None)
-        response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["data_catalog"] = END_USER_ALLOWED_DATA_CATALOGS[0]
+        self.cr_test_data["research_dataset"].pop(
+            "files", None
+        )  # test file permission checking in another test
+        self.cr_test_data["research_dataset"].pop("directories", None)
+        response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
         modified_data = response.data
         # research_dataset is the only permitted field to edit
-        modified_data['research_dataset']['value'] = 112233
-        modified_data['contract'] = 1
-        modified_data['editor'] = { 'nope': 'discarded by metax' }
-        modified_data['preservation_description'] = 'discarded by metax'
-        modified_data['preservation_reason_description'] = 'discarded by metax'
-        modified_data['preservation_state'] = 10
-
-        response = self.client.put('/rest/v2/datasets/%d' % modified_data['id'], modified_data, format="json")
+        modified_data["research_dataset"]["value"] = 112233
+        modified_data["contract"] = 1
+        modified_data["editor"] = {"nope": "discarded by metax"}
+        modified_data["preservation_description"] = "discarded by metax"
+        modified_data["preservation_reason_description"] = "discarded by metax"
+        modified_data["preservation_state"] = 10
+
+        response = self.client.put(
+            "/rest/v2/datasets/%d" % modified_data["id"], modified_data, format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(response.data['research_dataset']['value'], 112233) # value we set
-        self.assertEqual(response.data['user_modified'], self.token['CSCUserName']) # set by metax
+        self.assertEqual(response.data["research_dataset"]["value"], 112233)  # value we set
+        self.assertEqual(response.data["user_modified"], self.token["CSCUserName"])  # set by metax

         # none of these should have been affected
-        self.assertEqual('contract' in response.data, False)
-        self.assertEqual('editor' in response.data, False)
-        self.assertEqual('preservation_description' in response.data, False)
-        self.assertEqual('preservation_reason_description' in response.data, False)
-        self.assertEqual(response.data['preservation_state'], 0)
+        self.assertEqual("contract" in response.data, False)
+        self.assertEqual("editor" in response.data, False)
+        self.assertEqual("preservation_description" in response.data, False)
+        self.assertEqual("preservation_reason_description" in response.data, False)
+        self.assertEqual(response.data["preservation_state"], 0)

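The field restrictions asserted above amount to whitelisting top-level keys of the payload before it is applied. A minimal sketch of that idea, with hypothetical names (the real behaviour lives in the serializers):

    END_USER_EDITABLE_FIELDS = {"research_dataset"}  # assumption for this sketch

    def strip_restricted_fields(payload):
        # Silently drop everything an end user may not modify; the request then
        # proceeds as if those keys had never been sent, which is why the test
        # expects 200 OK rather than an error.
        return {key: value for key, value in payload.items() if key in END_USER_EDITABLE_FIELDS}
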
     @responses.activate
     def test_owner_can_edit_datasets_only_in_permitted_catalogs(self):
-        '''
+        """
         Ensure end users are able to edit datasets only in permitted catalogs, even if they own the
         record (a catalog may be disabled from end user editing for one reason or another).
-        '''
+        """

         # create test record
-        self.cr_test_data['data_catalog'] = 1
-        self.cr_test_data['user_created'] = self.token['CSCUserName']
-        self.cr_test_data['metadata_provider_user'] = self.token['CSCUserName']
-        self.cr_test_data.pop('editor', None)
+        self.cr_test_data["data_catalog"] = 1
+        self.cr_test_data["user_created"] = self.token["CSCUserName"]
+        self.cr_test_data["metadata_provider_user"] = self.token["CSCUserName"]
+        self.cr_test_data.pop("editor", None)

-        self._use_http_authorization() # create cr as a service-user
-        response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json")
+        self._use_http_authorization()  # create cr as a service-user
+        response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

         modified_data = response.data
-        modified_data['research_dataset']['value'] = 112233
+        modified_data["research_dataset"]["value"] = 112233

-        self._use_http_authorization(method='bearer', token=self.token)
-        response = self.client.put('/rest/v2/datasets/%d' % modified_data['id'], modified_data, format="json")
+        self._use_http_authorization(method="bearer", token=self.token)
+        response = self.client.put(
+            "/rest/v2/datasets/%d" % modified_data["id"], modified_data, format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)

     @responses.activate
     def test_other_users_cant_edit_dataset(self):
-        '''
+        """
         Ensure end users are unable to edit datasets not owned by them.
-        '''
-        response = self.client.get('/rest/v2/datasets/1', format="json")
+        """
+        response = self.client.get("/rest/v2/datasets/1", format="json")
         modified_data = response.data
-        modified_data['research_dataset']['value'] = 112233
+        modified_data["research_dataset"]["value"] = 112233

-        response = self.client.put('/rest/v2/datasets/1', modified_data, format="json")
+        response = self.client.put("/rest/v2/datasets/1", modified_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

-        response = self.client.put('/rest/v2/datasets', [modified_data], format="json")
+        response = self.client.put("/rest/v2/datasets", [modified_data], format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
         # ^ individual errors do not have error codes; only the general request
         # has an error code for a failed request.
@@ -1800,87 +2134,92 @@ def test_other_users_cant_edit_dataset(self):
     def test_user_can_delete_dataset(self):
         self._set_cr_owner_to_token_user(1)
         self._set_cr_to_permitted_catalog(1)
-        response = self.client.delete('/rest/v2/datasets/1', format="json")
+        response = self.client.delete("/rest/v2/datasets/1", format="json")
         self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)

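The 403 responses asserted above follow from an ownership comparison between the bearer token and the record, which _set_cr_owner_to_token_user fakes by writing the token's CSC username onto the record. A rough sketch of such a check (helper name hypothetical, not Metax's actual implementation):

    def user_owns_record(cr, token):
        # A record counts as owned by an end user when the token's CSC username
        # matches the recorded creator or metadata provider user.
        username = token.get("CSCUserName")
        return bool(username) and username in (cr.user_created, cr.metadata_provider_user)
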
     @responses.activate
     def test_user_file_permissions_are_checked_during_dataset_create(self):
-        '''
+        """
         Ensure user's association with a project is checked during dataset create when attaching
         files or directories to a dataset.
-        '''
+        """

         # try creating without proper permissions
-        self.cr_test_data['data_catalog'] = END_USER_ALLOWED_DATA_CATALOGS[0] # ida
-        response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["data_catalog"] = END_USER_ALLOWED_DATA_CATALOGS[0]  # ida
+        response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.content)

         # add project membership to user's token and try again
-        file_identifier = self.cr_test_data['research_dataset']['files'][0]['identifier']
+        file_identifier = self.cr_test_data["research_dataset"]["files"][0]["identifier"]
         project_identifier = File.objects.get(identifier=file_identifier).project_identifier
-        self.token['group_names'].append('IDA01:%s' % project_identifier)
-        self._use_http_authorization(method='bearer', token=self.token)
+        self.token["group_names"].append("IDA01:%s" % project_identifier)
+        self._use_http_authorization(method="bearer", token=self.token)

-        response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json")
+        response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)

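Project membership is read from the token's group_names entries of the form "IDA01:<project_identifier>", which is exactly what the test appends above before retrying. A minimal sketch of the extraction (helper name hypothetical):

    def token_projects(token):
        # Collect the IDA project identifiers the user belongs to.
        return {
            name.split(":", 1)[1]
            for name in token.get("group_names", [])
            if name.startswith("IDA01:")
        }

A file can then be attached only when its project_identifier appears in token_projects(token).
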
     @responses.activate
     def test_user_file_permissions_are_checked_during_dataset_update(self):
-        '''
+        """
         Ensure user's association with a project is checked during dataset update when attaching
         files or directories to a dataset. The permissions should be checked only for changed files
         (newly added, or removed).
-        '''
+        """
         # get some files to add to another dataset
-        new_files = CatalogRecordV2.objects.get(pk=1).research_dataset['files']
+        new_files = CatalogRecordV2.objects.get(pk=1).research_dataset["files"]

-        self.cr_test_data['data_catalog'] = END_USER_ALLOWED_DATA_CATALOGS[0] # ida
-        self.cr_test_data['research_dataset'].pop('files', None)
-        self.cr_test_data['research_dataset'].pop('directories', None)
-        response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["data_catalog"] = END_USER_ALLOWED_DATA_CATALOGS[0]  # ida
+        self.cr_test_data["research_dataset"].pop("files", None)
+        self.cr_test_data["research_dataset"].pop("directories", None)
+        response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        cr_id = response.data['id']
+        cr_id = response.data["id"]

-        file_changes = {
-            'files': new_files
-        }
+        file_changes = {"files": new_files}

         # should fail, since user's token has no permission for the newly added files
-        response = self.client.post(f'/rest/v2/datasets/{cr_id}/files', file_changes, format="json")
+        response = self.client.post(f"/rest/v2/datasets/{cr_id}/files", file_changes, format="json")
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.content)

         # add project membership to user's token and try again
-        project_identifier = File.objects.get(identifier=new_files[0]['identifier']).project_identifier
-        self.token['group_names'].append('IDA01:%s' % project_identifier)
-        self._use_http_authorization(method='bearer', token=self.token)
+        project_identifier = File.objects.get(
+            identifier=new_files[0]["identifier"]
+        ).project_identifier
+        self.token["group_names"].append("IDA01:%s" % project_identifier)
+        self._use_http_authorization(method="bearer", token=self.token)

-        response = self.client.post(f'/rest/v2/datasets/{cr_id}/files', file_changes, format="json")
+        response = self.client.post(f"/rest/v2/datasets/{cr_id}/files", file_changes, format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)

     @responses.activate
     def test_owner_receives_unfiltered_dataset_data(self):
-        '''
+        """
         The general public will have some fields filtered out from the dataset, in order to protect
         sensitive data. The owner of a dataset, however, should always receive full data.
-        '''
+        """
         self._set_cr_owner_to_token_user(1)

         def _check_fields(obj):
-            for sensitive_field in ['email', 'telephone', 'phone']:
-                self.assertEqual(sensitive_field in obj['research_dataset']['curator'][0], True,
-                    'field %s should be present' % sensitive_field)
+            for sensitive_field in ["email", "telephone", "phone"]:
+                self.assertEqual(
+                    sensitive_field in obj["research_dataset"]["curator"][0],
+                    True,
+                    "field %s should be present" % sensitive_field,
+                )

         for cr in CatalogRecordV2.objects.filter(pk=1):
-            cr.research_dataset['curator'][0].update({
-                'email': 'email@mail.com',
-                'phone': '123124',
-                'telephone': '123124',
-            })
+            cr.research_dataset["curator"][0].update(
+                {
+                    "email": "email@mail.com",
+                    "phone": "123124",
+                    "telephone": "123124",
+                }
+            )
             cr.force_save()

-        response = self.client.get('/rest/v2/datasets/1')
+        response = self.client.get("/rest/v2/datasets/1")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         _check_fields(response.data)


@@ -1897,111 +2236,132 @@ def setUp(self):
         """
         super().setUp()

-        self.dc = DataCatalog.objects.filter(catalog_json__research_dataset_schema='att').first()
-        self.dc.catalog_json['identifier'] = EXT_CATALOG
-        self.dc.catalog_json['harvested'] = True
-        self.dc.catalog_record_services_create = 'external'
-        self.dc.catalog_record_services_edit = 'external'
+        self.dc = DataCatalog.objects.filter(catalog_json__research_dataset_schema="att").first()
+        self.dc.catalog_json["identifier"] = EXT_CATALOG
+        self.dc.catalog_json["harvested"] = True
+        self.dc.catalog_record_services_create = "external"
+        self.dc.catalog_record_services_edit = "external"
         self.dc.force_save()

-        self.cr_test_data['data_catalog'] = self.dc.catalog_json['identifier']
-        del self.cr_test_data['research_dataset']['files']
-        del self.cr_test_data['research_dataset']['total_files_byte_size']
+        self.cr_test_data["data_catalog"] = self.dc.catalog_json["identifier"]
+        del self.cr_test_data["research_dataset"]["files"]
+        del self.cr_test_data["research_dataset"]["total_files_byte_size"]

-        self._use_http_authorization(username=django_settings.API_EXT_USER['username'],
-                                     password=django_settings.API_EXT_USER['password'])
+        self._use_http_authorization(
+            username=django_settings.API_EXT_USER["username"],
+            password=django_settings.API_EXT_USER["password"],
+        )

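The setUp above encodes per-catalog write access as comma-separated service lists on the data catalog (catalog_record_services_create / _edit). A minimal sketch of how such a list might be evaluated (helper name hypothetical; the actual check lives in Metax's permission layer):

    def service_may_create(data_catalog, service_name):
        # 'external' may create records only in catalogs whose create-list names it.
        allowed = (data_catalog.catalog_record_services_create or "").split(",")
        return service_name in allowed
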
     def test_external_service_can_not_read_all_metadata_in_other_catalog(self):
-        ''' External service should get the same output from someone elses catalog than anonymous user '''
+        """ An external service should get the same output from someone else's catalog as an anonymous user """

         # create a catalog that does not belong to our external service
         dc2 = DataCatalog.objects.get(pk=2)
-        dc2.catalog_json['identifier'] = 'Some other catalog'
-        dc2.catalog_record_services_read = 'metax'
+        dc2.catalog_json["identifier"] = "Some other catalog"
+        dc2.catalog_record_services_read = "metax"
         dc2.force_save()

         # Create a catalog record that belongs to some other user & our catalog nr 2
         cr = CatalogRecordV2.objects.get(pk=12)
-        cr.user_created = '#### Some owner who is not you ####'
-        cr.metadata_provider_user = '#### Some owner who is not you ####'
+        cr.user_created = "#### Some owner who is not you ####"
+        cr.metadata_provider_user = "#### Some owner who is not you ####"
         cr.data_catalog = dc2
         cr.editor = None
-        cr.research_dataset['access_rights']['access_type']['identifier'] = ACCESS_TYPES['restricted']
+        cr.research_dataset["access_rights"]["access_type"]["identifier"] = ACCESS_TYPES[
+            "restricted"
+        ]
         cr.force_save()

         # Let's try to return the data with our external service's credentials
-        response_service_user = self.client.get('/rest/v2/datasets/12')
-        self.assertEqual(response_service_user.status_code, status.HTTP_200_OK, response_service_user.data)
+        response_service_user = self.client.get("/rest/v2/datasets/12")
+        self.assertEqual(
+            response_service_user.status_code,
+            status.HTTP_200_OK,
+            response_service_user.data,
+        )

         # Test access as unauthenticated user
         self.client._credentials = {}
-        response_anonymous = self.client.get('/rest/v2/datasets/12')
-        self.assertEqual(response_anonymous.status_code, status.HTTP_200_OK, response_anonymous.data)
+        response_anonymous = self.client.get("/rest/v2/datasets/12")
+        self.assertEqual(
+            response_anonymous.status_code, status.HTTP_200_OK, response_anonymous.data
+        )

-        self.assertEqual(response_anonymous.data, response_service_user.data,
-            "External service with no read-rights should not see any more metadata than anonymous user from a catalog")
+        self.assertEqual(
+            response_anonymous.data,
+            response_service_user.data,
+            "An external service with no read rights should not see any more metadata from a catalog than an anonymous user",
+        )

     def test_external_service_can_add_catalog_record_to_own_catalog(self):
-        self.cr_test_data['research_dataset']['preferred_identifier'] = '123456'
-        response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["research_dataset"]["preferred_identifier"] = "123456"
+        response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual(response.data['research_dataset']['preferred_identifier'], '123456')
+        self.assertEqual(response.data["research_dataset"]["preferred_identifier"], "123456")

     def test_external_service_can_update_catalog_record_in_own_catalog(self):
-        self.cr_test_data['research_dataset']['preferred_identifier'] = '123456'
-        response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json")
+        self.cr_test_data["research_dataset"]["preferred_identifier"] = "123456"
+        response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual(response.data['research_dataset']['preferred_identifier'], '123456')
+        self.assertEqual(response.data["research_dataset"]["preferred_identifier"], "123456")

-        cr_id = response.data['id']
-        self.cr_test_data['research_dataset']['preferred_identifier'] = '654321'
-        response = self.client.put('/rest/v2/datasets/{}'.format(cr_id), self.cr_test_data, format="json")
+        cr_id = response.data["id"]
+        self.cr_test_data["research_dataset"]["preferred_identifier"] = "654321"
+        response = self.client.put(
+            "/rest/v2/datasets/{}".format(cr_id), self.cr_test_data, format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(response.data['research_dataset']['preferred_identifier'], '654321')
+
self.assertEqual(response.data["research_dataset"]["preferred_identifier"], "654321") def test_external_service_can_delete_catalog_record_from_own_catalog(self): - self.cr_test_data['research_dataset']['preferred_identifier'] = '123456' - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["preferred_identifier"] = "123456" + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - cr_id = response.data['id'] - response = self.client.delete('/rest/v2/datasets/{}'.format(cr_id)) + cr_id = response.data["id"] + response = self.client.delete("/rest/v2/datasets/{}".format(cr_id)) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) - response = self.client.get('/rest/v2/datasets/{}'.format(cr_id), format="json") - self.assertEqual('not found' in response.json()['detail'].lower(), True) + response = self.client.get("/rest/v2/datasets/{}".format(cr_id), format="json") + self.assertEqual("not found" in response.json()["detail"].lower(), True) def test_external_service_can_not_add_catalog_record_to_other_catalog(self): - dc = self._get_object_from_test_data('datacatalog', requested_index=1) - self.cr_test_data['data_catalog'] = dc['catalog_json']['identifier'] - self.cr_test_data['research_dataset']['preferred_identifier'] = 'temp-pid' - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + dc = self._get_object_from_test_data("datacatalog", requested_index=1) + self.cr_test_data["data_catalog"] = dc["catalog_json"]["identifier"] + self.cr_test_data["research_dataset"]["preferred_identifier"] = "temp-pid" + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) def test_external_service_can_not_update_catalog_record_in_other_catalog(self): - response = self.client.put('/rest/v2/datasets/1', {}, format="json") + response = self.client.put("/rest/v2/datasets/1", {}, format="json") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) def test_external_service_can_not_delete_catalog_record_from_other_catalog(self): - response = self.client.delete('/rest/v2/datasets/1') + response = self.client.delete("/rest/v2/datasets/1") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) def test_harvested_catalogs_must_have_preferred_identifier_create(self): # create without preferred identifier - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual('must have preferred identifier' in - response.data['research_dataset']['preferred_identifier'][0], True) + self.assertEqual( + "must have preferred identifier" + in response.data["research_dataset"]["preferred_identifier"][0], + True, + ) - self.cr_test_data['research_dataset']['preferred_identifier'] = '' - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + self.cr_test_data["research_dataset"]["preferred_identifier"] = "" + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual('must have preferred identifier' in - 
response.data['research_dataset']['preferred_identifier'][0], True) + self.assertEqual( + "must have preferred identifier" + in response.data["research_dataset"]["preferred_identifier"][0], + True, + ) diff --git a/src/metax_api/tests/api/rest/v2/views/directories/read.py b/src/metax_api/tests/api/rest/v2/views/directories/read.py index f88925e3..6146ab20 100755 --- a/src/metax_api/tests/api/rest/v2/views/directories/read.py +++ b/src/metax_api/tests/api/rest/v2/views/directories/read.py @@ -19,19 +19,18 @@ class DirectoryApiReadCommon(APITestCase, TestClassUtils): - @classmethod def setUpClass(cls): """ Loaded only once for test cases inside this class. """ - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(DirectoryApiReadCommon, cls).setUpClass() def setUp(self): - dir_from_test_data = self._get_object_from_test_data('directory') - self.identifier = dir_from_test_data['identifier'] - self.pk = dir_from_test_data['id'] + dir_from_test_data = self._get_object_from_test_data("directory") + self.identifier = dir_from_test_data["identifier"] + self.pk = dir_from_test_data["id"] self._use_http_authorization() def _create_test_dirs(self, count): @@ -39,61 +38,73 @@ def _create_test_dirs(self, count): with transaction.atomic(): for n in range(1, count): f = self._get_new_file_data(str(n)) - self.client.post('/rest/v2/files', f, format="json") + self.client.post("/rest/v2/files", f, format="json") def _get_dirs_files_ids(self, url): file_data = self.client.get(url).data if isinstance(file_data, dict): - return {key: [f['id'] for f in file_data[key]] for key in file_data.keys() - if key in ['directories', 'files']} + return { + key: [f["id"] for f in file_data[key]] + for key in file_data.keys() + if key in ["directories", "files"] + } else: - return [f['id'] for f in file_data] + return [f["id"] for f in file_data] def _get_new_file_data(self, file_n): - from_test_data = self._get_object_from_test_data('file', requested_index=0) - - path = '/prj_112_root/science_data_C/phase_2/2017/10/dir_' + file_n + '/file_' + file_n - identifier = 'urn:nbn:fi:100' + file_n - - from_test_data.update({ - "checksum": { - "value": "habeebit", - "algorithm": "SHA-256", - "checked": "2017-05-23T10:07:22.559656Z", - }, - "file_name": "tiedosto_name_" + file_n, - "file_path": path, - "identifier": identifier, - "file_storage": self._get_object_from_test_data('filestorage', requested_index=0), - 'parent_directory': 24, - 'project_identifier': 'research_project_112' - }) - del from_test_data['id'] + from_test_data = self._get_object_from_test_data("file", requested_index=0) + + path = "/prj_112_root/science_data_C/phase_2/2017/10/dir_" + file_n + "/file_" + file_n + identifier = "urn:nbn:fi:100" + file_n + + from_test_data.update( + { + "checksum": { + "value": "habeebit", + "algorithm": "SHA-256", + "checked": "2017-05-23T10:07:22.559656Z", + }, + "file_name": "tiedosto_name_" + file_n, + "file_path": path, + "identifier": identifier, + "file_storage": self._get_object_from_test_data("filestorage", requested_index=0), + "parent_directory": 24, + "project_identifier": "research_project_112", + } + ) + del from_test_data["id"] return from_test_data class DirectoryApiReadBasicTests(DirectoryApiReadCommon): - def test_read_directory_list(self): - response = self.client.get('/rest/v2/directories') + response = self.client.get("/rest/v2/directories") self.assertEqual(response.status_code, 501) def test_read_directory_details_by_pk(self): - response 
= self.client.get('/rest/v2/directories/%s' % self.pk) + response = self.client.get("/rest/v2/directories/%s" % self.pk) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(hasattr(response, 'data'), True, 'Request response object is missing attribute \'data\'') - self.assertEqual('directory_name' in response.data.keys(), True) - self.assertEqual(response.data['identifier'], self.identifier) + self.assertEqual( + hasattr(response, "data"), + True, + "Request response object is missing attribute 'data'", + ) + self.assertEqual("directory_name" in response.data.keys(), True) + self.assertEqual(response.data["identifier"], self.identifier) def test_read_directory_details_by_identifier(self): - response = self.client.get('/rest/v2/directories/%s' % self.identifier) + response = self.client.get("/rest/v2/directories/%s" % self.identifier) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(hasattr(response, 'data'), True, 'Request response object is missing attribute \'data\'') - self.assertEqual('directory_name' in response.data.keys(), True) - self.assertEqual(response.data['identifier'], self.identifier) + self.assertEqual( + hasattr(response, "data"), + True, + "Request response object is missing attribute 'data'", + ) + self.assertEqual("directory_name" in response.data.keys(), True) + self.assertEqual(response.data["identifier"], self.identifier) def test_read_directory_details_not_found(self): - response = self.client.get('/rest/v2/directories/shouldnotexist') + response = self.client.get("/rest/v2/directories/shouldnotexist") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) @@ -107,37 +118,37 @@ def test_read_directory_get_files(self): """ Test browsing files """ - response = self.client.get('/rest/v2/directories/2/files') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual(response.data['directories'][0]['id'], 3) - self.assertEqual(response.data['directories'][0]['parent_directory']['id'], 2) - self.assertEqual(len(response.data['files']), 0) - - response = self.client.get('/rest/v2/directories/3/files') - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual(response.data['directories'][0]['id'], 4) - self.assertEqual(response.data['directories'][0]['parent_directory']['id'], 3) - self.assertEqual(len(response.data['files']), 5) - self.assertEqual(response.data['files'][0]['parent_directory']['id'], 3) - self.assertEqual(response.data['files'][4]['parent_directory']['id'], 3) - - response = self.client.get('/rest/v2/directories/4/files') - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual(response.data['directories'][0]['parent_directory']['id'], 4) - self.assertEqual(len(response.data['files']), 5) - self.assertEqual(response.data['files'][0]['parent_directory']['id'], 4) - self.assertEqual(response.data['files'][4]['parent_directory']['id'], 4) - - response = self.client.get('/rest/v2/directories/5/files') - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual(len(response.data['files']), 0) - - response = self.client.get('/rest/v2/directories/6/files') - self.assertEqual(len(response.data['directories']), 0) - self.assertEqual(len(response.data['files']), 10) - self.assertEqual(response.data['files'][0]['parent_directory']['id'], 6) - self.assertEqual(response.data['files'][9]['parent_directory']['id'], 6) + response = self.client.get("/rest/v2/directories/2/files") 
+        self.assertEqual(response.status_code, status.HTTP_200_OK)
+        self.assertEqual(len(response.data["directories"]), 1)
+        self.assertEqual(response.data["directories"][0]["id"], 3)
+        self.assertEqual(response.data["directories"][0]["parent_directory"]["id"], 2)
+        self.assertEqual(len(response.data["files"]), 0)
+
+        response = self.client.get("/rest/v2/directories/3/files")
+        self.assertEqual(len(response.data["directories"]), 1)
+        self.assertEqual(response.data["directories"][0]["id"], 4)
+        self.assertEqual(response.data["directories"][0]["parent_directory"]["id"], 3)
+        self.assertEqual(len(response.data["files"]), 5)
+        self.assertEqual(response.data["files"][0]["parent_directory"]["id"], 3)
+        self.assertEqual(response.data["files"][4]["parent_directory"]["id"], 3)
+
+        response = self.client.get("/rest/v2/directories/4/files")
+        self.assertEqual(len(response.data["directories"]), 1)
+        self.assertEqual(response.data["directories"][0]["parent_directory"]["id"], 4)
+        self.assertEqual(len(response.data["files"]), 5)
+        self.assertEqual(response.data["files"][0]["parent_directory"]["id"], 4)
+        self.assertEqual(response.data["files"][4]["parent_directory"]["id"], 4)
+
+        response = self.client.get("/rest/v2/directories/5/files")
+        self.assertEqual(len(response.data["directories"]), 1)
+        self.assertEqual(len(response.data["files"]), 0)
+
+        response = self.client.get("/rest/v2/directories/6/files")
+        self.assertEqual(len(response.data["directories"]), 0)
+        self.assertEqual(len(response.data["files"]), 10)
+        self.assertEqual(response.data["files"][0]["parent_directory"]["id"], 6)
+        self.assertEqual(response.data["files"][9]["parent_directory"]["id"], 6)

     def test_read_directory_get_files_recursively(self):
         """
@@ -145,85 +156,88 @@ def test_read_directory_get_files_recursively(self):
         """

         # without depth, returns from depth=1, which should contain no files
-        response = self.client.get('/rest/v2/directories/1/files?recursive')
+        response = self.client.get("/rest/v2/directories/1/files?recursive")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         self.assertEqual(len(response.data), 0)

         # dir id 1 (the root) contains 0 files, but recursively 20
-        response = self.client.get('/rest/v2/directories/1/files?recursive=true&depth=*')
+        response = self.client.get("/rest/v2/directories/1/files?recursive=true&depth=*")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(len(response.data), 20)

         # dir id 3 contains 5 files, but recursively 20
-        response = self.client.get('/rest/v2/directories/3/files?recursive=true&depth=*')
+        response = self.client.get("/rest/v2/directories/3/files?recursive=true&depth=*")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(len(response.data), 20)

         # dir id 4 contains 5 files, but recursively 15
-        response = self.client.get('/rest/v2/directories/4/files?recursive=true&depth=*')
+        response = self.client.get("/rest/v2/directories/4/files?recursive=true&depth=*")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(len(response.data), 15)

         # dir id 5 contains 0 files, but recursively 10
-        response = self.client.get('/rest/v2/directories/5/files?recursive=true&depth=*')
+        response = self.client.get("/rest/v2/directories/5/files?recursive=true&depth=*")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(len(response.data), 10)

         # dir id 6 contains 10 files
-        response = self.client.get('/rest/v2/directories/6/files?recursive=true&depth=*')
+        response =
self.client.get("/rest/v2/directories/6/files?recursive=true&depth=*") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data), 10) def test_read_directory_get_files_file_not_found(self): - response = self.client.get('/rest/v2/directories/not_found/files') + response = self.client.get("/rest/v2/directories/not_found/files") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_read_directory_get_project_root_directory(self): - response = self.client.get('/rest/v2/directories/root?project=project_x') + response = self.client.get("/rest/v2/directories/root?project=project_x") self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) - self.assertEqual(response.data['id'], 1) - self.assertEqual('directories' in response.data, True) - self.assertEqual('files' in response.data, True) - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual(response.data['directories'][0]['id'], 2) + self.assertEqual(response.data["id"], 1) + self.assertEqual("directories" in response.data, True) + self.assertEqual("files" in response.data, True) + self.assertEqual(len(response.data["directories"]), 1) + self.assertEqual(response.data["directories"][0]["id"], 2) def test_read_directory_get_project_root_directory_not_found(self): - response = self.client.get('/rest/v2/directories/root?project=project_xyz') + response = self.client.get("/rest/v2/directories/root?project=project_xyz") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_read_directory_get_project_root_directory_parameter_missing(self): - response = self.client.get('/rest/v2/directories/root') + response = self.client.get("/rest/v2/directories/root") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('required' in response.data['detail'][0], True, response.data) + self.assertEqual("required" in response.data["detail"][0], True, response.data) def test_read_directory_get_files_by_path(self): dr = Directory.objects.get(pk=2) - response = self.client.get('/rest/v2/directories/files?path=%s&project=%s' % - (dr.directory_path, dr.project_identifier)) + response = self.client.get( + "/rest/v2/directories/files?path=%s&project=%s" + % (dr.directory_path, dr.project_identifier) + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual(response.data['directories'][0]['id'], 3) - self.assertEqual(response.data['directories'][0]['parent_directory']['id'], 2) - self.assertEqual(len(response.data['files']), 0) + self.assertEqual(len(response.data["directories"]), 1) + self.assertEqual(response.data["directories"][0]["id"], 3) + self.assertEqual(response.data["directories"][0]["parent_directory"]["id"], 2) + self.assertEqual(len(response.data["files"]), 0) def test_read_directory_get_files_by_path_not_found(self): - response = self.client.get('/rest/v2/directories/files?path=%s&project=%s' % - ('doesnotexist', 'doesnotexist')) + response = self.client.get( + "/rest/v2/directories/files?path=%s&project=%s" % ("doesnotexist", "doesnotexist") + ) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_read_directory_get_files_by_path_check_parameters(self): - response = self.client.get('/rest/v2/directories/files') + response = self.client.get("/rest/v2/directories/files") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - response = self.client.get('/rest/v2/directories/files?path=something') + 
response = self.client.get("/rest/v2/directories/files?path=something") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - response = self.client.get('/rest/v2/directories/files?project=something') + response = self.client.get("/rest/v2/directories/files?project=something") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_read_directory_recursively_with_max_depth(self): """ Should return a flat list of files, three directories deep """ - response = self.client.get('/rest/v2/directories/2/files?recursive=true&depth=3') + response = self.client.get("/rest/v2/directories/2/files?recursive=true&depth=3") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data), 10) @@ -231,11 +245,13 @@ def test_read_directory_recursively_with_dirs_only_and_max_depth(self): """ Should return a directory hierarchy, three directories deep, with no files at all. """ - response = self.client.get('/rest/v2/directories/2/files?recursive=true&directories_only=true&depth=3') + response = self.client.get( + "/rest/v2/directories/2/files?recursive=true&directories_only=true&depth=3" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('directories' in response.data, True) - self.assertEqual('directories' in response.data['directories'][0], True) - self.assertEqual('directories' in response.data['directories'][0]['directories'][0], True) + self.assertEqual("directories" in response.data, True) + self.assertEqual("directories" in response.data["directories"][0], True) + self.assertEqual("directories" in response.data["directories"][0]["directories"][0], True) def test_read_directory_recursively_with_no_depth(self): """ @@ -244,166 +260,233 @@ def test_read_directory_recursively_with_no_depth(self): Using parameter directories_only=true to easier count the depth. 
""" - response = self.client.get('/rest/v2/directories/3/files?recursive=true&directories_only=true') + response = self.client.get( + "/rest/v2/directories/3/files?recursive=true&directories_only=true" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('directories' in response.data, True) - self.assertEqual('directories' in response.data['directories'][0], True) + self.assertEqual("directories" in response.data, True) + self.assertEqual("directories" in response.data["directories"][0], True) def test_read_directory_return_directories_only(self): - response = self.client.get('/rest/v2/directories/3/files?directories_only') + response = self.client.get("/rest/v2/directories/3/files?directories_only") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual('files' in response.data, False) + self.assertEqual(len(response.data["directories"]), 1) + self.assertEqual("files" in response.data, False) def test_read_directory_with_include_parent(self): - response = self.client.get('/rest/v2/directories/3/files?include_parent') + response = self.client.get("/rest/v2/directories/3/files?include_parent") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual(len(response.data['files']), 5) - self.assertEqual(response.data.get('id', None), 3) + self.assertEqual(len(response.data["directories"]), 1) + self.assertEqual(len(response.data["files"]), 5) + self.assertEqual(response.data.get("id", None), 3) def test_read_directory_files_sorted_by_file_path(self): - response = self.client.get('/rest/v2/directories/3/files') + response = self.client.get("/rest/v2/directories/3/files") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['files'][0]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_1') - self.assertEqual(response.data['files'][1]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_2') - self.assertEqual(response.data['files'][2]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_3') + self.assertEqual( + response.data["files"][0]["file_path"], + "/project_x_FROZEN/Experiment_X/file_name_1", + ) + self.assertEqual( + response.data["files"][1]["file_path"], + "/project_x_FROZEN/Experiment_X/file_name_2", + ) + self.assertEqual( + response.data["files"][2]["file_path"], + "/project_x_FROZEN/Experiment_X/file_name_3", + ) - response = self.client.get('/rest/v2/directories/3/files?pagination&limit=2&offset=2') + response = self.client.get("/rest/v2/directories/3/files?pagination&limit=2&offset=2") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['results']['files'][0]['file_path'], - '/project_x_FROZEN/Experiment_X/file_name_2') - self.assertEqual(response.data['results']['files'][1]['file_path'], - '/project_x_FROZEN/Experiment_X/file_name_3') + self.assertEqual( + response.data["results"]["files"][0]["file_path"], + "/project_x_FROZEN/Experiment_X/file_name_2", + ) + self.assertEqual( + response.data["results"]["files"][1]["file_path"], + "/project_x_FROZEN/Experiment_X/file_name_3", + ) - response = self.client.get('/rest/v2/directories/3/files?cr_identifier=2') + response = self.client.get("/rest/v2/directories/3/files?cr_identifier=2") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['files'][0]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_3') - 
self.assertEqual(response.data['files'][1]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_4') + self.assertEqual( + response.data["files"][0]["file_path"], + "/project_x_FROZEN/Experiment_X/file_name_3", + ) + self.assertEqual( + response.data["files"][1]["file_path"], + "/project_x_FROZEN/Experiment_X/file_name_4", + ) - response = self.client.get('/rest/v2/directories/3/files?recursive') + response = self.client.get("/rest/v2/directories/3/files?recursive") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data[0]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_1') - self.assertEqual(response.data[1]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_2') - self.assertEqual(response.data[2]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_3') + self.assertEqual( + response.data[0]["file_path"], "/project_x_FROZEN/Experiment_X/file_name_1" + ) + self.assertEqual( + response.data[1]["file_path"], "/project_x_FROZEN/Experiment_X/file_name_2" + ) + self.assertEqual( + response.data[2]["file_path"], "/project_x_FROZEN/Experiment_X/file_name_3" + ) - response = self.client.get('/rest/v2/directories/3/files?recursive&cr_identifier=2') + response = self.client.get("/rest/v2/directories/3/files?recursive&cr_identifier=2") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data[0]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_3') - self.assertEqual(response.data[1]['file_path'], '/project_x_FROZEN/Experiment_X/file_name_4') + self.assertEqual( + response.data[0]["file_path"], "/project_x_FROZEN/Experiment_X/file_name_3" + ) + self.assertEqual( + response.data[1]["file_path"], "/project_x_FROZEN/Experiment_X/file_name_4" + ) def test_read_directory_directories_sorted_by_directory_path(self): - response = self.client.get('/rest/v2/directories/8/files') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['directories'][0]['directory_path'], - '/prj_112_root/other') - self.assertEqual(response.data['directories'][1]['directory_path'], - '/prj_112_root/random_folder', response.data) - self.assertEqual(response.data['directories'][2]['directory_path'], - '/prj_112_root/science_data_A') - self.assertEqual(response.data['directories'][3]['directory_path'], - '/prj_112_root/science_data_B') - self.assertEqual(response.data['directories'][4]['directory_path'], - '/prj_112_root/science_data_C') - - response = self.client.get('/rest/v2/directories/8/files?pagination&limit=2&offset=2') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['results']['directories'][0]['directory_path'], - '/prj_112_root/science_data_A') - self.assertEqual(response.data['results']['directories'][1]['directory_path'], - '/prj_112_root/science_data_B') - - response = self.client.get('/rest/v2/directories/8/files?directories_only') - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['directories'][0]['directory_path'], - '/prj_112_root/other') - self.assertEqual(response.data['directories'][1]['directory_path'], - '/prj_112_root/random_folder', response.data) - self.assertEqual(response.data['directories'][2]['directory_path'], - '/prj_112_root/science_data_A') - self.assertEqual(response.data['directories'][3]['directory_path'], - '/prj_112_root/science_data_B') - self.assertEqual(response.data['directories'][4]['directory_path'], - '/prj_112_root/science_data_C') - - response = 
self.client.get('/rest/v2/directories/8/files?cr_identifier=13') + response = self.client.get("/rest/v2/directories/8/files") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["directories"][0]["directory_path"], "/prj_112_root/other") + self.assertEqual( + response.data["directories"][1]["directory_path"], + "/prj_112_root/random_folder", + response.data, + ) + self.assertEqual( + response.data["directories"][2]["directory_path"], + "/prj_112_root/science_data_A", + ) + self.assertEqual( + response.data["directories"][3]["directory_path"], + "/prj_112_root/science_data_B", + ) + self.assertEqual( + response.data["directories"][4]["directory_path"], + "/prj_112_root/science_data_C", + ) + + response = self.client.get("/rest/v2/directories/8/files?pagination&limit=2&offset=2") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual( + response.data["results"]["directories"][0]["directory_path"], + "/prj_112_root/science_data_A", + ) + self.assertEqual( + response.data["results"]["directories"][1]["directory_path"], + "/prj_112_root/science_data_B", + ) + + response = self.client.get("/rest/v2/directories/8/files?directories_only") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["directories"][0]["directory_path"], "/prj_112_root/other") + self.assertEqual( + response.data["directories"][1]["directory_path"], + "/prj_112_root/random_folder", + response.data, + ) + self.assertEqual( + response.data["directories"][2]["directory_path"], + "/prj_112_root/science_data_A", + ) + self.assertEqual( + response.data["directories"][3]["directory_path"], + "/prj_112_root/science_data_B", + ) + self.assertEqual( + response.data["directories"][4]["directory_path"], + "/prj_112_root/science_data_C", + ) + + response = self.client.get("/rest/v2/directories/8/files?cr_identifier=13") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['directories'][0]['directory_path'], - '/prj_112_root/other') - self.assertEqual(response.data['directories'][1]['directory_path'], - '/prj_112_root/random_folder') - self.assertEqual(response.data['directories'][2]['directory_path'], - '/prj_112_root/science_data_A') - self.assertEqual(response.data['directories'][3]['directory_path'], - '/prj_112_root/science_data_B') + self.assertEqual(response.data["directories"][0]["directory_path"], "/prj_112_root/other") + self.assertEqual( + response.data["directories"][1]["directory_path"], + "/prj_112_root/random_folder", + ) + self.assertEqual( + response.data["directories"][2]["directory_path"], + "/prj_112_root/science_data_A", + ) + self.assertEqual( + response.data["directories"][3]["directory_path"], + "/prj_112_root/science_data_B", + ) class DirectoryApiReadFileBrowsingRetrieveSpecificFieldsTests(DirectoryApiReadCommon): - def test_retrieve_requested_directory_fields_only(self): - response = self.client.get('/rest/v2/directories/3/files?directory_fields=identifier,directory_path') + response = self.client.get( + "/rest/v2/directories/3/files?directory_fields=identifier,directory_path" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories'][0].keys()), 2) - self.assertEqual('identifier' in response.data['directories'][0], True) - self.assertEqual('directory_path' in response.data['directories'][0], True) + self.assertEqual(len(response.data["directories"][0].keys()), 2) + self.assertEqual("identifier" in 
response.data["directories"][0], True) + self.assertEqual("directory_path" in response.data["directories"][0], True) - self._use_http_authorization(username='metax') + self._use_http_authorization(username="metax") - response = self.client.get('/rest/v2/directories/17/files? \ - cr_identifier=13&directory_fields=directory_name&directories_only&recursive') + response = self.client.get( + "/rest/v2/directories/17/files? \ + cr_identifier=13&directory_fields=directory_name&directories_only&recursive" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories'][0].keys()), 2) - self.assertTrue('directories' in response.data['directories'][0]) - self.assertTrue('directory_name' in response.data['directories'][0]) - self.assertFalse('id' in response.data['directories'][0]) + self.assertEqual(len(response.data["directories"][0].keys()), 2) + self.assertTrue("directories" in response.data["directories"][0]) + self.assertTrue("directory_name" in response.data["directories"][0]) + self.assertFalse("id" in response.data["directories"][0]) def test_retrieve_directory_byte_size_and_file_count(self): """ There is some additional logic involved in retrieving byte_size and file_count, which warrants targeted tests for just those fields. """ - response = self.client.get('/rest/v2/directories/3/files?directory_fields=identifier,byte_size') + response = self.client.get( + "/rest/v2/directories/3/files?directory_fields=identifier,byte_size" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['directories'][0].keys()), 2) - self.assertEqual('identifier' in response.data['directories'][0], True) - self.assertEqual('byte_size' in response.data['directories'][0], True) + self.assertEqual(len(response.data["directories"][0].keys()), 2) + self.assertEqual("identifier" in response.data["directories"][0], True) + self.assertEqual("byte_size" in response.data["directories"][0], True) - response = self.client.get('/rest/v2/directories/3/files?directory_fields=identifier,file_count') + response = self.client.get( + "/rest/v2/directories/3/files?directory_fields=identifier,file_count" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories'][0].keys()), 2) - self.assertEqual('identifier' in response.data['directories'][0], True) - self.assertEqual('file_count' in response.data['directories'][0], True) + self.assertEqual(len(response.data["directories"][0].keys()), 2) + self.assertEqual("identifier" in response.data["directories"][0], True) + self.assertEqual("file_count" in response.data["directories"][0], True) response = self.client.get( - '/rest/v2/directories/3/files?directory_fields=identifier,file_count&cr_identifier=3') + "/rest/v2/directories/3/files?directory_fields=identifier,file_count&cr_identifier=3" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories'][0].keys()), 2) - self.assertEqual('identifier' in response.data['directories'][0], True) - self.assertEqual('file_count' in response.data['directories'][0], True) + self.assertEqual(len(response.data["directories"][0].keys()), 2) + self.assertEqual("identifier" in response.data["directories"][0], True) + self.assertEqual("file_count" in response.data["directories"][0], True) response = self.client.get( - '/rest/v2/directories/3/files?directory_fields=identifier,file_count¬_cr_identifier=2') + 
"/rest/v2/directories/3/files?directory_fields=identifier,file_count¬_cr_identifier=2" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories'][0].keys()), 2) - self.assertEqual('identifier' in response.data['directories'][0], True) - self.assertEqual('file_count' in response.data['directories'][0], True) + self.assertEqual(len(response.data["directories"][0].keys()), 2) + self.assertEqual("identifier" in response.data["directories"][0], True) + self.assertEqual("file_count" in response.data["directories"][0], True) def test_retrieve_requested_file_fields_only(self): - response = self.client.get('/rest/v2/directories/3/files?file_fields=identifier,file_path') + response = self.client.get("/rest/v2/directories/3/files?file_fields=identifier,file_path") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['files'][0].keys()), 2) - self.assertEqual('identifier' in response.data['files'][0], True) - self.assertEqual('file_path' in response.data['files'][0], True) + self.assertEqual(len(response.data["files"][0].keys()), 2) + self.assertEqual("identifier" in response.data["files"][0], True) + self.assertEqual("file_path" in response.data["files"][0], True) def test_retrieve_requested_file_and_directory_fields_only(self): - response = self.client.get('/rest/v2/directories/3/files?file_fields=identifier&directory_fields=id') + response = self.client.get( + "/rest/v2/directories/3/files?file_fields=identifier&directory_fields=id" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['files'][0].keys()), 1) - self.assertEqual('identifier' in response.data['files'][0], True) - self.assertEqual(len(response.data['directories'][0].keys()), 1) - self.assertEqual('id' in response.data['directories'][0], True) + self.assertEqual(len(response.data["files"][0].keys()), 1) + self.assertEqual("identifier" in response.data["files"][0], True) + self.assertEqual(len(response.data["directories"][0].keys()), 1) + self.assertEqual("id" in response.data["directories"][0], True) def test_not_retrieving_not_allowed_directory_fields(self): from metax_api.api.rest.base.serializers import DirectorySerializer, FileSerializer @@ -411,19 +494,27 @@ def test_not_retrieving_not_allowed_directory_fields(self): allowed_dir_fields = set(DirectorySerializer.Meta.fields) allowed_file_fields = set(FileSerializer.Meta.fields) - response = self.client.get('/rest/v2/directories/3/files?file_fields=parent,id&directory_fields=;;drop db;,id') + response = self.client.get( + "/rest/v2/directories/3/files?file_fields=parent,id&directory_fields=;;drop db;,id" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(any(field in response.data['files'][0].keys() for field in allowed_file_fields)) - self.assertTrue(any(field in response.data['directories'][0].keys() for field in allowed_dir_fields)) + self.assertTrue( + any(field in response.data["files"][0].keys() for field in allowed_file_fields) + ) + self.assertTrue( + any(field in response.data["directories"][0].keys() for field in allowed_dir_fields) + ) - response = self.client.get('/rest/v2/directories/3/files?file_fields=parent&directory_fields=or') + response = self.client.get( + "/rest/v2/directories/3/files?file_fields=parent&directory_fields=or" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - response = self.client.get('/rest/v2/directories/3/files?file_fields=parent') + 
response = self.client.get("/rest/v2/directories/3/files?file_fields=parent") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - response = self.client.get('/rest/v2/directories/3/files?directory_fields=or') + response = self.client.get("/rest/v2/directories/3/files?directory_fields=or") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) @@ -436,50 +527,57 @@ class DirectoryApiReadCatalogRecordFileBrowsingTests(DirectoryApiReadCommon): """ def setUp(self): - self._set_http_authorization('service') - self.client.get('/rest/v2/directories/update_byte_sizes_and_file_counts') - self.client.get('/rest/v2/datasets/update_cr_directory_browsing_data') + self._set_http_authorization("service") + self.client.get("/rest/v2/directories/update_byte_sizes_and_file_counts") + self.client.get("/rest/v2/datasets/update_cr_directory_browsing_data") def test_read_directory_catalog_record_and_not_catalog_record_not_ok(self): """ Test query parameter 'cr_identifier' and 'not_cr_identifier' can not be queried together. """ - response = self.client.get('/rest/v2/directories/3/files?cr_identifier=1¬_cr_identifier=2') + response = self.client.get( + "/rest/v2/directories/3/files?cr_identifier=1¬_cr_identifier=2" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue("one query parameter of 'cr_identifier' and 'not_cr_identifier'" in response.data['detail'][0]) + self.assertTrue( + "one query parameter of 'cr_identifier' and 'not_cr_identifier'" + in response.data["detail"][0] + ) def test_read_directory_for_catalog_record(self): """ Test query parameter 'cr_identifier'. """ - response = self.client.get('/rest/v2/directories/3/files?cr_identifier=%s' - % CatalogRecord.objects.get(pk=1).identifier) + response = self.client.get( + "/rest/v2/directories/3/files?cr_identifier=%s" + % CatalogRecord.objects.get(pk=1).identifier + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('directories' in response.data, True) - self.assertEqual('files' in response.data, True) - self.assertEqual(len(response.data['directories']), 0) - self.assertEqual(len(response.data['files']), 2) - for f in response.data['files']: - self.assertTrue(f['parent_directory']['id'], 1) + self.assertEqual("directories" in response.data, True) + self.assertEqual("files" in response.data, True) + self.assertEqual(len(response.data["directories"]), 0) + self.assertEqual(len(response.data["files"]), 2) + for f in response.data["files"]: + self.assertTrue(f["parent_directory"]["id"], 1) def test_read_directory_for_not_catalog_record(self): """ Test query parameter 'not_cr_identifier'. 
""" - response = self.client.get('/rest/v2/directories/3/files?not_cr_identifier=2') + response = self.client.get("/rest/v2/directories/3/files?not_cr_identifier=2") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['files']), 3, response.data) - for f in response.data['files']: - self.assertNotEqual(f['parent_directory']['id'], 2, response.data) - self.assertEqual(len(response.data['directories']), 1, response.data) - self.assertNotEqual(response.data['directories'][0]['parent_directory']['id'], 2) + self.assertEqual(len(response.data["files"]), 3, response.data) + for f in response.data["files"]: + self.assertNotEqual(f["parent_directory"]["id"], 2, response.data) + self.assertEqual(len(response.data["directories"]), 1, response.data) + self.assertNotEqual(response.data["directories"][0]["parent_directory"]["id"], 2) def test_read_directory_for_catalog_record_not_found(self): """ Not found cr_identifier should raise 400 instead of 404, which is raised when the directory itself is not found. the error contains details about the 400. """ - response = self.client.get('/rest/v2/directories/3/files?cr_identifier=notexisting') + response = self.client.get("/rest/v2/directories/3/files?cr_identifier=notexisting") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_read_directory_for_not_catalog_record_not_found(self): @@ -487,7 +585,7 @@ def test_read_directory_for_not_catalog_record_not_found(self): Not found cr_identifier should raise 400 instead of 404, which is raised when the directory itself is not found. the error contains details about the 400. """ - response = self.client.get('/rest/v2/directories/3/files?not_cr_identifier=notexisting') + response = self.client.get("/rest/v2/directories/3/files?not_cr_identifier=notexisting") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_read_directory_for_catalog_record_directory_does_not_exist(self): @@ -497,63 +595,77 @@ def test_read_directory_for_catalog_record_directory_does_not_exist(self): """ # should be OK... - response = self.client.get('/rest/v2/directories/4/files') + response = self.client.get("/rest/v2/directories/4/files") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual(len(response.data['files']), 5) + self.assertEqual(len(response.data["directories"]), 1) + self.assertEqual(len(response.data["files"]), 5) # ... but should not contain any files FOR THIS CR - response = self.client.get('/rest/v2/directories/4/files?cr_identifier=%s' - % CatalogRecord.objects.get(pk=1).identifier) + response = self.client.get( + "/rest/v2/directories/4/files?cr_identifier=%s" + % CatalogRecord.objects.get(pk=1).identifier + ) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) # ... 
and should contain the files of ALL BUT THIS CR - response = self.client.get('/rest/v2/directories/4/files?not_cr_identifier=%s' - % CatalogRecord.objects.get(pk=1).identifier) + response = self.client.get( + "/rest/v2/directories/4/files?not_cr_identifier=%s" + % CatalogRecord.objects.get(pk=1).identifier + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual(len(response.data['files']), 5) + self.assertEqual(len(response.data["directories"]), 1) + self.assertEqual(len(response.data["files"]), 5) def test_read_directory_for_catalog_record_recursively(self): """ Test query parameters 'cr_identifier' with 'recursive'. """ - response = self.client.get('/rest/v2/directories/1/files?recursive&cr_identifier=%s&depth=*' - % CatalogRecord.objects.get(pk=1).identifier) + response = self.client.get( + "/rest/v2/directories/1/files?recursive&cr_identifier=%s&depth=*" + % CatalogRecord.objects.get(pk=1).identifier + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - file_list = list(File.objects.filter(record__pk=1).values_list('id', flat=True)) + file_list = list(File.objects.filter(record__pk=1).values_list("id", flat=True)) self.assertEqual(len(response.data), len(file_list)) for f in response.data: - self.assertTrue(f['id'] in file_list) + self.assertTrue(f["id"] in file_list) - response = self.client.get('/rest/v2/directories/1/files?recursive&cr_identifier=1&depth=*&directories_only') + response = self.client.get( + "/rest/v2/directories/1/files?recursive&cr_identifier=1&depth=*&directories_only" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['directories']), 1) - self.assertEqual(len(response.data['directories'][0]['directories']), 1) - self.assertEqual(len(response.data['directories'][0]['directories'][0]['directories']), 0) - self.assertFalse(response.data.get('files')) + self.assertEqual(len(response.data["directories"]), 1) + self.assertEqual(len(response.data["directories"][0]["directories"]), 1) + self.assertEqual(len(response.data["directories"][0]["directories"][0]["directories"]), 0) + self.assertFalse(response.data.get("files")) # not found cr_identifier should raise 400 instead of 404, which is raised when the # directory itself is not found. The error contains details about the 400 - response = self.client.get('/rest/v2/directories/1/files?recursive&cr_identifier=notexisting') + response = self.client.get( + "/rest/v2/directories/1/files?recursive&cr_identifier=notexisting" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_read_directory_for_not_catalog_record_recursively(self): """ Test query parameters 'not_cr_identifier' with 'recursive'.
""" - file_recursive = self.client.get('/rest/v2/directories/1/files?recursive&depth=*').data - file_list = list(File.objects.filter(record__pk=1).values_list('id', flat=True)) - response = self.client.get('/rest/v2/directories/1/files?recursive&depth=*¬_cr_identifier=%s' - % CatalogRecord.objects.get(pk=1).identifier) + file_recursive = self.client.get("/rest/v2/directories/1/files?recursive&depth=*").data + file_list = list(File.objects.filter(record__pk=1).values_list("id", flat=True)) + response = self.client.get( + "/rest/v2/directories/1/files?recursive&depth=*¬_cr_identifier=%s" + % CatalogRecord.objects.get(pk=1).identifier + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual(len(response.data), len(file_recursive) - len(file_list)) for f in response.data: - self.assertTrue(f['id'] not in file_list) + self.assertTrue(f["id"] not in file_list) # not found not_cr_identifier should raise 400 instead of 404, which is raised when the # directory itself is not found. the error contains details about the 400 - response = self.client.get('/rest/v2/directories/1/files?recursive¬_cr_identifier=notexisting') + response = self.client.get( + "/rest/v2/directories/1/files?recursive¬_cr_identifier=notexisting" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_directory_byte_size_and_file_count(self): @@ -561,53 +673,60 @@ def test_directory_byte_size_and_file_count(self): Test byte size and file count are calculated correctly for directories when browsing files in the context of a single record. """ + def _assert_dir_calculations(cr, dr): """ Assert directory numbers received from browsing-api matches what exists in the db when making a reasonably fool-proof query of files by directory path """ - self.assertEqual('byte_size' in dr, True) - self.assertEqual('file_count' in dr, True) + self.assertEqual("byte_size" in dr, True) + self.assertEqual("file_count" in dr, True) - byte_size = cr.files.filter(file_path__startswith='%s/' % dr['directory_path']) \ - .aggregate(Sum('byte_size'))['byte_size__sum'] + byte_size = cr.files.filter( + file_path__startswith="%s/" % dr["directory_path"] + ).aggregate(Sum("byte_size"))["byte_size__sum"] - file_count = cr.files.filter(file_path__startswith='%s/' % dr['directory_path']).count() + file_count = cr.files.filter(file_path__startswith="%s/" % dr["directory_path"]).count() - self.assertEqual(dr['byte_size'], byte_size, 'path: %s' % dr['directory_path']) - self.assertEqual(dr['file_count'], file_count, 'path: %s' % dr['directory_path']) + self.assertEqual(dr["byte_size"], byte_size, "path: %s" % dr["directory_path"]) + self.assertEqual(dr["file_count"], file_count, "path: %s" % dr["directory_path"]) # prepare a new test dataset which contains a directory from testdata, which contains a decent # qty of files and complexity - dr = Directory.objects.get(directory_path='/prj_112_root') - cr_data = self.client.get('/rest/v2/datasets/1?include_user_metadata').data - cr_data.pop('id') - cr_data.pop('identifier') - cr_data['research_dataset'].pop('preferred_identifier') - cr_data['research_dataset'].pop('files', None) - cr_data['research_dataset']['directories'] = [{ - 'identifier': dr.identifier, - 'title': 'test dir', - 'use_category': { 'identifier': 'outcome' } - }] - self._use_http_authorization(username='metax') - response = self.client.post('/rest/v2/datasets', cr_data, format='json') + dr = Directory.objects.get(directory_path="/prj_112_root") + cr_data = 
self.client.get("/rest/v2/datasets/1?include_user_metadata").data + cr_data.pop("id") + cr_data.pop("identifier") + cr_data["research_dataset"].pop("preferred_identifier") + cr_data["research_dataset"].pop("files", None) + cr_data["research_dataset"]["directories"] = [ + { + "identifier": dr.identifier, + "title": "test dir", + "use_category": {"identifier": "outcome"}, + } + ] + self._use_http_authorization(username="metax") + response = self.client.post("/rest/v2/datasets", cr_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) cr_data = response.data - cr = CatalogRecord.objects.get(pk=cr_data['id']) + cr = CatalogRecord.objects.get(pk=cr_data["id"]) # begin tests # test: browse the file api, and receive a list of sub-directories - response = self.client.get('/rest/v2/directories/%d/files?cr_identifier=%s' % (dr.id, cr.identifier)) + response = self.client.get( + "/rest/v2/directories/%d/files?cr_identifier=%s" % (dr.id, cr.identifier) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - for directory in response.data['directories']: + for directory in response.data["directories"]: _assert_dir_calculations(cr, directory) # test: browse with ?include_parent=true to get the dir directly that was added to the dataset - response = self.client.get('/rest/v2/directories/%d/files?cr_identifier=%s&include_parent' - % (dr.id, cr.identifier)) + response = self.client.get( + "/rest/v2/directories/%d/files?cr_identifier=%s&include_parent" % (dr.id, cr.identifier) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) _assert_dir_calculations(cr, response.data) @@ -615,38 +734,53 @@ def test_directory_byte_size_and_file_count_in_parent_directories(self): cr_id = 13 def _assertDirectoryData(id, parent_data): - response = self.client.get('/rest/v2/directories/%d/files?cr_identifier=%d&include_parent' % (id, cr_id)) + response = self.client.get( + "/rest/v2/directories/%d/files?cr_identifier=%d&include_parent" % (id, cr_id) + ) - self.assertEqual(response.data['byte_size'], parent_data[id][0], response.data['id']) - self.assertEqual(response.data['file_count'], parent_data[id][1]) + self.assertEqual(response.data["byte_size"], parent_data[id][0], response.data["id"]) + self.assertEqual(response.data["file_count"], parent_data[id][1]) - if response.data.get('parent_directory'): - return _assertDirectoryData(response.data['parent_directory']['id'], parent_data) + if response.data.get("parent_directory"): + return _assertDirectoryData(response.data["parent_directory"]["id"], parent_data) def _assertDirectoryData_not_cr_id(id, parent_data): - total_dir_res = self.client.get('/rest/v2/directories/%d' % id) - total_dir_size = (total_dir_res.data.get('byte_size', None), total_dir_res.data.get('file_count', None)) - - response = self.client.get('/rest/v2/directories/%s/files?not_cr_identifier=%d&include_parent' % - (id, cr_id)) - if response.data.get('id'): - self.assertEqual(response.data['byte_size'], total_dir_size[0] - parent_data[id][0]) - self.assertEqual(response.data['file_count'], total_dir_size[1] - parent_data[id][1]) - - if response.data.get('parent_directory', None): - return _assertDirectoryData_not_cr_id(response.data['parent_directory']['id'], parent_data) + total_dir_res = self.client.get("/rest/v2/directories/%d" % id) + total_dir_size = ( + total_dir_res.data.get("byte_size", None), + total_dir_res.data.get("file_count", None), + ) + + response = self.client.get( + 
"/rest/v2/directories/%s/files?not_cr_identifier=%d&include_parent" % (id, cr_id) + ) + if response.data.get("id"): + self.assertEqual(response.data["byte_size"], total_dir_size[0] - parent_data[id][0]) + self.assertEqual( + response.data["file_count"], total_dir_size[1] - parent_data[id][1] + ) + + if response.data.get("parent_directory", None): + return _assertDirectoryData_not_cr_id( + response.data["parent_directory"]["id"], parent_data + ) def _get_parent_dir_data_for_cr(id): - def _get_parents(pk): pk = Directory.objects.get(pk=pk).parent_directory_id if pk: pks.append(pk) _get_parents(pk) - cr_data = CatalogRecord.objects.get(pk=id).files.order_by('parent_directory_id').values_list( - 'parent_directory_id').annotate(Sum('byte_size'), Count('id')) - grouped_cr_data = {parent_id: [byte_size, file_count] for parent_id, byte_size, file_count in cr_data} + cr_data = ( + CatalogRecord.objects.get(pk=id) + .files.order_by("parent_directory_id") + .values_list("parent_directory_id") + .annotate(Sum("byte_size"), Count("id")) + ) + grouped_cr_data = { + parent_id: [byte_size, file_count] for parent_id, byte_size, file_count in cr_data + } drs = {} for dir in grouped_cr_data.keys(): @@ -666,12 +800,16 @@ def _get_parents(pk): # begin tests - cr = self.client.get('/rest/v2/datasets/%d?include_user_metadata&file_details&' - 'directory_fields=id,byte_size,file_count,parent_directory&' - 'file_fields=id,byte_size,parent_directory' % cr_id) + cr = self.client.get( + "/rest/v2/datasets/%d?include_user_metadata&file_details&" + "directory_fields=id,byte_size,file_count,parent_directory&" + "file_fields=id,byte_size,parent_directory" % cr_id + ) - dirs = [d['details']['id'] for d in cr.data['research_dataset'].get('directories', [])] + \ - [f['details']['parent_directory']['id'] for f in cr.data['research_dataset'].get('files', [])] + dirs = [d["details"]["id"] for d in cr.data["research_dataset"].get("directories", [])] + [ + f["details"]["parent_directory"]["id"] + for f in cr.data["research_dataset"].get("files", []) + ] parent_data = _get_parent_dir_data_for_cr(cr_id) @@ -693,28 +831,32 @@ def test_returns_ok_for_open_catalog_record_if_no_authorization(self): # Verify /rest/v2/directories//files?cr_identifier=cr_id returns dir files even without authorization # for open catalog record - self._assert_ok(open_cr_json, 'no') + self._assert_ok(open_cr_json, "no") def test_returns_ok_for_login_catalog_record_if_no_authorization(self): - login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(use_login_access_type=True) + login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details( + use_login_access_type=True + ) # Verify /rest/v2/directories//files?cr_identifier=cr_id returns dir files even without authorization # for login catalog record - self._assert_ok(login_cr_json, 'no') + self._assert_ok(login_cr_json, "no") def test_returns_ok_for_open_catalog_record_if_service_authorization(self): open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() # Verify /rest/v2/directories//files?cr_identifier=cr_id returns dir files with service # authorization for open catalog record - self._assert_ok(open_cr_json, 'service') + self._assert_ok(open_cr_json, "service") def test_returns_ok_for_login_catalog_record_if_service_authorization(self): - login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(use_login_access_type=True) + login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details( + 
use_login_access_type=True + ) # Verify /rest/v2/directories/<dir_id>/files?cr_identifier=cr_id returns dir files with service # authorization for login catalog record - self._assert_ok(login_cr_json, 'service') + self._assert_ok(login_cr_json, "service") @responses.activate def test_returns_ok_for_open_catalog_record_if_owner_authorization(self): @@ -723,7 +865,7 @@ def test_returns_ok_for_open_catalog_record_if_owner_authorization(self): # Verify /rest/v2/directories/<dir_id>/files?cr_identifier=cr_id returns dir files with owner authorization for # owner-owned open catalog record - self._assert_ok(open_cr_json, 'owner') + self._assert_ok(open_cr_json, "owner") @responses.activate def test_returns_ok_for_login_catalog_record_if_owner_authorization(self): @@ -732,30 +874,36 @@ def test_returns_ok_for_login_catalog_record_if_owner_authorization(self): # Verify /rest/v2/directories/<dir_id>/files?cr_identifier=cr_id returns dir files with owner authorization for # owner-owned login_cr_json catalog record - self._assert_ok(login_cr_json, 'owner') + self._assert_ok(login_cr_json, "owner") def test_returns_ok_for_restricted_catalog_record_if_service_authorization(self): restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() # Verify /rest/v2/directories/<dir_id>/files?cr_identifier=cr_id returns dir files with service # authorization for restricted catalog record - self._assert_ok(restricted_cr_json, 'service') + self._assert_ok(restricted_cr_json, "service") @responses.activate def test_returns_ok_for_restricted_catalog_record_if_owner_authorization(self): self.create_end_user_data_catalogs() - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details(True) + restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details( + True + ) # Verify /rest/v2/directories/<dir_id>/files?cr_identifier=cr_id returns dir files with owner authorization for # owner-owned restricted catalog record - self._assert_ok(restricted_cr_json, 'owner') + self._assert_ok(restricted_cr_json, "owner") - def test_returns_ok_for_embargoed_catalog_record_if_available_reached_and_no_authorization(self): - available_embargoed_cr_json = self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) + def test_returns_ok_for_embargoed_catalog_record_if_available_reached_and_no_authorization( + self, + ): + available_embargoed_cr_json = ( + self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) + ) # Verify /rest/v2/directories/<dir_id>/files?cr_identifier=cr_id returns dir files without authorization # for embargoed catalog record whose embargo date has been reached - self._assert_ok(available_embargoed_cr_json, 'no') + self._assert_ok(available_embargoed_cr_json, "no") # THE FORBIDDEN TESTS @@ -764,39 +912,52 @@ def test_returns_forbidden_for_restricted_catalog_record_if_no_authorization(sel # Verify /rest/v2/directories/<dir_id>/files?cr_identifier=cr_id returns forbidden without authorization # for restricted catalog record - self._assert_forbidden(restricted_cr_json, 'no') + self._assert_forbidden(restricted_cr_json, "no") - def test_returns_forbidden_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization(self): + def test_returns_forbidden_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization( + self, + ): + not_available_embargoed_cr_json = ( +
self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(False) + ) # Verify /rest/v2/directories/<dir_id>/files?cr_identifier=cr_id returns forbidden without authorization # for embargoed catalog record whose embargo date has not been reached # Deactivate credentials - self._assert_forbidden(not_available_embargoed_cr_json, 'no') + self._assert_forbidden(not_available_embargoed_cr_json, "no") def _assert_forbidden(self, cr_json, credentials_type): - dir_id = cr_json['research_dataset']['directories'][0]['identifier'] - cr_id = cr_json['identifier'] + dir_id = cr_json["research_dataset"]["directories"][0]["identifier"] + cr_id = cr_json["identifier"] self._set_http_authorization(credentials_type) - response = self.client.get('/rest/v2/directories/{0}/files?cr_identifier={1}'.format(dir_id, cr_id)) + response = self.client.get( + "/rest/v2/directories/{0}/files?cr_identifier={1}".format(dir_id, cr_id) + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - response = self.client.get('/rest/v2/directories/{0}/files?not_cr_identifier={1}'.format(dir_id, cr_id)) + response = self.client.get( + "/rest/v2/directories/{0}/files?not_cr_identifier={1}".format(dir_id, cr_id) + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def _assert_ok(self, cr_json, credentials_type): - dir_file_amt = cr_json['research_dataset']['directories'][0]['details']['file_count'] - dir_id = cr_json['research_dataset']['directories'][0]['identifier'] - cr_id = cr_json['identifier'] + dir_file_amt = cr_json["research_dataset"]["directories"][0]["details"]["file_count"] + dir_id = cr_json["research_dataset"]["directories"][0]["identifier"] + cr_id = cr_json["identifier"] self._set_http_authorization(credentials_type) - response = self.client.get('/rest/v2/directories/{0}/files?cr_identifier={1}&recursive&depth=*' .format(dir_id, cr_id)) + response = self.client.get( + "/rest/v2/directories/{0}/files?cr_identifier={1}&recursive&depth=*".format( + dir_id, cr_id + ) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual(len(response.data), dir_file_amt) - response = self.client.get('/rest/v2/directories/{0}/files?not_cr_identifier={1}&recursive&depth=*' .format(dir_id, cr_id)) + response = self.client.get( + "/rest/v2/directories/{0}/files?not_cr_identifier={1}&recursive&depth=*".format( + dir_id, cr_id + ) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual(len(response.data), 0) @@ -825,115 +986,155 @@ class DirectoryApiReadQueryFiltersTogetherTests(DirectoryApiReadCommon): def test_browsing_directories_with_filters(self): # directory filters including directories_only - response = self.client.get('/rest/v2/directories/17/files? \ - directories_only&include_parent&directory_fields=id&directory_name=phase') + response = self.client.get( + "/rest/v2/directories/17/files? \ + directories_only&include_parent&directory_fields=id&directory_name=phase" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # file filters are not supposed to break anything with directories_only - response = self.client.get('/rest/v2/directories/17/files? \ + response = self.client.get( + "/rest/v2/directories/17/files?
\ directories_only&include_parent&directory_fields=id&directory_name=phase& \ - file_fields=id&file_name=2') + file_fields=id&file_name=2" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # adds cr_identifier - response = self.client.get('/rest/v2/directories/17/files? \ + response = self.client.get( + "/rest/v2/directories/17/files? \ directories_only&include_parent&cr_identifier=13&directory_fields=id&directory_name=phase& \ - file_fields=id&file_name=2') + file_fields=id&file_name=2" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # adds not_cr_identifier - response = self.client.get('/rest/v2/directories/17/files? \ + response = self.client.get( + "/rest/v2/directories/17/files? \ directories_only&include_parent&not_cr_identifier=13&directory_fields=id&directory_name=phase& \ - file_fields=id&file_name=2') + file_fields=id&file_name=2" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # cr_identifier and not_cr_identifier are NOT supposed to work together - response = self.client.get('/rest/v2/directories/17/files? \ + response = self.client.get( + "/rest/v2/directories/17/files? \ directories_only&include_parent&cr_identifier=11&not_cr_identifier=13& \ - directory_fields=id&directory_name=phase&file_fields=id&file_name=2') + directory_fields=id&directory_name=phase&file_fields=id&file_name=2" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) # adds pagination - response = self.client.get('/rest/v2/directories/17/files? \ + response = self.client.get( + "/rest/v2/directories/17/files? \ directories_only&include_parent&cr_identifier=13&directory_fields=id&directory_name=phase& \ - file_fields=id&file_name=2&pagination') + file_fields=id&file_name=2&pagination" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # adds recursive - response = self.client.get('/rest/v2/directories/17/files? \ + response = self.client.get( + "/rest/v2/directories/17/files? \ directories_only&include_parent&cr_identifier=13&directory_fields=id&directory_name=phase& \ - file_fields=id&file_name=2&recursive&depth=*') + file_fields=id&file_name=2&recursive&depth=*" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # adds recursive and pagination - response = self.client.get('/rest/v2/directories/17/files? \ + response = self.client.get( + "/rest/v2/directories/17/files? \ directories_only&include_parent&cr_identifier=13&directory_fields=id&directory_name=phase& \ - file_fields=id&file_name=2&recursive&depth=*') + file_fields=id&file_name=2&recursive&depth=*" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_browsing_files_and_directories_with_filters(self): # file filters - response = self.client.get('/rest/v2/directories/17/files? \ - include_parent&file_fields=id&file_name=file') + response = self.client.get( + "/rest/v2/directories/17/files? \ + include_parent&file_fields=id&file_name=file" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # adds directory filters - response = self.client.get('/rest/v2/directories/17/files? \ + response = self.client.get( + "/rest/v2/directories/17/files?
\ include_parent&file_fields=id&file_name=file& \ - directory_fields=id&directory_name=phase') + directory_fields=id&directory_name=phase" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # adds cr_identifier - response = self.client.get('/rest/v2/directories/17/files? \ + response = self.client.get( + "/rest/v2/directories/17/files? \ include_parent&cr_identifier=13&file_fields=id&file_name=file& \ - directory_fields=id&directory_name=phase') + directory_fields=id&directory_name=phase" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # adds not_cr_identifier - response = self.client.get('/rest/v2/directories/17/files? \ + response = self.client.get( + "/rest/v2/directories/17/files? \ include_parent&not_cr_identifier=13&file_fields=id&file_name=file& \ - directory_fields=id&directory_name=phase') + directory_fields=id&directory_name=phase" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # cr_identifier and not_cr_identifier are NOT supposed to work together - response = self.client.get('/rest/v2/directories/17/files? \ + response = self.client.get( + "/rest/v2/directories/17/files? \ include_parent&cr_identifier=13&not_cr_identifier=11&file_fields=id&file_name=file& \ - directory_fields=id&directory_name=phase') + directory_fields=id&directory_name=phase" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) # adds recursive, directory filters are not supposed to break anything - response = self.client.get('/rest/v2/directories/17/files? \ + response = self.client.get( + "/rest/v2/directories/17/files? \ include_parent&cr_identifier=13&file_fields=id&file_name=file& \ - directory_fields=id&directory_name=phase&recursive&depth=*') + directory_fields=id&directory_name=phase&recursive&depth=*" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # adds pagination - response = self.client.get('/rest/v2/directories/17/files? \ + response = self.client.get( + "/rest/v2/directories/17/files? \ include_parent&cr_identifier=13&file_fields=id&file_name=file& \ - directory_fields=id&directory_name=phase&pagination') + directory_fields=id&directory_name=phase&pagination" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # adds recursive and pagination - response = self.client.get('/rest/v2/directories/17/files? \ + response = self.client.get( + "/rest/v2/directories/17/files?
\ include_parent&cr_identifier=13&file_fields=id&file_name=file& \ - directory_fields=id&directory_name=phase&recursive&depth=*&pagination') + directory_fields=id&directory_name=phase&recursive&depth=*&pagination" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) class DirectoryApiReadCatalogRecordFileBrowsingRetrieveSpecificFieldsTests(DirectoryApiReadCommon): - def setUp(self): super().setUp() CatalogRecord.objects.get(pk=12).calculate_directory_byte_sizes_and_file_counts() def test_retrieve_requested_directory_fields_only(self): response = self.client.get( - '/rest/v2/datasets/12?include_user_metadata&file_details&directory_fields=identifier,directory_path' + "/rest/v2/datasets/12?include_user_metadata&file_details&directory_fields=identifier,directory_path" ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['directories'][0]['details'].keys()), 2) - self.assertEqual('identifier' in response.data['research_dataset']['directories'][0]['details'], True) - self.assertEqual('directory_path' in response.data['research_dataset']['directories'][0]['details'], True) + self.assertEqual( + len(response.data["research_dataset"]["directories"][0]["details"].keys()), + 2, + ) + self.assertEqual( + "identifier" in response.data["research_dataset"]["directories"][0]["details"], + True, + ) + self.assertEqual( + "directory_path" in response.data["research_dataset"]["directories"][0]["details"], + True, + ) def test_retrieve_directory_byte_size_and_file_count(self): """ @@ -941,49 +1142,81 @@ def test_retrieve_directory_byte_size_and_file_count(self): targeted tests for just those fields. """ response = self.client.get( - '/rest/v2/datasets/12?include_user_metadata&file_details&directory_fields=identifier,byte_size' + "/rest/v2/datasets/12?include_user_metadata&file_details&directory_fields=identifier,byte_size" ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['directories'][0]['details'].keys()), 2) - self.assertEqual('identifier' in response.data['research_dataset']['directories'][0]['details'], True) - self.assertEqual('byte_size' in response.data['research_dataset']['directories'][0]['details'], True) + self.assertEqual( + len(response.data["research_dataset"]["directories"][0]["details"].keys()), + 2, + ) + self.assertEqual( + "identifier" in response.data["research_dataset"]["directories"][0]["details"], + True, + ) + self.assertEqual( + "byte_size" in response.data["research_dataset"]["directories"][0]["details"], + True, + ) response = self.client.get( - '/rest/v2/datasets/12?include_user_metadata&file_details&directory_fields=identifier,file_count' + "/rest/v2/datasets/12?include_user_metadata&file_details&directory_fields=identifier,file_count" ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['directories'][0]['details'].keys()), 2) - self.assertEqual('identifier' in response.data['research_dataset']['directories'][0]['details'], True) - self.assertEqual('file_count' in response.data['research_dataset']['directories'][0]['details'], True) + self.assertEqual( + len(response.data["research_dataset"]["directories"][0]["details"].keys()), + 2, + ) + self.assertEqual( + "identifier" in response.data["research_dataset"]["directories"][0]["details"], + True, + ) + self.assertEqual( + "file_count" in response.data["research_dataset"]["directories"][0]["details"], + True, + 
) def test_retrieve_requested_file_fields_only(self): response = self.client.get( - '/rest/v2/datasets/12?include_user_metadata&file_details&file_fields=identifier,file_path' + "/rest/v2/datasets/12?include_user_metadata&file_details&file_fields=identifier,file_path" ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['files'][0]['details'].keys()), 2) - self.assertEqual('identifier' in response.data['research_dataset']['files'][0]['details'], True) - self.assertEqual('file_path' in response.data['research_dataset']['files'][0]['details'], True) + self.assertEqual(len(response.data["research_dataset"]["files"][0]["details"].keys()), 2) + self.assertEqual( + "identifier" in response.data["research_dataset"]["files"][0]["details"], + True, + ) + self.assertEqual( + "file_path" in response.data["research_dataset"]["files"][0]["details"], + True, + ) def test_retrieve_requested_file_and_directory_fields_only(self): response = self.client.get( - '/rest/v2/datasets/12?include_user_metadata&file_details&file_fields=identifier&directory_fields=id' + "/rest/v2/datasets/12?include_user_metadata&file_details&file_fields=identifier&directory_fields=id" ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['files'][0]['details'].keys()), 1) - self.assertEqual('identifier' in response.data['research_dataset']['files'][0]['details'], True) - self.assertEqual(len(response.data['research_dataset']['directories'][0]['details'].keys()), 1) - self.assertEqual('id' in response.data['research_dataset']['directories'][0]['details'], True) + self.assertEqual(len(response.data["research_dataset"]["files"][0]["details"].keys()), 1) + self.assertEqual( + "identifier" in response.data["research_dataset"]["files"][0]["details"], + True, + ) + self.assertEqual( + len(response.data["research_dataset"]["directories"][0]["details"].keys()), + 1, + ) + self.assertEqual( + "id" in response.data["research_dataset"]["directories"][0]["details"], True + ) class DirectoryApiReadEndUserAccess(DirectoryApiReadCommon): - ''' + """ Test End User Access permissions when browsing files using /rest/v2/directories api. Note: In these tests, the token by default does not have correct project groups. Token project groups are only made valid by calling _update_token_with_project_of_directory(). - ''' + """ def setUp(self): super().setUp() @@ -992,76 +1225,92 @@ def setUp(self): def _update_token_with_project_of_directory(self, dir_id): proj = Directory.objects.get(pk=dir_id).project_identifier - self.token['group_names'].append('IDA01:%s' % proj) - self._use_http_authorization(method='bearer', token=self.token) + self.token["group_names"].append("IDA01:%s" % proj) + self._use_http_authorization(method="bearer", token=self.token) @responses.activate def test_user_can_browse_files_from_their_projects(self): - ''' + """ Ensure users can only read files from /rest/v2/directories owned by them. 
- ''' - self._use_http_authorization(method='bearer', token=self.token) + """ + self._use_http_authorization(method="bearer", token=self.token) # first read files without project access - should fail - response = self.client.get('/rest/v2/directories/1') + response = self.client.get("/rest/v2/directories/1") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - response = self.client.get('/rest/v2/directories/1/files') + response = self.client.get("/rest/v2/directories/1/files") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) # set user to same project as previous files and try again. should now succeed self._update_token_with_project_of_directory(1) - response = self.client.get('/rest/v2/directories/1') + response = self.client.get("/rest/v2/directories/1") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get('/rest/v2/directories/1/files') + response = self.client.get("/rest/v2/directories/1/files") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) @responses.activate def test_browsing_by_project_and_file_path_is_protected(self): - self._use_http_authorization(method='bearer', token=self.token) + self._use_http_authorization(method="bearer", token=self.token) dr = Directory.objects.get(pk=2) - response = self.client.get('/rest/v2/directories/files?path=%s&project=%s' % - (dr.directory_path, dr.project_identifier)) + response = self.client.get( + "/rest/v2/directories/files?path=%s&project=%s" + % (dr.directory_path, dr.project_identifier) + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) self._update_token_with_project_of_directory(2) - response = self.client.get('/rest/v2/directories/files?path=%s&project=%s' % - (dr.directory_path, dr.project_identifier)) + response = self.client.get( + "/rest/v2/directories/files?path=%s&project=%s" + % (dr.directory_path, dr.project_identifier) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) @responses.activate def test_browsing_in_cr_context(self): - ''' + """ Cr with open access type should be available for any end-user api user. Browsing files for a cr with restricted access type should be forbidden for non-owner (or service) user. - ''' + """ cr = CatalogRecord.objects.get(pk=1) - self._use_http_authorization(method='bearer', token=self.token) - response = self.client.get('/rest/v2/directories/3/files?cr_identifier={0}'.format(cr.identifier)) + self._use_http_authorization(method="bearer", token=self.token) + response = self.client.get( + "/rest/v2/directories/3/files?cr_identifier={0}".format(cr.identifier) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - cr.research_dataset['access_rights']['access_type']['identifier'] = ACCESS_TYPES['restricted'] + cr.research_dataset["access_rights"]["access_type"]["identifier"] = ACCESS_TYPES[ + "restricted" + ] cr.force_save() - response = self.client.get('/rest/v2/directories/3/files?cr_identifier={0}'.format(cr.identifier)) + response = self.client.get( + "/rest/v2/directories/3/files?cr_identifier={0}".format(cr.identifier) + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) @responses.activate def test_browsing_in_not_cr_context(self): - ''' + """ Cr with open access type should be available for any end-user api user. Browsing files for a cr with restricted access type should be forbidden for non-owner (or service) user. 
- ''' + """ cr = CatalogRecord.objects.get(pk=1) - self._use_http_authorization(method='bearer', token=self.token) - response = self.client.get('/rest/v2/directories/3/files?not_cr_identifier={0}'.format(cr.identifier)) + self._use_http_authorization(method="bearer", token=self.token) + response = self.client.get( + "/rest/v2/directories/3/files?not_cr_identifier={0}".format(cr.identifier) + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - cr.research_dataset['access_rights']['access_type']['identifier'] = ACCESS_TYPES['restricted'] + cr.research_dataset["access_rights"]["access_type"]["identifier"] = ACCESS_TYPES[ + "restricted" + ] cr.force_save() - response = self.client.get('/rest/v2/directories/3/files?not_cr_identifier={0}'.format(cr.identifier)) + response = self.client.get( + "/rest/v2/directories/3/files?not_cr_identifier={0}".format(cr.identifier) + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) @@ -1080,149 +1329,193 @@ def test_read_directory_with_default_limit_pagination(self): """ Test browsing files with pagination """ - file_dict = self._get_dirs_files_ids('/rest/v2/directories/24/files') + file_dict = self._get_dirs_files_ids("/rest/v2/directories/24/files") - response = self.client.get('/rest/v2/directories/24/files?pagination') + response = self.client.get("/rest/v2/directories/24/files?pagination") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 10) - self.assertEqual(len(response.data['results']['files']), 0) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict['directories'][0]) - self.assertEqual(response.data['results']['directories'][9]['id'], file_dict['directories'][9]) + self.assertEqual(len(response.data["results"]["directories"]), 10) + self.assertEqual(len(response.data["results"]["files"]), 0) + self.assertEqual( + response.data["results"]["directories"][0]["id"], + file_dict["directories"][0], + ) + self.assertEqual( + response.data["results"]["directories"][9]["id"], + file_dict["directories"][9], + ) - next_link = response.data['next'].split('http://testserver')[1] + next_link = response.data["next"].split("http://testserver")[1] response = self.client.get(next_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 4) - self.assertEqual(len(response.data['results']['files']), 6) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict['directories'][10]) - self.assertEqual(response.data['results']['directories'][3]['id'], file_dict['directories'][13]) - self.assertEqual(response.data['results']['files'][0]['id'], file_dict['files'][0]) - self.assertEqual(response.data['results']['files'][5]['id'], file_dict['files'][5]) + self.assertEqual(len(response.data["results"]["directories"]), 4) + self.assertEqual(len(response.data["results"]["files"]), 6) + self.assertEqual( + response.data["results"]["directories"][0]["id"], + file_dict["directories"][10], + ) + self.assertEqual( + response.data["results"]["directories"][3]["id"], + file_dict["directories"][13], + ) + self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][0]) + self.assertEqual(response.data["results"]["files"][5]["id"], file_dict["files"][5]) - next_link = response.data['next'].split('http://testserver')[1] + next_link = response.data["next"].split("http://testserver")[1] response = self.client.get(next_link) 
self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 0) - self.assertEqual(len(response.data['results']['files']), 10) - self.assertEqual(response.data['results']['files'][0]['id'], file_dict['files'][6]) - self.assertEqual(response.data['results']['files'][9]['id'], file_dict['files'][15]) + self.assertEqual(len(response.data["results"]["directories"]), 0) + self.assertEqual(len(response.data["results"]["files"]), 10) + self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][6]) + self.assertEqual(response.data["results"]["files"][9]["id"], file_dict["files"][15]) - prev_link = response.data['previous'].split('http://testserver')[1] + prev_link = response.data["previous"].split("http://testserver")[1] response = self.client.get(prev_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 4) - self.assertEqual(len(response.data['results']['files']), 6) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict['directories'][10]) - self.assertEqual(response.data['results']['directories'][3]['id'], file_dict['directories'][13]) - self.assertEqual(response.data['results']['files'][0]['id'], file_dict['files'][0]) - self.assertEqual(response.data['results']['files'][5]['id'], file_dict['files'][5]) + self.assertEqual(len(response.data["results"]["directories"]), 4) + self.assertEqual(len(response.data["results"]["files"]), 6) + self.assertEqual( + response.data["results"]["directories"][0]["id"], + file_dict["directories"][10], + ) + self.assertEqual( + response.data["results"]["directories"][3]["id"], + file_dict["directories"][13], + ) + self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][0]) + self.assertEqual(response.data["results"]["files"][5]["id"], file_dict["files"][5]) def test_read_directory_with_custom_limit_pagination(self): - file_dict = self._get_dirs_files_ids('/rest/v2/directories/24/files') + file_dict = self._get_dirs_files_ids("/rest/v2/directories/24/files") - response = self.client.get('/rest/v2/directories/24/files?limit=4&offset=12&pagination') + response = self.client.get("/rest/v2/directories/24/files?limit=4&offset=12&pagination") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 2) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict['directories'][12]) - self.assertEqual(response.data['results']['directories'][1]['id'], file_dict['directories'][13]) - self.assertEqual(len(response.data['results']['files']), 2) - self.assertEqual(response.data['results']['files'][0]['id'], file_dict['files'][0]) - self.assertEqual(response.data['results']['files'][1]['id'], file_dict['files'][1]) + self.assertEqual(len(response.data["results"]["directories"]), 2) + self.assertEqual( + response.data["results"]["directories"][0]["id"], + file_dict["directories"][12], + ) + self.assertEqual( + response.data["results"]["directories"][1]["id"], + file_dict["directories"][13], + ) + self.assertEqual(len(response.data["results"]["files"]), 2) + self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][0]) + self.assertEqual(response.data["results"]["files"][1]["id"], file_dict["files"][1]) - next_link = response.data['next'].split('http://testserver')[1] - prev_link = response.data['previous'].split('http://testserver')[1] + next_link = 
response.data["next"].split("http://testserver")[1] + prev_link = response.data["previous"].split("http://testserver")[1] response = self.client.get(next_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 0) - self.assertEqual(len(response.data['results']['files']), 4) - self.assertEqual(response.data['results']['files'][0]['id'], file_dict['files'][2]) - self.assertEqual(response.data['results']['files'][3]['id'], file_dict['files'][5]) + self.assertEqual(len(response.data["results"]["directories"]), 0) + self.assertEqual(len(response.data["results"]["files"]), 4) + self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][2]) + self.assertEqual(response.data["results"]["files"][3]["id"], file_dict["files"][5]) response = self.client.get(prev_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 4) - self.assertEqual(len(response.data['results']['files']), 0) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict['directories'][8]) - self.assertEqual(response.data['results']['directories'][3]['id'], file_dict['directories'][11]) + self.assertEqual(len(response.data["results"]["directories"]), 4) + self.assertEqual(len(response.data["results"]["files"]), 0) + self.assertEqual( + response.data["results"]["directories"][0]["id"], + file_dict["directories"][8], + ) + self.assertEqual( + response.data["results"]["directories"][3]["id"], + file_dict["directories"][11], + ) def test_read_directory_with_recursive_and_pagination(self): - ''' + """ Query with recursive flag must return only files as a list - ''' - file_list = self._get_dirs_files_ids('/rest/v2/directories/24/files?recursive') + """ + file_list = self._get_dirs_files_ids("/rest/v2/directories/24/files?recursive") - response = self.client.get('/rest/v2/directories/24/files?recursive&pagination') + response = self.client.get("/rest/v2/directories/24/files?recursive&pagination") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 10) - self.assertEqual(response.data['results'][0]['id'], file_list[0]) - self.assertEqual(response.data['results'][9]['id'], file_list[9]) + self.assertEqual(len(response.data["results"]), 10) + self.assertEqual(response.data["results"][0]["id"], file_list[0]) + self.assertEqual(response.data["results"][9]["id"], file_list[9]) - next_link = response.data['next'].split('http://testserver')[1] + next_link = response.data["next"].split("http://testserver")[1] response = self.client.get(next_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 10) - self.assertEqual(response.data['results'][0]['id'], file_list[10]) - self.assertEqual(response.data['results'][9]['id'], file_list[19]) + self.assertEqual(len(response.data["results"]), 10) + self.assertEqual(response.data["results"][0]["id"], file_list[10]) + self.assertEqual(response.data["results"][9]["id"], file_list[19]) - prev_link = response.data['previous'].split('http://testserver')[1] + prev_link = response.data["previous"].split("http://testserver")[1] response = self.client.get(prev_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']), 10) - self.assertEqual(response.data['results'][0]['id'], file_list[0]) - self.assertEqual(response.data['results'][9]['id'], file_list[9]) + 
self.assertEqual(len(response.data["results"]), 10) + self.assertEqual(response.data["results"][0]["id"], file_list[0]) + self.assertEqual(response.data["results"][9]["id"], file_list[9]) def test_read_directory_with_dirs_only_and_pagination(self): - ''' + """ Query with directories_only flag must return only directories - ''' - file_dict = self._get_dirs_files_ids('/rest/v2/directories/24/files?directories_only')['directories'] + """ + file_dict = self._get_dirs_files_ids("/rest/v2/directories/24/files?directories_only")[ + "directories" + ] - response = self.client.get('/rest/v2/directories/24/files?directories_only&pagination=true') + response = self.client.get("/rest/v2/directories/24/files?directories_only&pagination=true") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['results']['directories']), 10) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict[0]) - self.assertEqual(response.data['results']['directories'][9]['id'], file_dict[9]) + self.assertEqual(len(response.data["results"]["directories"]), 10) + self.assertEqual(response.data["results"]["directories"][0]["id"], file_dict[0]) + self.assertEqual(response.data["results"]["directories"][9]["id"], file_dict[9]) - next_link = response.data['next'].split('http://testserver')[1] + next_link = response.data["next"].split("http://testserver")[1] response = self.client.get(next_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 4) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict[10]) - self.assertEqual(response.data['results']['directories'][3]['id'], file_dict[13]) + self.assertEqual(len(response.data["results"]["directories"]), 4) + self.assertEqual(response.data["results"]["directories"][0]["id"], file_dict[10]) + self.assertEqual(response.data["results"]["directories"][3]["id"], file_dict[13]) - prev_link = response.data['previous'].split('http://testserver')[1] + prev_link = response.data["previous"].split("http://testserver")[1] response = self.client.get(prev_link) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 10) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict[0]) - self.assertEqual(response.data['results']['directories'][9]['id'], file_dict[9]) + self.assertEqual(len(response.data["results"]["directories"]), 10) + self.assertEqual(response.data["results"]["directories"][0]["id"], file_dict[0]) + self.assertEqual(response.data["results"]["directories"][9]["id"], file_dict[9]) def test_read_directory_with_parent_and_pagination(self): - ''' + """ Query with directories_only flag must return only directories - ''' - file_dict = self._get_dirs_files_ids('/rest/v2/directories/24/files?include_parent') + """ + file_dict = self._get_dirs_files_ids("/rest/v2/directories/24/files?include_parent") - response = self.client.get('/rest/v2/directories/24/files?include_parent&pagination=true') + response = self.client.get("/rest/v2/directories/24/files?include_parent&pagination=true") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['results']['directories']), 10) - self.assertEqual(response.data['results']['directories'][0]['id'], file_dict['directories'][0]) - self.assertEqual(response.data['results']['directories'][9]['id'], file_dict['directories'][9]) - 
-        self.assertEqual(response.data['results']['id'], 24)
-        self.assertEqual(response.data['results']['directory_name'], "10")
+        self.assertEqual(len(response.data["results"]["directories"]), 10)
+        self.assertEqual(
+            response.data["results"]["directories"][0]["id"],
+            file_dict["directories"][0],
+        )
+        self.assertEqual(
+            response.data["results"]["directories"][9]["id"],
+            file_dict["directories"][9],
+        )
+        self.assertEqual(response.data["results"]["id"], 24)
+        self.assertEqual(response.data["results"]["directory_name"], "10")

-        next_link = response.data['next'].split('http://testserver')[1]
+        next_link = response.data["next"].split("http://testserver")[1]
         response = self.client.get(next_link)
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['results']['directories']), 4)
-        self.assertEqual(len(response.data['results']['files']), 6)
-        self.assertEqual(response.data['results']['directories'][0]['id'], file_dict['directories'][10])
-        self.assertEqual(response.data['results']['directories'][3]['id'], file_dict['directories'][13])
-        self.assertEqual(response.data['results']['files'][0]['id'], file_dict['files'][0])
-        self.assertEqual(response.data['results']['files'][5]['id'], file_dict['files'][5])
-        self.assertEqual(response.data['results']['id'], 24)
-        self.assertEqual(response.data['results']['directory_name'], "10")
+        self.assertEqual(len(response.data["results"]["directories"]), 4)
+        self.assertEqual(len(response.data["results"]["files"]), 6)
+        self.assertEqual(
+            response.data["results"]["directories"][0]["id"],
+            file_dict["directories"][10],
+        )
+        self.assertEqual(
+            response.data["results"]["directories"][3]["id"],
+            file_dict["directories"][13],
+        )
+        self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][0])
+        self.assertEqual(response.data["results"]["files"][5]["id"], file_dict["files"][5])
+        self.assertEqual(response.data["results"]["id"], 24)
+        self.assertEqual(response.data["results"]["directory_name"], "10")


 class DirectoryApiReadFileNameDirectoryNameTests(DirectoryApiReadCommon):
@@ -1237,144 +1530,178 @@ def setUp(self):

     def test_browsing_directory_with_file_name(self):
-        response = self.client.get('/rest/v2/directories/24/files?file_name=')
+        response = self.client.get("/rest/v2/directories/24/files?file_name=")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['files']), 51)
+        self.assertEqual(len(response.data["files"]), 51)

-        response = self.client.get('/rest/v2/directories/24/files?file_name=_name_1')
+        response = self.client.get("/rest/v2/directories/24/files?file_name=_name_1")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['files']), 21)
+        self.assertEqual(len(response.data["files"]), 21)

-        response = self.client.get('/rest/v2/directories/24/files?file_name=0')
+        response = self.client.get("/rest/v2/directories/24/files?file_name=0")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['files']), 15)
+        self.assertEqual(len(response.data["files"]), 15)

-        response = self.client.get('/rest/v2/directories/24/files?file_name=_name_118')
+        response = self.client.get("/rest/v2/directories/24/files?file_name=_name_118")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['files']), 1)
+        self.assertEqual(len(response.data["files"]), 1)

     def test_browsing_directory_with_directory_name(self):
-        response = self.client.get('/rest/v2/directories/24/files?directory_name=')
+        response = self.client.get("/rest/v2/directories/24/files?directory_name=")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['directories']), 5)
+        self.assertEqual(len(response.data["directories"]), 5)

-        response = self.client.get('/rest/v2/directories/24/files?directory_name=dir_1')
+        response = self.client.get("/rest/v2/directories/24/files?directory_name=dir_1")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['directories']), 1)
+        self.assertEqual(len(response.data["directories"]), 1)

-        response = self.client.get('/rest/v2/directories/24/files?directory_name=dir')
+        response = self.client.get("/rest/v2/directories/24/files?directory_name=dir")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['directories']), 5)
+        self.assertEqual(len(response.data["directories"]), 5)

     def test_browsing_directory_with_directory_and_file_name(self):
-        response = self.client.get('/rest/v2/directories/24/files?directory_name=&file_name=')
+        response = self.client.get("/rest/v2/directories/24/files?directory_name=&file_name=")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['directories']), 5)
-        self.assertEqual(len(response.data['files']), 51)
+        self.assertEqual(len(response.data["directories"]), 5)
+        self.assertEqual(len(response.data["files"]), 51)

-        response = self.client.get('/rest/v2/directories/24/files?directory_name=dir_1&file_name=file_name_120')
+        response = self.client.get(
+            "/rest/v2/directories/24/files?directory_name=dir_1&file_name=file_name_120"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['directories']), 1)
-        self.assertEqual(len(response.data['files']), 1)
+        self.assertEqual(len(response.data["directories"]), 1)
+        self.assertEqual(len(response.data["files"]), 1)

-        response = self.client.get('/rest/v2/directories/24/files?directory_name=dir&file_name=not_existing')
+        response = self.client.get(
+            "/rest/v2/directories/24/files?directory_name=dir&file_name=not_existing"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['directories']), 5)
-        self.assertEqual(len(response.data['files']), 0)
+        self.assertEqual(len(response.data["directories"]), 5)
+        self.assertEqual(len(response.data["files"]), 0)

     def test_browsing_directory_with_file_and_dir_name_and_pagination(self):
         # second page should return last filtered files
-        response = self.client.get('/rest/v2/directories/24/files?file_name=0&pagination&limit=10&offset=10')
+        response = self.client.get(
+            "/rest/v2/directories/24/files?file_name=0&pagination&limit=10&offset=10"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['results']['directories']), 0)
-        self.assertEqual(len(response.data['results']['files']), 10)
+        self.assertEqual(len(response.data["results"]["directories"]), 0)
+        self.assertEqual(len(response.data["results"]["files"]), 10)

         # first page with limit of 3 should return first filtered directories
-        response = self.client.get('/rest/v2/directories/24/files?directory_name=dir_&pagination&limit=3')
+        response = self.client.get(
+            "/rest/v2/directories/24/files?directory_name=dir_&pagination&limit=3"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['results']['directories']), 3)
-        self.assertEqual(len(response.data['results']['files']), 0)
+        self.assertEqual(len(response.data["results"]["directories"]), 3)
+        self.assertEqual(len(response.data["results"]["files"]), 0)

         # first page with limit of 3 should return first filtered directories
-        response = self.client.get('/rest/v2/directories/24/files?directory_name=dir_1&file_name=0&pagination')
+        response = self.client.get(
+            "/rest/v2/directories/24/files?directory_name=dir_1&file_name=0&pagination"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['results']['directories']), 1)
-        self.assertEqual(len(response.data['results']['files']), 9)
+        self.assertEqual(len(response.data["results"]["directories"]), 1)
+        self.assertEqual(len(response.data["results"]["files"]), 9)

     def test_browsing_directory_with_directory_and_file_name_and_dirs_only(self):
-        response = self.client.get('/rest/v2/directories/24/files?file_name=_name_11&directories_only')
+        response = self.client.get(
+            "/rest/v2/directories/24/files?file_name=_name_11&directories_only"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['directories']), 5)
-        self.assertEqual(response.data.get('files'), None)
+        self.assertEqual(len(response.data["directories"]), 5)
+        self.assertEqual(response.data.get("files"), None)

-        response = self.client.get('/rest/v2/directories/24/files?directory_name=dir_5&directories_only')
+        response = self.client.get(
+            "/rest/v2/directories/24/files?directory_name=dir_5&directories_only"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data['directories']), 1)
-        self.assertEqual(response.data.get('files'), None)
+        self.assertEqual(len(response.data["directories"]), 1)
+        self.assertEqual(response.data.get("files"), None)

     def test_browsing_directory_with_directory_and_file_name_and_recursive(self):
-        response = self.client.get('/rest/v2/directories/24/files?file_name=_name_11&recursive')
+        response = self.client.get("/rest/v2/directories/24/files?file_name=_name_11&recursive")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(len(response.data), 10)

         # should have one file from directory with the rest of filtered files
-        response = self.client.get('/rest/v2/directories/24/files?directory_name=dir_5&file_name=5&recursive&depth=*')
+        response = self.client.get(
+            "/rest/v2/directories/24/files?directory_name=dir_5&file_name=5&recursive&depth=*"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(len(response.data), 6)

-    def test_browsing_directory_with_directory_and_file_name_and_cr_identifier_and_not_cr_identifier(self):
+    def test_browsing_directory_with_directory_and_file_name_and_cr_identifier_and_not_cr_identifier(
+        self,
+    ):
         # tests for directory_name and cr_identifier
-        response = self.client.get('/rest/v2/directories/17/files?directory_name=2&cr_identifier=13')
+        response = self.client.get(
+            "/rest/v2/directories/17/files?directory_name=2&cr_identifier=13"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['directories']), 1)
+        self.assertEqual(len(response.data["directories"]), 1)

-        response = self.client.get('/rest/v2/directories/17/files?directory_name=phase&cr_identifier=13')
+        response = self.client.get(
+            "/rest/v2/directories/17/files?directory_name=phase&cr_identifier=13"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['directories']), 2)
+        self.assertEqual(len(response.data["directories"]), 2)

-        response = self.client.get('/rest/v2/directories/17/files?directory_name=&cr_identifier=13')
+        response = self.client.get("/rest/v2/directories/17/files?directory_name=&cr_identifier=13")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['directories']), 2)
+        self.assertEqual(len(response.data["directories"]), 2)

         # tests for directory_name and not_cr_identifier
-        response = self.client.get('/rest/v2/directories/17/files?directory_name=phase&not_cr_identifier=13')
+        response = self.client.get(
+            "/rest/v2/directories/17/files?directory_name=phase&not_cr_identifier=13"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['directories']), 0)
+        self.assertEqual(len(response.data["directories"]), 0)

-        response = self.client.get('/rest/v2/directories/17/files?directory_name=2&not_cr_identifier=13')
+        response = self.client.get(
+            "/rest/v2/directories/17/files?directory_name=2&not_cr_identifier=13"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['directories']), 0)
+        self.assertEqual(len(response.data["directories"]), 0)

         # tests for file_name and cr_identifier
-        response = self.client.get('/rest/v2/directories/12/files?file_name=22&cr_identifier=13')
+        response = self.client.get("/rest/v2/directories/12/files?file_name=22&cr_identifier=13")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['files']), 1)
+        self.assertEqual(len(response.data["files"]), 1)

-        response = self.client.get('/rest/v2/directories/12/files?file_name=name_&cr_identifier=13')
+        response = self.client.get("/rest/v2/directories/12/files?file_name=name_&cr_identifier=13")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['files']), 3)
+        self.assertEqual(len(response.data["files"]), 3)

-        response = self.client.get('/rest/v2/directories/12/files?file_name=&cr_identifier=13')
+        response = self.client.get("/rest/v2/directories/12/files?file_name=&cr_identifier=13")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['files']), 3)
+        self.assertEqual(len(response.data["files"]), 3)

-        response = self.client.get('/rest/v2/directories/17/files?file_name=&cr_identifier=13&directories_only')
+        response = self.client.get(
+            "/rest/v2/directories/17/files?file_name=&cr_identifier=13&directories_only"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(response.data.get('files'), None)
+        self.assertEqual(response.data.get("files"), None)

         # tests for file_name and not_cr_identifier
-        response = self.client.get('/rest/v2/directories/16/files?file_name=name&not_cr_identifier=13')
+        response = self.client.get(
+            "/rest/v2/directories/16/files?file_name=name&not_cr_identifier=13"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['files']), 1)
+        self.assertEqual(len(response.data["files"]), 1)

-        response = self.client.get('/rest/v2/directories/16/files?file_name=name_2&not_cr_identifier=13')
+        response = self.client.get(
+            "/rest/v2/directories/16/files?file_name=name_2&not_cr_identifier=13"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['files']), 1)
+        self.assertEqual(len(response.data["files"]), 1)

-        response = self.client.get('/rest/v2/directories/16/files?file_name=name_1&not_cr_identifier=13')
+        response = self.client.get(
+            "/rest/v2/directories/16/files?file_name=name_1&not_cr_identifier=13"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['files']), 0)
+        self.assertEqual(len(response.data["files"]), 0)
diff --git a/src/metax_api/tests/api/rest/v2/views/directories/write.py b/src/metax_api/tests/api/rest/v2/views/directories/write.py
index e7460776..a7526984 100755
--- a/src/metax_api/tests/api/rest/v2/views/directories/write.py
+++ b/src/metax_api/tests/api/rest/v2/views/directories/write.py
@@ -20,17 +20,17 @@ def setUpClass(cls):
         """
         Loaded only once for test cases inside this class.
         """
-        call_command('loaddata', test_data_file_path, verbosity=0)
+        call_command("loaddata", test_data_file_path, verbosity=0)
         super(DirectoryApiWriteCommon, cls).setUpClass()

     def setUp(self):
         """
         Reloaded for every test case
         """
-        call_command('loaddata', test_data_file_path, verbosity=0)
-        dir_from_test_data = self._get_object_from_test_data('directory')
-        self.identifier = dir_from_test_data['identifier']
-        self.directory_name = dir_from_test_data['directory_name']
+        call_command("loaddata", test_data_file_path, verbosity=0)
+        dir_from_test_data = self._get_object_from_test_data("directory")
+        self.identifier = dir_from_test_data["identifier"]
+        self.directory_name = dir_from_test_data["directory_name"]

         """
         New data that is sent to the server for POST, PUT, PATCH requests. Modified
@@ -41,141 +41,195 @@ def setUp(self):
         self._use_http_authorization()

     def _get_new_test_data(self):
-        from_test_data = self._get_object_from_test_data('directory', requested_index=0)
-        from_test_data.update({
-            "identifier": "urn:nbn:fi:csc-ida201401200000000001",
-        })
+        from_test_data = self._get_object_from_test_data("directory", requested_index=0)
+        from_test_data.update(
+            {
+                "identifier": "urn:nbn:fi:csc-ida201401200000000001",
+            }
+        )
         return from_test_data

     def _get_second_new_test_data(self):
         from_test_data = self._get_new_test_data()
-        from_test_data.update({
-            "identifier": "urn:nbn:fi:csc-ida201401200000000002",
-        })
+        from_test_data.update(
+            {
+                "identifier": "urn:nbn:fi:csc-ida201401200000000002",
+            }
+        )
         return from_test_data


 class DirectoryApiWriteTests(DirectoryApiWriteCommon):
-
     def test_create_files_for_catalog_record(self):
         """
         Tests flow of creating files and assigning them to dataset.
""" - project = 'project-test-files' - directory_path = '/dir/' + project = "project-test-files" + directory_path = "/dir/" files = [] for n in range(1, 4): - file_path = directory_path + 'file' + str(n) - f = self._get_new_file_data(str(n), project=project, file_path=file_path, - directory_path=directory_path, open_access=True) - f.pop('parent_directory', None) + file_path = directory_path + "file" + str(n) + f = self._get_new_file_data( + str(n), + project=project, + file_path=file_path, + directory_path=directory_path, + open_access=True, + ) + f.pop("parent_directory", None) files.append(f) cr = self._get_ida_dataset_without_files() - fields = 'file_fields=id,identifier,file_path&directory_fields=id,identifier,directory_path,file_count' + fields = "file_fields=id,identifier,file_path&directory_fields=id,identifier,directory_path,file_count" # start test # - self._set_http_authorization('service') + self._set_http_authorization("service") # adding file1 to /dir/ - response = self.client.post('/rest/v2/files', files[0], format='json') + response = self.client.post("/rest/v2/files", files[0], format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - file1_id = response.data['identifier'] + file1_id = response.data["identifier"] # adding file2 to /dir/ - response = self.client.post('/rest/v2/files', files[1], format='json') + response = self.client.post("/rest/v2/files", files[1], format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - file2_id = response.data['identifier'] + file2_id = response.data["identifier"] # adding file3 to /dir/ - response = self.client.post('/rest/v2/files', files[2], format='json') + response = self.client.post("/rest/v2/files", files[2], format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - file3_id = response.data['identifier'] + file3_id = response.data["identifier"] # creating dataset - response = self.client.post('/rest/v2/datasets?draft=true', cr, format='json') + response = self.client.post("/rest/v2/datasets?draft=true", cr, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['id'] + cr_id = response.data["id"] # getting dataset root directory identifier (%2F=='/') - root_dir = self.client.get('/rest/v2/directories/files?project={}&path=%2F&include_parent'.format(project)) - root_id = root_dir.data['id'] + root_dir = self.client.get( + "/rest/v2/directories/files?project={}&path=%2F&include_parent".format(project) + ) + root_id = root_dir.data["id"] # getting dataset files from / - response = self.client.get('/rest/v2/directories/files?cr_identifier={}&project={}&path=%2F&{}' - .format(cr_id, project, fields)) - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, 'Directory must be empty') + response = self.client.get( + "/rest/v2/directories/files?cr_identifier={}&project={}&path=%2F&{}".format( + cr_id, project, fields + ) + ) + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, "Directory must be empty") # adding file1 to dataset - first_file = { - 'files': [ - {"identifier": file1_id} - ]} + first_file = {"files": [{"identifier": file1_id}]} - response = self.client.post('/rest/v2/datasets/{}/files'.format(cr_id), first_file, format='json') + response = self.client.post( + "/rest/v2/datasets/{}/files".format(cr_id), first_file, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # getting dataset 
         # getting dataset files from /
-        response = self.client.get('/rest/v2/directories/{}/files?cr_identifier={}&fields'.format(root_id, cr_id))
+        response = self.client.get(
+            "/rest/v2/directories/{}/files?cr_identifier={}&fields".format(root_id, cr_id)
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        dirs = response.data['directories']
-        self.assertEqual(len(dirs), 1, 'Expected 1 directory')
-        self.assertEqual(dirs[0]['file_count'], 1, 'Expected 1 file in directory %s' % dirs[0]['directory_path'])
+        dirs = response.data["directories"]
+        self.assertEqual(len(dirs), 1, "Expected 1 directory")
+        self.assertEqual(
+            dirs[0]["file_count"],
+            1,
+            "Expected 1 file in directory %s" % dirs[0]["directory_path"],
+        )

         # getting dataset files from /dir/
-        response = self.client.get('/rest/v2/directories/{}/files?cr_identifier={}&include_parent&{}'
-            .format(dirs[0]['id'], cr_id, fields))
-        self.assertEqual(len(response.data['files']), 1, 'Expected 1 file in directory {}'
-            .format(dirs[0]['directory_path']))
-        self.assertEqual(response.data['file_count'], len(response.data['files']),
-            'Expected 1 file in parent file_count')
+        response = self.client.get(
+            "/rest/v2/directories/{}/files?cr_identifier={}&include_parent&{}".format(
+                dirs[0]["id"], cr_id, fields
+            )
+        )
+        self.assertEqual(
+            len(response.data["files"]),
+            1,
+            "Expected 1 file in directory {}".format(dirs[0]["directory_path"]),
+        )
+        self.assertEqual(
+            response.data["file_count"],
+            len(response.data["files"]),
+            "Expected 1 file in parent file_count",
+        )

         # getting non-dataset files from /dir/
-        response = self.client.get('/rest/v2/directories/{}/files?not_cr_identifier={}&include_parent&{}'
-            .format(dirs[0]['id'], cr_id, fields))
-        self.assertEqual(len(response.data['files']), 2, 'Expected 2 files in directory {}'
-            .format(dirs[0]['directory_path']))
-        self.assertEqual(response.data['file_count'], len(response.data['files']),
-            'Expected 2 file in parent file_count')
+        response = self.client.get(
+            "/rest/v2/directories/{}/files?not_cr_identifier={}&include_parent&{}".format(
+                dirs[0]["id"], cr_id, fields
+            )
+        )
+        self.assertEqual(
+            len(response.data["files"]),
+            2,
+            "Expected 2 files in directory {}".format(dirs[0]["directory_path"]),
+        )
+        self.assertEqual(
+            response.data["file_count"],
+            len(response.data["files"]),
+            "Expected 2 file in parent file_count",
+        )

         # adding file2 and file3 to dataset
-        last_files = {
-            'files': [
-                {'identifier': file2_id},
-                {'identifier': file3_id}
-            ]}
+        last_files = {"files": [{"identifier": file2_id}, {"identifier": file3_id}]}

-        response = self.client.post('/rest/v2/datasets/{}/files'.format(cr_id), last_files, format='json')
+        response = self.client.post(
+            "/rest/v2/datasets/{}/files".format(cr_id), last_files, format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

         # getting dataset files from /dir/
-        response = self.client.get('/rest/v2/directories/{}/files?cr_identifier={}&include_parent&{}'
-            .format(dirs[0]['id'], cr_id, fields))
-        self.assertEqual(len(response.data['files']), 3, 'Expected 3 files in directory {}'
-            .format(dirs[0]['directory_path']))
-        self.assertEqual(response.data['file_count'], len(response.data['files']),
-            'Expected 3 file in parent file_count')
+        response = self.client.get(
+            "/rest/v2/directories/{}/files?cr_identifier={}&include_parent&{}".format(
+                dirs[0]["id"], cr_id, fields
+            )
+        )
+        self.assertEqual(
+            len(response.data["files"]),
+            3,
{}".format(dirs[0]["directory_path"]), + ) + self.assertEqual( + response.data["file_count"], + len(response.data["files"]), + "Expected 3 file in parent file_count", + ) # getting non-dataset files from /dir/ - response = self.client.get('/rest/v2/directories/{}/files?not_cr_identifier={}&include_parent&{}' - .format(dirs[0]['id'], cr_id, fields)) - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, 'Directory must be empty') + response = self.client.get( + "/rest/v2/directories/{}/files?not_cr_identifier={}&include_parent&{}".format( + dirs[0]["id"], cr_id, fields + ) + ) + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, "Directory must be empty") # getting dataset files from / - response = self.client.get('/rest/v2/directories/{}/files?cr_identifier={}&fields'.format(root_id, cr_id)) + response = self.client.get( + "/rest/v2/directories/{}/files?cr_identifier={}&fields".format(root_id, cr_id) + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - dirs = response.data['directories'] - self.assertEqual(len(dirs), 1, 'Expected 1 directory') - self.assertEqual(dirs[0]['file_count'], 3, 'Expected 3 files in directory %s' % dirs[0]['directory_path']) + dirs = response.data["directories"] + self.assertEqual(len(dirs), 1, "Expected 1 directory") + self.assertEqual( + dirs[0]["file_count"], + 3, + "Expected 3 files in directory %s" % dirs[0]["directory_path"], + ) # getting dataset files from / - response = self.client.get('/rest/v2/directories/{}/files?not_cr_identifier={}&fields'.format(root_id, cr_id)) - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, 'Directory must be empty') + response = self.client.get( + "/rest/v2/directories/{}/files?not_cr_identifier={}&fields".format(root_id, cr_id) + ) + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, "Directory must be empty") diff --git a/src/metax_api/tests/api/rest/v2/views/files/read.py b/src/metax_api/tests/api/rest/v2/views/files/read.py index f10cc32f..d560630e 100755 --- a/src/metax_api/tests/api/rest/v2/views/files/read.py +++ b/src/metax_api/tests/api/rest/v2/views/files/read.py @@ -21,80 +21,96 @@ def setUpClass(cls): """ Loaded only once for test cases inside this class. 
""" - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(FileApiReadCommon, cls).setUpClass() def setUp(self): - file_from_test_data = self._get_object_from_test_data('file') - self.identifier = file_from_test_data['identifier'] - self.pk = file_from_test_data['id'] + file_from_test_data = self._get_object_from_test_data("file") + self.identifier = file_from_test_data["identifier"] + self.pk = file_from_test_data["id"] self._use_http_authorization() class FileApiReadBasicTests(FileApiReadCommon): - def test_read_file_list(self): - response = self.client.get('/rest/v2/files') + response = self.client.get("/rest/v2/files") self.assertEqual(response.status_code, status.HTTP_200_OK) def test_read_file_list_filter_by_project(self): proj = File.objects.get(pk=1).project_identifier file_count = File.objects.filter(project_identifier=proj).count() - response = self.client.get('/rest/v2/files?project_identifier=%s' % proj) + response = self.client.get("/rest/v2/files?project_identifier=%s" % proj) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['count'], file_count) + self.assertEqual(response.data["count"], file_count) def test_read_file_list_filter_by_project_and_path(self): proj = File.objects.get(pk=1).project_identifier path = "/project_x_FROZEN/Experiment_X/Phase_1/2017/01" file_count = File.objects.filter(project_identifier=proj, file_path__contains=path).count() - response = self.client.get('/rest/v2/files?project_identifier=%s&file_path=%s' % (proj, path)) + response = self.client.get( + "/rest/v2/files?project_identifier=%s&file_path=%s" % (proj, path) + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['count'], file_count) + self.assertEqual(response.data["count"], file_count) # missing project_identifier - response = self.client.get('/rest/v2/files?file_path=%s' % path) + response = self.client.get("/rest/v2/files?file_path=%s" % path) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_read_file_details_by_pk(self): - response = self.client.get('/rest/v2/files/%s' % self.pk) + response = self.client.get("/rest/v2/files/%s" % self.pk) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(hasattr(response, 'data'), True, 'Request response object is missing attribute \'data\'') - self.assertEqual('file_name' in response.data, True) - self.assertEqual(response.data['identifier'], self.identifier) - self.assertEqual('identifier' in response.data['file_storage'], True) + self.assertEqual( + hasattr(response, "data"), + True, + "Request response object is missing attribute 'data'", + ) + self.assertEqual("file_name" in response.data, True) + self.assertEqual(response.data["identifier"], self.identifier) + self.assertEqual("identifier" in response.data["file_storage"], True) def test_read_file_details_by_identifier(self): - response = self.client.get('/rest/v2/files/%s' % self.identifier) + response = self.client.get("/rest/v2/files/%s" % self.identifier) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(hasattr(response, 'data'), True, 'Request response object is missing attribute \'data\'') - self.assertEqual('file_name' in response.data.keys(), True) - self.assertEqual(response.data['identifier'], self.identifier) + self.assertEqual( + hasattr(response, "data"), + True, + "Request response object is missing attribute 'data'", + ) + 
self.assertEqual("file_name" in response.data.keys(), True) + self.assertEqual(response.data["identifier"], self.identifier) def test_read_file_details_not_found(self): - response = self.client.get('/rest/v2/files/shouldnotexist') + response = self.client.get("/rest/v2/files/shouldnotexist") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_read_file_details_checksum_relation(self): - response = self.client.get('/rest/v2/files/%s' % self.pk) + response = self.client.get("/rest/v2/files/%s" % self.pk) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('checksum' in response.data, True) - self.assertEqual('value' in response.data['checksum'], True) + self.assertEqual("checksum" in response.data, True) + self.assertEqual("value" in response.data["checksum"], True) def test_expand_relations(self): - response = self.client.get('/rest/v2/files/1?expand_relation=file_storage,parent_directory') + response = self.client.get("/rest/v2/files/1?expand_relation=file_storage,parent_directory") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('file_storage_json' in response.data['file_storage'], True, response.data['file_storage']) - self.assertEqual('date_created' in response.data['parent_directory'], True, response.data['parent_directory']) + self.assertEqual( + "file_storage_json" in response.data["file_storage"], + True, + response.data["file_storage"], + ) + self.assertEqual( + "date_created" in response.data["parent_directory"], + True, + response.data["parent_directory"], + ) class FileApiReadGetRelatedDatasets(FileApiReadCommon): - def test_get_related_datasets_ok_1(self): """ File pk 1 should belong to only 3 datasets """ - response = self.client.post('/rest/v2/files/datasets', [1], format='json') + response = self.client.post("/rest/v2/files/datasets", [1], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_results_length(response, 3) @@ -102,8 +118,10 @@ def test_get_related_datasets_ok_2(self): """ File identifiers listed below should belong to 5 datasets """ - file_identifiers = File.objects.filter(id__in=[1, 2, 3, 4, 5]).values_list('identifier', flat=True) - response = self.client.post('/rest/v2/files/datasets', file_identifiers, format='json') + file_identifiers = File.objects.filter(id__in=[1, 2, 3, 4, 5]).values_list( + "identifier", flat=True + ) + response = self.client.post("/rest/v2/files/datasets", file_identifiers, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_results_length(response, 5) @@ -111,31 +129,37 @@ def test_keysonly(self): """ Parameter ?keysonly should return just values """ - response = self.client.post('/rest/v2/files/datasets?keys=files&keysonly', [1, 2, 121], format='json') + response = self.client.post( + "/rest/v2/files/datasets?keys=files&keysonly", [1, 2, 121], format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_results_length(response, 2) # pid:urn:121 does not belong to any dataset - self.assertEqual(type(response.data), list, type(response.data)) # no dict keys + self._assert_results_length(response, 2) # pid:urn:121 does not belong to any dataset + self.assertEqual(type(response.data), list, type(response.data)) # no dict keys - response = self.client.post('/rest/v2/files/datasets?keys=files&keysonly=false', [1, 2], format='json') + response = self.client.post( + 
"/rest/v2/files/datasets?keys=files&keysonly=false", [1, 2], format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(type(response.data), dict, response.data) # Return by keys + self.assertEqual(type(response.data), dict, response.data) # Return by keys - response = self.client.post('/rest/v2/files/datasets?keys=datasets&keysonly', [1, 2, 14], format='json') + response = self.client.post( + "/rest/v2/files/datasets?keys=datasets&keysonly", [1, 2, 14], format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_results_length(response, 2) # Only datasets 1 and 2 have files - self.assertEqual(type(response.data), list, type(response.data)) # no dict keys + self._assert_results_length(response, 2) # Only datasets 1 and 2 have files + self.assertEqual(type(response.data), list, type(response.data)) # no dict keys def test_get_detailed_related_datasets_ok_1(self): """ File identifiers listed below should belong to 3 datasets """ - response = self.client.post('/rest/v2/files/datasets?keys=files', [1], format='json') + response = self.client.post("/rest/v2/files/datasets?keys=files", [1], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_results_length(response, 1) self.assertEqual(len(list(response.data.values())[0]), 3, response.data) # Support for ?detailed - response = self.client.post('/rest/v2/files/datasets?detailed', [1], format='json') + response = self.client.post("/rest/v2/files/datasets?detailed", [1], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_results_length(response, 1) self.assertEqual(len(list(response.data.values())[0]), 3, response.data) @@ -146,7 +170,9 @@ def test_get_detailed_related_datasets_ok_2(self): """ file_identifiers = [1, 2, 3, 4, 5] - response = self.client.post('/rest/v2/files/datasets?keys=files', file_identifiers, format='json') + response = self.client.post( + "/rest/v2/files/datasets?keys=files", file_identifiers, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_results_length(response, 5) @@ -154,9 +180,17 @@ def test_get_detailed_related_datasets_ok_2(self): self.assertEqual(len(set(sum(response.data.values(), []))), 5, response.data) # check if identifiers work - file_identifiers = ['pid:urn:1', 'pid:urn:2', 'pid:urn:3', 'pid:urn:4', 'pid:urn:5'] - - response = self.client.post('/rest/v2/files/datasets?keys=files', file_identifiers, format='json') + file_identifiers = [ + "pid:urn:1", + "pid:urn:2", + "pid:urn:3", + "pid:urn:4", + "pid:urn:5", + ] + + response = self.client.post( + "/rest/v2/files/datasets?keys=files", file_identifiers, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_results_length(response, 5) @@ -167,7 +201,7 @@ def test_get_detailed_related_files_ok_1(self): """ Dataset identifiers listed below should have 2 files """ - response = self.client.post('/rest/v2/files/datasets?keys=datasets', [1], format='json') + response = self.client.post("/rest/v2/files/datasets?keys=datasets", [1], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self._assert_results_length(response, 1) self.assertEqual(len(list(response.data.values())[0]), 2, response.data) @@ -178,7 +212,9 @@ def test_get_detailed_related_files_ok_2(self): """ dataset_identifiers = [1, 2, 3, 4, 5] - response = 
-        response = self.client.post('/rest/v2/files/datasets?keys=datasets', dataset_identifiers, format='json')
+        response = self.client.post(
+            "/rest/v2/files/datasets?keys=datasets", dataset_identifiers, format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         self._assert_results_length(response, 5)

@@ -186,13 +222,17 @@ def test_get_detailed_related_files_ok_2(self):
         self.assertEqual(len(set(sum(response.data.values(), []))), 10, response.data)

         # check if identifiers work
-        dataset_identifiers = ["cr955e904-e3dd-4d7e-99f1-3fed446f96d1",
-            "cr955e904-e3dd-4d7e-99f1-3fed446f96d2",
-            "cr955e904-e3dd-4d7e-99f1-3fed446f96d3",
-            "cr955e904-e3dd-4d7e-99f1-3fed446f96d4",
-            "cr955e904-e3dd-4d7e-99f1-3fed446f96d5"]
-
-        response = self.client.post('/rest/v2/files/datasets?keys=datasets', dataset_identifiers, format='json')
+        dataset_identifiers = [
+            "cr955e904-e3dd-4d7e-99f1-3fed446f96d1",
+            "cr955e904-e3dd-4d7e-99f1-3fed446f96d2",
+            "cr955e904-e3dd-4d7e-99f1-3fed446f96d3",
+            "cr955e904-e3dd-4d7e-99f1-3fed446f96d4",
+            "cr955e904-e3dd-4d7e-99f1-3fed446f96d5",
+        ]
+
+        response = self.client.post(
+            "/rest/v2/files/datasets?keys=datasets", dataset_identifiers, format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         self._assert_results_length(response, 5)

@@ -203,61 +243,84 @@ def test_get_right_files_and_datasets(self):
         """
         Check that returned files and datasets are the right ones
         """
-        testfile = self._get_object_from_test_data('file')
+        testfile = self._get_object_from_test_data("file")

-        cr = self.client.get('/rest/v2/datasets/10', format='json')
+        cr = self.client.get("/rest/v2/datasets/10", format="json")
         self.assertEqual(cr.status_code, status.HTTP_200_OK, cr.data)

-        response = self.client.post('/rest/v2/files/datasets?keys=datasets', [cr.data['identifier']], format='json')
+        response = self.client.post(
+            "/rest/v2/files/datasets?keys=datasets",
+            [cr.data["identifier"]],
+            format="json",
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

         # cr 10 has 2 default files
         for keys, values in response.data.items():
-            self.assertEqual(keys == 'cr955e904-e3dd-4d7e-99f1-3fed446f9610', True, response.data)
-            self.assertEqual('pid:urn:19' and 'pid:urn:20' in values, True, response.data)
+            self.assertEqual(keys == "cr955e904-e3dd-4d7e-99f1-3fed446f9610", True, response.data)
+            self.assertEqual("pid:urn:19" and "pid:urn:20" in values, True, response.data)

-        response = self.client.post('/rest/files/datasets?keys=files', [testfile['identifier']], format='json')
+        response = self.client.post(
+            "/rest/files/datasets?keys=files", [testfile["identifier"]], format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

         # file 1 belongs to 3 datasets
         for keys, values in response.data.items():
-            self.assertEqual(keys == 'pid:urn:1', True, response.data)
-            self.assertEqual('cr955e904-e3dd-4d7e-99f1-3fed446f96d1' and 'cr955e904-e3dd-4d7e-99f1-3fed446f9612'
-                and 'cr955e904-e3dd-4d7e-99f1-3fed446f9611' in values, True, response.data)
+            self.assertEqual(keys == "pid:urn:1", True, response.data)
+            self.assertEqual(
+                "cr955e904-e3dd-4d7e-99f1-3fed446f96d1"
+                and "cr955e904-e3dd-4d7e-99f1-3fed446f9612"
+                and "cr955e904-e3dd-4d7e-99f1-3fed446f9611" in values,
+                True,
+                response.data,
+            )

         # Dataset 11 has 20 files in a directory
-        cr = self.client.get('/rest/v2/datasets/11', format='json')
+        cr = self.client.get("/rest/v2/datasets/11", format="json")
         self.assertEqual(cr.status_code, status.HTTP_200_OK, cr.data)
         # Compare using return from different api
-        files_in_cr11 = self.client.get('/rest/v2/datasets/11/files', format='json')
+        files_in_cr11 = self.client.get("/rest/v2/datasets/11/files", format="json")
         self.assertEqual(files_in_cr11.status_code, status.HTTP_200_OK, files_in_cr11.data)
         identifiers = []
-        [identifiers.append(i['identifier']) for i in files_in_cr11.data]
+        [identifiers.append(i["identifier"]) for i in files_in_cr11.data]

-        response = self.client.post('/rest/v2/files/datasets?keys=datasets', [11], format='json')
+        response = self.client.post("/rest/v2/files/datasets?keys=datasets", [11], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

         # This should have the same file id's as the return from /rest/v2/datasets/11/files
-        self.assertEqual(sorted(response.data['cr955e904-e3dd-4d7e-99f1-3fed446f9611']), sorted(identifiers),
-            response.data)
-
-        response = self.client.post('/rest/v2/files/datasets?keys=files', ['pid:urn:20'], format='json')
+        self.assertEqual(
+            sorted(response.data["cr955e904-e3dd-4d7e-99f1-3fed446f9611"]),
+            sorted(identifiers),
+            response.data,
+        )
+
+        response = self.client.post(
+            "/rest/v2/files/datasets?keys=files", ["pid:urn:20"], format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

         # Dataset 11 should be found from results
-        self.assertTrue('cr955e904-e3dd-4d7e-99f1-3fed446f9611' in response.data['pid:urn:20'], response.data)
+        self.assertTrue(
+            "cr955e904-e3dd-4d7e-99f1-3fed446f9611" in response.data["pid:urn:20"],
+            response.data,
+        )

     def test_get_related_datasets_files_not_found(self):
         """
         When the files themselves are not found, 404 should be returned
         """
-        response = self.client.post('/rest/v2/files/datasets', ['doesnotexist'], format='json')
+        response = self.client.post("/rest/v2/files/datasets", ["doesnotexist"], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         self._assert_results_length(response, 0)

-        response = self.client.post('/rest/v2/files/datasets?keys=files', ['doesnotexist'], format='json')
+        response = self.client.post(
+            "/rest/v2/files/datasets?keys=files", ["doesnotexist"], format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         self._assert_results_length(response, 0)

         # Support for ?detailed
-        response = self.client.post('/rest/v2/files/datasets?detailed', ['doesnotexist'], format='json')
+        response = self.client.post(
+            "/rest/v2/files/datasets?detailed", ["doesnotexist"], format="json"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         self._assert_results_length(response, 0)

@@ -267,28 +330,30 @@ def test_get_related_datasets_records_not_found(self):
         """
         with connection.cursor() as cr:
             # detach file pk 1 from any datasets
-            cr.execute('delete from metax_api_catalogrecord_files where file_id = 1')
+            cr.execute("delete from metax_api_catalogrecord_files where file_id = 1")

-        response = self.client.post('/rest/v2/files/datasets', [1], format='json')
+        response = self.client.post("/rest/v2/files/datasets", [1], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         self._assert_results_length(response, 0)

-        response = self.client.post('/rest/v2/files/datasets?keys=files', [1], format='json')
+        response = self.client.post("/rest/v2/files/datasets?keys=files", [1], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         self._assert_results_length(response, 0)

         # Support for ?detailed
-        response = self.client.post('/rest/v2/files/datasets?detailed', [1], format='json')
+        response = self.client.post("/rest/v2/files/datasets?detailed", [1], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
         self._assert_results_length(response, 0)

     def _assert_results_length(self, response, length):
-        self.assertTrue(isinstance(response.data, dict) or isinstance(response.data, list), response.data)
+        self.assertTrue(
+            isinstance(response.data, dict) or isinstance(response.data, list),
+            response.data,
+        )
         self.assertEqual(len(response.data), length)


 class FileApiReadEndUserAccess(FileApiReadCommon):
-
     def setUp(self):
         super().setUp()
         self.token = get_test_oidc_token()

@@ -296,32 +361,36 @@ def setUp(self):
     @responses.activate
     def test_user_can_read_owned_files(self):
-        '''
+        """
         Ensure users can only read files owned by them from /rest/v2/files api.
-        '''
+        """

         # first read files without project access - should fail
-        self._use_http_authorization(method='bearer', token=self.token)
+        self._use_http_authorization(method="bearer", token=self.token)
         proj = File.objects.get(pk=1).project_identifier

-        response = self.client.get('/rest/v2/files/1')
+        response = self.client.get("/rest/v2/files/1")
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
-        response = self.client.get('/rest/v2/files?project_identifier=%s' % proj)
+        response = self.client.get("/rest/v2/files?project_identifier=%s" % proj)
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)

-        response = self.client.get('/rest/files?pagination=false')
+        response = self.client.get("/rest/files?pagination=false")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data), 0, 'should return 200 OK, but user projects has no files')
+        self.assertEqual(
+            len(response.data),
+            0,
+            "should return 200 OK, but user projects has no files",
+        )

         # set user to same project as previous files and try again. should now succeed
-        self.token['group_names'].append('IDA01:%s' % proj)
-        self._use_http_authorization(method='bearer', token=self.token)
+        self.token["group_names"].append("IDA01:%s" % proj)
+        self._use_http_authorization(method="bearer", token=self.token)

-        response = self.client.get('/rest/v2/files')
+        response = self.client.get("/rest/v2/files")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data) > 0, True, 'user should only see their own files')
+        self.assertEqual(len(response.data) > 0, True, "user should only see their own files")

-        response = self.client.get('/rest/v2/files/1')
+        response = self.client.get("/rest/v2/files/1")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        response = self.client.get('/rest/v2/files?project_identifier=%s' % proj)
+        response = self.client.get("/rest/v2/files?project_identifier=%s" % proj)
         self.assertEqual(response.status_code, status.HTTP_200_OK)
diff --git a/src/metax_api/tests/api/rest/v2/views/files/write.py b/src/metax_api/tests/api/rest/v2/views/files/write.py
index 4d5ddd64..9e3ea906 100755
--- a/src/metax_api/tests/api/rest/v2/views/files/write.py
+++ b/src/metax_api/tests/api/rest/v2/views/files/write.py
@@ -25,18 +25,18 @@ def setUpClass(cls):
         """
         Loaded only once for test cases inside this class.
""" - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(FileApiWriteCommon, cls).setUpClass() def setUp(self): """ Reloaded for every test case """ - call_command('loaddata', test_data_file_path, verbosity=0) - file_from_test_data = self._get_object_from_test_data('file') - self.identifier = file_from_test_data['identifier'] - self.pidentifier = file_from_test_data['project_identifier'] - self.file_name = file_from_test_data['file_name'] + call_command("loaddata", test_data_file_path, verbosity=0) + file_from_test_data = self._get_object_from_test_data("file") + self.identifier = file_from_test_data["identifier"] + self.pidentifier = file_from_test_data["project_identifier"] + self.file_name = file_from_test_data["file_name"] """ New data that is sent to the server for POST, PUT, PATCH requests. Modified @@ -47,21 +47,25 @@ def setUp(self): self._use_http_authorization() def _get_new_test_data(self): - from_test_data = self._get_object_from_test_data('file', requested_index=0) - from_test_data.update({ - "checksum": { - "value": "habeebit", - "algorithm": "SHA-256", - "checked": "2017-05-23T10:07:22.559656Z", - }, - "file_name": "file_name_1", - "file_path": from_test_data['file_path'].replace('/some/path', '/some/other_path'), - "identifier": "urn:nbn:fi:csc-ida201401200000000001", - "file_storage": self._get_object_from_test_data('filestorage', requested_index=0) - }) - from_test_data['file_path'] = from_test_data['file_path'].replace('/Experiment_X/', '/test/path/') - from_test_data['project_identifier'] = 'test_project_identifier' - del from_test_data['id'] + from_test_data = self._get_object_from_test_data("file", requested_index=0) + from_test_data.update( + { + "checksum": { + "value": "habeebit", + "algorithm": "SHA-256", + "checked": "2017-05-23T10:07:22.559656Z", + }, + "file_name": "file_name_1", + "file_path": from_test_data["file_path"].replace("/some/path", "/some/other_path"), + "identifier": "urn:nbn:fi:csc-ida201401200000000001", + "file_storage": self._get_object_from_test_data("filestorage", requested_index=0), + } + ) + from_test_data["file_path"] = from_test_data["file_path"].replace( + "/Experiment_X/", "/test/path/" + ) + from_test_data["project_identifier"] = "test_project_identifier" + del from_test_data["id"] return from_test_data def _get_second_new_test_data(self): @@ -73,7 +77,7 @@ def _get_second_new_test_data(self): def _count_dirs_from_path(self, file_path): expected_dirs_count = 1 dir_name = dirname(file_path) - while dir_name != '/': + while dir_name != "/": dir_name = dirname(dir_name) expected_dirs_count += 1 return expected_dirs_count @@ -83,34 +87,34 @@ def _check_project_root_byte_size_and_file_count(self, project_identifier): A rather simple test to fetch the root directory of a project, and verify that the root's calculated total byte size and file count match what exists in the db. 
""" - byte_size = File.objects.filter(project_identifier=project_identifier) \ - .aggregate(Sum('byte_size'))['byte_size__sum'] + byte_size = File.objects.filter(project_identifier=project_identifier).aggregate( + Sum("byte_size") + )["byte_size__sum"] file_count = File.objects.filter(project_identifier=project_identifier).count() - response = self.client.get('/rest/v2/directories/root?project=%s' % project_identifier) + response = self.client.get("/rest/v2/directories/root?project=%s" % project_identifier) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data['byte_size'], byte_size) - self.assertEqual(response.data['file_count'], file_count) + self.assertEqual(response.data["byte_size"], byte_size) + self.assertEqual(response.data["file_count"], file_count) def _change_file_path(self, file, new_name): - file['file_path'] = file['file_path'].replace(file['file_name'], new_name) - file['file_name'] = new_name + file["file_path"] = file["file_path"].replace(file["file_name"], new_name) + file["file_name"] = new_name class FileApiWriteReferenceDataValidationTests(FileApiWriteCommon): - @classmethod def setUpClass(cls): """ Loaded only once for test cases inside this class. """ - call_command('updatereferencedata', verbosity=0) + call_command("updatereferencedata", verbosity=0) super(FileApiWriteReferenceDataValidationTests, cls).setUpClass() def setUp(self): super().setUp() cache = RedisClient() - ffv_refdata = cache.get('reference_data')['reference_data']['file_format_version'] + ffv_refdata = cache.get("reference_data")["reference_data"]["file_format_version"] # File format version entry in reference data that has some output_format_version self.ff_with_version = None @@ -122,73 +126,116 @@ def setUp(self): for ffv_obj in ffv_refdata: if self.ff_with_different_version is None and self.ff_with_version is not None: - if ffv_obj['input_file_format'] == self.ff_with_version['input_file_format']: + if ffv_obj["input_file_format"] == self.ff_with_version["input_file_format"]: self.ff_with_different_version = ffv_obj - if self.ff_with_version is None and ffv_obj['output_format_version']: + if self.ff_with_version is None and ffv_obj["output_format_version"]: self.ff_with_version = ffv_obj - if self.ff_without_version is None and not ffv_obj['output_format_version']: + if self.ff_without_version is None and not ffv_obj["output_format_version"]: self.ff_without_version = ffv_obj - self.assertTrue(self.ff_with_version['output_format_version'] != '') - self.assertTrue(self.ff_with_different_version['output_format_version'] != '') - self.assertTrue(self.ff_with_version['input_file_format'] == - self.ff_with_different_version['input_file_format']) - self.assertTrue(self.ff_with_version['output_format_version'] != - self.ff_with_different_version['output_format_version']) - self.assertTrue(self.ff_without_version['output_format_version'] == '') - - def test_file_format_version_with_invalid_file_format_when_format_version_given_1(self): - self.test_new_data['file_characteristics']['format_version'] = 'any' - response = self.client.post('/rest/v2/files', self.test_new_data, format="json") + self.assertTrue(self.ff_with_version["output_format_version"] != "") + self.assertTrue(self.ff_with_different_version["output_format_version"] != "") + self.assertTrue( + self.ff_with_version["input_file_format"] + == self.ff_with_different_version["input_file_format"] + ) + self.assertTrue( + self.ff_with_version["output_format_version"] + != 
+            != self.ff_with_different_version["output_format_version"]
+        )
+        self.assertTrue(self.ff_without_version["output_format_version"] == "")
+
+    def test_file_format_version_with_invalid_file_format_when_format_version_given_1(
+        self,
+    ):
+        self.test_new_data["file_characteristics"]["format_version"] = "any"
+        response = self.client.post("/rest/v2/files", self.test_new_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertEqual('file_characteristics' in response.data.keys(), True)
-        self.assertEqual('file_characteristics.file_format' in response.data['file_characteristics'], True)
+        self.assertEqual("file_characteristics" in response.data.keys(), True)
+        self.assertEqual(
+            "file_characteristics.file_format" in response.data["file_characteristics"],
+            True,
+        )

-    def test_file_format_version_with_invalid_file_format_when_format_version_given_2(self):
-        self.test_new_data['file_characteristics']['file_format'] = 'nonexisting'
-        self.test_new_data['file_characteristics']['format_version'] = 'any'
-        response = self.client.post('/rest/v2/files', self.test_new_data, format="json")
+    def test_file_format_version_with_invalid_file_format_when_format_version_given_2(
+        self,
+    ):
+        self.test_new_data["file_characteristics"]["file_format"] = "nonexisting"
+        self.test_new_data["file_characteristics"]["format_version"] = "any"
+        response = self.client.post("/rest/v2/files", self.test_new_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertEqual('file_characteristics' in response.data.keys(), True)
-        self.assertEqual('file_characteristics.file_format' in response.data['file_characteristics'], True)
+        self.assertEqual("file_characteristics" in response.data.keys(), True)
+        self.assertEqual(
+            "file_characteristics.file_format" in response.data["file_characteristics"],
+            True,
+        )

-    def test_file_format_version_with_invalid_format_version_when_file_format_has_versions_1(self):
-        self.test_new_data['file_characteristics']['file_format'] = self.ff_with_version['input_file_format']
-        response = self.client.post('/rest/v2/files', self.test_new_data, format="json")
+    def test_file_format_version_with_invalid_format_version_when_file_format_has_versions_1(
+        self,
+    ):
+        self.test_new_data["file_characteristics"]["file_format"] = self.ff_with_version[
+            "input_file_format"
+        ]
+        response = self.client.post("/rest/v2/files", self.test_new_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertEqual('file_characteristics' in response.data.keys(), True)
-        self.assertEqual('file_characteristics.format_version' in response.data['file_characteristics'], True)
+        self.assertEqual("file_characteristics" in response.data.keys(), True)
+        self.assertEqual(
+            "file_characteristics.format_version" in response.data["file_characteristics"],
+            True,
+        )

-    def test_file_format_version_with_invalid_format_version_when_file_format_has_versions_2(self):
-        self.test_new_data['file_characteristics']['file_format'] = self.ff_with_version['input_file_format']
-        self.test_new_data['file_characteristics']['format_version'] = 'nonexisting'
-        response = self.client.post('/rest/v2/files', self.test_new_data, format="json")
+    def test_file_format_version_with_invalid_format_version_when_file_format_has_versions_2(
+        self,
+    ):
+        self.test_new_data["file_characteristics"]["file_format"] = self.ff_with_version[
+            "input_file_format"
+        ]
"nonexisting" + response = self.client.post("/rest/v2/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('file_characteristics' in response.data.keys(), True) - self.assertEqual('file_characteristics.format_version' in response.data['file_characteristics'], True) + self.assertEqual("file_characteristics" in response.data.keys(), True) + self.assertEqual( + "file_characteristics.format_version" in response.data["file_characteristics"], + True, + ) - def test_file_format_version_with_empty_format_version_when_file_format_has_no_version_1(self): - self.test_new_data['file_characteristics']['file_format'] = self.ff_without_version['input_file_format'] - response = self.client.post('/rest/v2/files', self.test_new_data, format="json") + def test_file_format_version_with_empty_format_version_when_file_format_has_no_version_1( + self, + ): + self.test_new_data["file_characteristics"]["file_format"] = self.ff_without_version[ + "input_file_format" + ] + response = self.client.post("/rest/v2/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) - def test_file_format_version_with_empty_format_version_when_file_format_has_no_version_2(self): - self.test_new_data['file_characteristics']['file_format'] = self.ff_without_version['input_file_format'] - self.test_new_data['file_characteristics']['format_version'] = '' - response = self.client.post('/rest/v2/files', self.test_new_data, format="json") + def test_file_format_version_with_empty_format_version_when_file_format_has_no_version_2( + self, + ): + self.test_new_data["file_characteristics"]["file_format"] = self.ff_without_version[ + "input_file_format" + ] + self.test_new_data["file_characteristics"]["format_version"] = "" + response = self.client.post("/rest/v2/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def test_file_format_version_with_valid_file_format_and_valid_file_version_1(self): - self.test_new_data['file_characteristics']['file_format'] = self.ff_with_version['input_file_format'] - self.test_new_data['file_characteristics']['format_version'] = self.ff_with_version['output_format_version'] - response = self.client.post('/rest/v2/files', self.test_new_data, format="json") + self.test_new_data["file_characteristics"]["file_format"] = self.ff_with_version[ + "input_file_format" + ] + self.test_new_data["file_characteristics"]["format_version"] = self.ff_with_version[ + "output_format_version" + ] + response = self.client.post("/rest/v2/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_file_format_version_with_valid_file_format_and_valid_file_version_2(self): - self.test_new_data['file_characteristics']['file_format'] = self.ff_with_version['input_file_format'] - self.test_new_data['file_characteristics']['format_version'] = \ - self.ff_with_different_version['output_format_version'] - response = self.client.post('/rest/v2/files', self.test_new_data, format="json") + self.test_new_data["file_characteristics"]["file_format"] = self.ff_with_version[ + "input_file_format" + ] + self.test_new_data["file_characteristics"][ + "format_version" + ] = self.ff_with_different_version["output_format_version"] + response = self.client.post("/rest/v2/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) # update tests @@ -197,23 
+244,30 @@ def test_file_characteristics_is_validated_on_update(self): """ Ensure validation also works when updating existing files. """ - self.test_new_data['file_characteristics']['file_format'] = self.ff_without_version['input_file_format'] - response = self.client.post('/rest/v2/files', self.test_new_data, format="json") + self.test_new_data["file_characteristics"]["file_format"] = self.ff_without_version[ + "input_file_format" + ] + response = self.client.post("/rest/v2/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - response = self.client.put('/rest/v2/files/%s' % response.data['identifier'], response.data, format="json") + response = self.client.put( + "/rest/v2/files/%s" % response.data["identifier"], + response.data, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_format_version_is_removed(self): """ Empty file format version should be removed """ - self.test_new_data['file_characteristics']['file_format'] = "text/csv" - self.test_new_data['file_characteristics']['format_version'] = "" + self.test_new_data["file_characteristics"]["file_format"] = "text/csv" + self.test_new_data["file_characteristics"]["format_version"] = "" - response = self.client.post('/rest/v2/files', self.test_new_data, format="json") + response = self.client.post("/rest/v2/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertTrue('format_version' not in response.data['file_characteristics']) + self.assertTrue("format_version" not in response.data["file_characteristics"]) + class FileApiWriteCreateTests(FileApiWriteCommon): # @@ -226,81 +280,92 @@ class FileApiWriteCreateTests(FileApiWriteCommon): def test_create_file(self): # note: leading and trailing whitespace must be preserved. 
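        # (the parenthesized form below relies on implicit concatenation of
        # adjacent string literals, e.g. (" a" "b ") == " ab ", so the value,
        # including its leading and trailing whitespace, stays byte-for-byte
        # identical to the old backslash-continued form)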
-        newly_created_file_name = " MX .201015_Suomessa_tavattavat_ruokasammakot_ovat_väritykseltään_vaihtelevia_" \
-            "osa_on_ruskeita,_osa_kirkkaankin_vihreitä._Vihersammakoiden_silmät_ovat_kohtalaisen_korkealla_päälae" \
+        newly_created_file_name = (
+            " MX .201015_Suomessa_tavattavat_ruokasammakot_ovat_väritykseltään_vaihtelevia_"
+            "osa_on_ruskeita,_osa_kirkkaankin_vihreitä._Vihersammakoiden_silmät_ovat_kohtalaisen_korkealla_päälae"
             "lla._Sammakkolampi.fi_CC-BY-NC-4.0_thumb.jpg.meta "
-        self.test_new_data['file_name'] = newly_created_file_name
-        self.test_new_data['identifier'] = 'urn:nbn:fi:csc-thisisanewurn'
+        )
+        self.test_new_data["file_name"] = newly_created_file_name
+        self.test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn"

-        response = self.client.post('/rest/v2/files', self.test_new_data, format="json")
+        response = self.client.post("/rest/v2/files", self.test_new_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual('file_name' in response.data.keys(), True)
-        self.assertEqual(response.data['file_name'], newly_created_file_name)
-        self._check_project_root_byte_size_and_file_count(response.data['project_identifier'])
+        self.assertEqual("file_name" in response.data.keys(), True)
+        self.assertEqual(response.data["file_name"], newly_created_file_name)
+        self._check_project_root_byte_size_and_file_count(response.data["project_identifier"])

     def test_create_file_error_identifier_exists(self):
         # first ok
-        response = self.client.post('/rest/v2/files', self.test_new_data, format="json")
+        response = self.client.post("/rest/v2/files", self.test_new_data, format="json")
         # second should give error
-        response = self.client.post('/rest/v2/files', self.test_new_data, format="json")
+        response = self.client.post("/rest/v2/files", self.test_new_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertEqual('identifier' in response.data.keys(), True)
-        self.assertEqual('already exists' in response.data['identifier'][0], True)
+        self.assertEqual("identifier" in response.data.keys(), True)
+        self.assertEqual("already exists" in response.data["identifier"][0], True)

     def test_allow_creating_previously_deleted_file(self):
         """
         It should be possible to delete a file, and then create the exact same
         file again without letting the removed file conflict.
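        In other words, the removed (soft-deleted) file's identifier should not
        trigger the 'already exists' check when the same payload is posted again.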
""" - response = self.client.post('/rest/v2/files', self.test_new_data, format="json") - response = self.client.delete('/rest/v2/files/%d' % response.data['id'], format="json") + response = self.client.post("/rest/v2/files", self.test_new_data, format="json") + response = self.client.delete("/rest/v2/files/%d" % response.data["id"], format="json") - response = self.client.post('/rest/v2/files', self.test_new_data, format="json") + response = self.client.post("/rest/v2/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def test_create_file_error_json_validation(self): - self.test_new_data['identifier'] = 'urn:nbn:fi:csc-thisisanewurn' - self.test_new_data['file_characteristics'] = { + self.test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" + self.test_new_data["file_characteristics"] = { "application_name": "Application Name", "description": "A nice description 0000000010", "metadata_modified": 12345, "file_created": "2014-01-17T08:19:31Z", "encoding": "utf-8", - "title": "A title 0000000010" + "title": "A title 0000000010", } - response = self.client.post('/rest/v2/files', self.test_new_data, format="json") + response = self.client.post("/rest/v2/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('file_characteristics' in response.data.keys(), True, - 'The error should concern the field file_characteristics') - self.assertEqual('metadata_modified' in response.data['file_characteristics'][0], True, - 'The error should contain the name of the erroneous field') - self.assertEqual('Json path:' in response.data['file_characteristics'][0], True, - 'The error should contain the json path') + self.assertEqual( + "file_characteristics" in response.data.keys(), + True, + "The error should concern the field file_characteristics", + ) + self.assertEqual( + "metadata_modified" in response.data["file_characteristics"][0], + True, + "The error should contain the name of the erroneous field", + ) + self.assertEqual( + "Json path:" in response.data["file_characteristics"][0], + True, + "The error should contain the json path", + ) def test_create_file_allowed_checksum_algorithm(self): - self.test_new_data['checksum']['algorithm'] = 'SHA-512' + self.test_new_data["checksum"]["algorithm"] = "SHA-512" - response = self.client.post('/rest/v2/files', self.test_new_data, format="json") + response = self.client.post("/rest/v2/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data['checksum']['algorithm'], 'SHA-512') + self.assertEqual(response.data["checksum"]["algorithm"], "SHA-512") - self.test_new_data['identifier'] = 'urn:nbn:fi:csc-md5' - self.test_new_data['file_path'] = '/md5/filepath/md5-filename' - self.test_new_data['file_name'] = 'md5-filename' - self.test_new_data['checksum']['algorithm'] = 'MD5' + self.test_new_data["identifier"] = "urn:nbn:fi:csc-md5" + self.test_new_data["file_path"] = "/md5/filepath/md5-filename" + self.test_new_data["file_name"] = "md5-filename" + self.test_new_data["checksum"]["algorithm"] = "MD5" - response = self.client.post('/rest/v2/files', self.test_new_data, format="json") + response = self.client.post("/rest/v2/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data['checksum']['algorithm'], 'MD5') + 
self.assertEqual(response.data["checksum"]["algorithm"], "MD5")

     def test_create_file_not_allowed_checksum_algorithm(self):
         from django.db import transaction

-        for algo in ['sha2', 'sha256', 'sha-256']:
+        for algo in ["sha2", "sha256", "sha-256"]:
             # run POST requests inside db transaction to ensure django testcase transactions
             # work correctly. https://stackoverflow.com/a/23326971/1201945 this probably has
             # something to do with the fact that POST requests to /rest/v2/files do not normally
@@ -310,45 +375,60 @@ def test_create_file_not_allowed_checksum_algorithm(self):
             # alternative for below would be to use optional query param ?dryrun=true, which
             # causes the request to be executed inside a transaction too.
             with transaction.atomic():
-                self.test_new_data['checksum']['algorithm'] = algo
-                response = self.client.post('/rest/v2/files', self.test_new_data, format="json")
+                self.test_new_data["checksum"]["algorithm"] = algo
+                response = self.client.post("/rest/v2/files", self.test_new_data, format="json")
                 self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
-                self.assertEqual('checksum_algorithm' in response.data, True)
+                self.assertEqual("checksum_algorithm" in response.data, True)

     #
     # create list operations
     #

     def test_create_file_list(self):
-        self.test_new_data['identifier'] = 'urn:nbn:fi:csc-thisisanewurn'
-        self._change_file_path(self.test_new_data, 'one_file.txt')
+        self.test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn"
+        self._change_file_path(self.test_new_data, "one_file.txt")

-        self.second_test_new_data['identifier'] = 'urn:nbn:fi:csc-thisisanewurnalso'
-        self._change_file_path(self.second_test_new_data, 'two_file.txt')
+        self.second_test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurnalso"
+        self._change_file_path(self.second_test_new_data, "two_file.txt")

-        response = self.client.post('/rest/v2/files', [self.test_new_data, self.second_test_new_data], format="json")
+        response = self.client.post(
+            "/rest/v2/files",
+            [self.test_new_data, self.second_test_new_data],
+            format="json",
+        )
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual('failed' in response.data.keys(), True)
-        self.assertEqual('success' in response.data.keys(), True)
-        self.assertEqual('object' in response.data['success'][0].keys(), True)
-        self.assertEqual(len(response.data['failed']), 0, response.data['failed'])
-        self.assertEqual(len(response.data['success']), 2)
-        self._check_project_root_byte_size_and_file_count(response.data['success'][0]['object']['project_identifier'])
+        self.assertEqual("failed" in response.data.keys(), True)
+        self.assertEqual("success" in response.data.keys(), True)
+        self.assertEqual("object" in response.data["success"][0].keys(), True)
+        self.assertEqual(len(response.data["failed"]), 0, response.data["failed"])
+        self.assertEqual(len(response.data["success"]), 2)
+        self._check_project_root_byte_size_and_file_count(
+            response.data["success"][0]["object"]["project_identifier"]
+        )

         # ensure structure of some specific fields is the same as when single files are created
-        self.assertEqual('identifier' in response.data['success'][0]['object']['file_storage'], True)
-        self.assertEqual('identifier' in response.data['success'][0]['object']['parent_directory'], True)
-        self.assertEqual('checksum' in response.data['success'][0]['object'], True)
-        self.assertEqual('value' in response.data['success'][0]['object']['checksum'], True)
+        self.assertEqual(
+            "identifier" in 
response.data["success"][0]["object"]["file_storage"], True + ) + self.assertEqual( + "identifier" in response.data["success"][0]["object"]["parent_directory"], + True, + ) + self.assertEqual("checksum" in response.data["success"][0]["object"], True) + self.assertEqual("value" in response.data["success"][0]["object"]["checksum"], True) def test_create_file_list_error_one_fails(self): - newly_created_file_name = 'newly_created_file_name' - self.test_new_data['file_name'] = newly_created_file_name - self.test_new_data['identifier'] = 'urn:nbn:fi:csc-thisisanewurn' + newly_created_file_name = "newly_created_file_name" + self.test_new_data["file_name"] = newly_created_file_name + self.test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" # same as above - should fail - self.second_test_new_data['identifier'] = 'urn:nbn:fi:csc-thisisanewurn' + self.second_test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" - response = self.client.post('/rest/v2/files', [self.test_new_data, self.second_test_new_data], format="json") + response = self.client.post( + "/rest/v2/files", + [self.test_new_data, self.second_test_new_data], + format="json", + ) """ List response looks like @@ -367,40 +447,50 @@ def test_create_file_list_error_one_fails(self): } """ self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual('object' in response.data['failed'][0].keys(), True) - self.assertEqual('file_name' in response.data['failed'][0]['object'].keys(), True) - self.assertEqual('identifier' in response.data['failed'][0]['errors'], True, - 'The error should have been about an already existing identifier') + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual("object" in response.data["failed"][0].keys(), True) + self.assertEqual("file_name" in response.data["failed"][0]["object"].keys(), True) + self.assertEqual( + "identifier" in response.data["failed"][0]["errors"], + True, + "The error should have been about an already existing identifier", + ) def test_parameter_ignore_already_exists_errors(self): - newly_created_file_name = 'newly_created_file_name' - self.test_new_data['file_name'] = newly_created_file_name - self.test_new_data['identifier'] = 'urn:nbn:fi:csc-thisisanewurn' + newly_created_file_name = "newly_created_file_name" + self.test_new_data["file_name"] = newly_created_file_name + self.test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" # same as above - should cause an error. 
- self.second_test_new_data['identifier'] = 'urn:nbn:fi:csc-thisisanewurn' + self.second_test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" - response = self.client.post('/rest/v2/files?ignore_already_exists_errors', - [self.test_new_data, self.second_test_new_data], format="json") + response = self.client.post( + "/rest/v2/files?ignore_already_exists_errors", + [self.test_new_data, self.second_test_new_data], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assertEqual(len(response.data), 2) - self.assertEqual('already exists' in response.data['success'][1]['object']['detail'], True) + self.assertEqual("already exists" in response.data["success"][1]["object"]["detail"], True) def test_create_file_list_error_all_fail(self): - newly_created_file_name = 'newly_created_file_name' - self.test_new_data['file_name'] = newly_created_file_name + newly_created_file_name = "newly_created_file_name" + self.test_new_data["file_name"] = newly_created_file_name # identifier is a required field, should fail - self.test_new_data['identifier'] = None - self.second_test_new_data['identifier'] = None + self.test_new_data["identifier"] = None + self.second_test_new_data["identifier"] = None - response = self.client.post('/rest/v2/files', [self.test_new_data, self.second_test_new_data], format="json") + response = self.client.post( + "/rest/v2/files", + [self.test_new_data, self.second_test_new_data], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual('failed' in response.data.keys(), True) - self.assertEqual('object' in response.data['failed'][0].keys(), True) - self.assertEqual(len(response.data['success']), 0) - self.assertEqual(len(response.data['failed']), 2) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual("failed" in response.data.keys(), True) + self.assertEqual("object" in response.data["failed"][0].keys(), True) + self.assertEqual(len(response.data["success"]), 0) + self.assertEqual(len(response.data["failed"]), 2) class FileApiWriteCreateDirectoriesTests(FileApiWriteCommon): @@ -416,19 +506,21 @@ def test_create_file_hierarchy_from_single_file(self): """ f = self._form_complex_list_from_test_file()[0] - file_path = '/project_y_FROZEN/Experiment_1/path/of/lonely/file_and_this_also_has_to_support' \ - 'veryverylooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo' \ - 'ooooooooooooooooooooooooooooooooooooooongdirectorynames/%s' - f['file_path'] = file_path % f['file_name'] - f['identifier'] = 'abc123111' + file_path = ( + "/project_y_FROZEN/Experiment_1/path/of/lonely/file_and_this_also_has_to_support" + "veryverylooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "ooooooooooooooooooooooooooooooooooooooongdirectorynames/%s" + ) + f["file_path"] = file_path % f["file_name"] + f["identifier"] = "abc123111" - response = self.client.post('/rest/v2/files', f, format="json") + response = self.client.post("/rest/v2/files", f, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('date_created' in response.data, True) - self.assertEqual('parent_directory' in response.data, True) + self.assertEqual("date_created" in response.data, True) + self.assertEqual("parent_directory" in response.data, True) - dirs_count = Directory.objects.filter(project_identifier='project_y').count() - 
dirs_created_count = self._count_dirs_from_path(f['file_path']) + dirs_count = Directory.objects.filter(project_identifier="project_y").count() + dirs_created_count = self._count_dirs_from_path(f["file_path"]) self.assertEqual(dirs_count, dirs_created_count) def test_create_file_append_to_existing_directory(self): @@ -438,24 +530,38 @@ def test_create_file_append_to_existing_directory(self): Note: Targeting project_x, which exists in pre-generated test data. """ - project_identifier = 'project_x' + project_identifier = "project_x" dir_count_before = Directory.objects.filter(project_identifier=project_identifier).count() - file_count_before = Directory.objects.filter(project_identifier=project_identifier, - directory_path='/project_x_FROZEN/Experiment_X/Phase_1').first().files.all().count() + file_count_before = ( + Directory.objects.filter( + project_identifier=project_identifier, + directory_path="/project_x_FROZEN/Experiment_X/Phase_1", + ) + .first() + .files.all() + .count() + ) f = self._form_complex_list_from_test_file()[0] - f['file_path'] = '/project_x_FROZEN/Experiment_X/Phase_1/%s' % f['file_name'] - f['identifier'] = '%s-111' % f['file_path'] - f['project_identifier'] = project_identifier + f["file_path"] = "/project_x_FROZEN/Experiment_X/Phase_1/%s" % f["file_name"] + f["identifier"] = "%s-111" % f["file_path"] + f["project_identifier"] = project_identifier - response = self.client.post('/rest/v2/files', f, format="json") + response = self.client.post("/rest/v2/files", f, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('date_created' in response.data, True) - self.assertEqual('parent_directory' in response.data, True) + self.assertEqual("date_created" in response.data, True) + self.assertEqual("parent_directory" in response.data, True) dir_count_after = Directory.objects.filter(project_identifier=project_identifier).count() - file_count_after = Directory.objects.filter(project_identifier=project_identifier, - directory_path='/project_x_FROZEN/Experiment_X/Phase_1').first().files.all().count() + file_count_after = ( + Directory.objects.filter( + project_identifier=project_identifier, + directory_path="/project_x_FROZEN/Experiment_X/Phase_1", + ) + .first() + .files.all() + .count() + ) self.assertEqual(dir_count_before, dir_count_after) self.assertEqual(file_count_after - file_count_before, 1) @@ -467,13 +573,13 @@ def test_create_file_hierarchy_from_file_list_with_no_existing_files(self): """ experiment_1_file_list = self._form_complex_list_from_test_file() - response = self.client.post('/rest/v2/files', experiment_1_file_list, format="json") + response = self.client.post("/rest/v2/files", experiment_1_file_list, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual(len(response.data['success']), 12) - self.assertEqual(len(response.data['failed']), 0) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual(len(response.data["success"]), 12) + self.assertEqual(len(response.data["failed"]), 0) - dirs_dict = self._assert_directory_parent_dirs('project_y') + dirs_dict = self._assert_directory_parent_dirs("project_y") self._assert_file_parent_dirs(dirs_dict, response) def test_create_file_hierarchy_from_file_list_with_existing_files(self): @@ -490,23 +596,25 @@ def test_create_file_hierarchy_from_file_list_with_existing_files(self): # setup db to have pre-existing dirs 
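        # (the second batch below is rooted under /project_y_FROZEN/Experiment_2/...,
        # so the previously created '/' and '/project_y_FROZEN' directories should
        # be reused, and only the new subtree should be created)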
experiment_1_file_list = self._form_complex_list_from_test_file() - response = self.client.post('/rest/v2/files', experiment_1_file_list, format="json") + response = self.client.post("/rest/v2/files", experiment_1_file_list, format="json") # form new test data experiment_2_file_list = self._form_complex_list_from_test_file() for i, f in enumerate(experiment_2_file_list): - f['file_path'] = f['file_path'].replace('/project_y_FROZEN/Experiment_1', - '/project_y_FROZEN/Experiment_2/Phase_1/Data') - f['identifier'] = '%s-%d' % (f['file_path'], i) + f["file_path"] = f["file_path"].replace( + "/project_y_FROZEN/Experiment_1", + "/project_y_FROZEN/Experiment_2/Phase_1/Data", + ) + f["identifier"] = "%s-%d" % (f["file_path"], i) - response = self.client.post('/rest/v2/files', experiment_2_file_list, format="json") + response = self.client.post("/rest/v2/files", experiment_2_file_list, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual(len(response.data['success']), 12) - self.assertEqual(len(response.data['failed']), 0) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual(len(response.data["success"]), 12) + self.assertEqual(len(response.data["failed"]), 0) - dirs_dict = self._assert_directory_parent_dirs('project_y') + dirs_dict = self._assert_directory_parent_dirs("project_y") self._assert_file_parent_dirs(dirs_dict, response) def test_append_files_to_existing_directory(self): @@ -518,7 +626,7 @@ def test_append_files_to_existing_directory(self): # setup db to have pre-existing dirs experiment_1_file_list = self._form_complex_list_from_test_file() - response = self.client.post('/rest/v2/files', experiment_1_file_list, format="json") + response = self.client.post("/rest/v2/files", experiment_1_file_list, format="json") # form new test data, and trim it down a bit experiment_2_file_list = self._form_complex_list_from_test_file() @@ -526,16 +634,16 @@ def test_append_files_to_existing_directory(self): experiment_2_file_list.pop() for i, f in enumerate(experiment_2_file_list): - f['file_path'] = '/project_y_FROZEN/Experiment_2/%s' % f['file_name'] - f['identifier'] = '%s-%d' % (f['file_path'], i) + f["file_path"] = "/project_y_FROZEN/Experiment_2/%s" % f["file_name"] + f["identifier"] = "%s-%d" % (f["file_path"], i) - response = self.client.post('/rest/v2/files', experiment_2_file_list, format="json") + response = self.client.post("/rest/v2/files", experiment_2_file_list, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual(len(response.data['failed']), 0, response.data['failed']) - self.assertEqual(len(response.data['success']), 5) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual(len(response.data["failed"]), 0, response.data["failed"]) + self.assertEqual(len(response.data["success"]), 5) - dirs_dict = self._assert_directory_parent_dirs('project_y') + dirs_dict = self._assert_directory_parent_dirs("project_y") self._assert_file_parent_dirs(dirs_dict, response) def test_append_one_file_to_existing_directory(self): @@ -547,22 +655,22 @@ def test_append_one_file_to_existing_directory(self): # setup db to have pre-existing dirs experiment_1_file_list = self._form_complex_list_from_test_file() - response = self.client.post('/rest/v2/files', experiment_1_file_list, format="json") + response = 
self.client.post("/rest/v2/files", experiment_1_file_list, format="json") # form new test data, but use just the first item experiment_2_file_list = self._form_complex_list_from_test_file()[0:1] for i, f in enumerate(experiment_2_file_list): - f['file_path'] = '/project_y_FROZEN/Experiment_2/%s' % f['file_name'] - f['identifier'] = '%s-%d' % (f['file_path'], i) + f["file_path"] = "/project_y_FROZEN/Experiment_2/%s" % f["file_name"] + f["identifier"] = "%s-%d" % (f["file_path"], i) - response = self.client.post('/rest/v2/files', experiment_2_file_list, format="json") + response = self.client.post("/rest/v2/files", experiment_2_file_list, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertEqual('success' in response.data.keys(), True) - self.assertEqual(len(response.data['success']), 1) - self.assertEqual(len(response.data['failed']), 0) + self.assertEqual("success" in response.data.keys(), True) + self.assertEqual(len(response.data["success"]), 1) + self.assertEqual(len(response.data["failed"]), 0) - dirs_dict = self._assert_directory_parent_dirs('project_y') + dirs_dict = self._assert_directory_parent_dirs("project_y") self._assert_file_parent_dirs(dirs_dict, response) def test_create_file_hierarchy_error_file_list_has_invalid_data(self): @@ -572,33 +680,35 @@ def test_create_file_hierarchy_error_file_list_has_invalid_data(self): in a single request is also not permitted. """ experiment_1_file_list = self._form_complex_list_from_test_file() - experiment_1_file_list[0].pop('file_path') - response = self.client.post('/rest/v2/files', experiment_1_file_list, format="json") + experiment_1_file_list[0].pop("file_path") + response = self.client.post("/rest/v2/files", experiment_1_file_list, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('file_path' in response.data, True) - self.assertEqual('required parameter' in response.data['file_path'][0], True) + self.assertEqual("file_path" in response.data, True) + self.assertEqual("required parameter" in response.data["file_path"][0], True) experiment_1_file_list = self._form_complex_list_from_test_file() - experiment_1_file_list[0].pop('project_identifier') - response = self.client.post('/rest/v2/files', experiment_1_file_list, format="json") + experiment_1_file_list[0].pop("project_identifier") + response = self.client.post("/rest/v2/files", experiment_1_file_list, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('project_identifier' in response.data, True) - self.assertEqual('required parameter' in response.data['project_identifier'][0], True) + self.assertEqual("project_identifier" in response.data, True) + self.assertEqual("required parameter" in response.data["project_identifier"][0], True) experiment_1_file_list = self._form_complex_list_from_test_file() - experiment_1_file_list[0]['project_identifier'] = 'second_project' - response = self.client.post('/rest/v2/files', experiment_1_file_list, format="json") + experiment_1_file_list[0]["project_identifier"] = "second_project" + response = self.client.post("/rest/v2/files", experiment_1_file_list, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('project_identifier' in response.data, True) - self.assertEqual('multiple projects' in response.data['project_identifier'][0], True) + self.assertEqual("project_identifier" in response.data, True) + self.assertEqual("multiple projects" in 
response.data["project_identifier"][0], True) def test_filepath_starts_with_slash(self): file = self._get_new_test_data() - file['file_path'] = file['file_path'][1:] + file["file_path"] = file["file_path"][1:] - response = self.client.post('/rest/v2/files', file, format="json") + response = self.client.post("/rest/v2/files", file, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue("file path should start with '/' to point to the root" in response.data['file_path'][0]) + self.assertTrue( + "file path should start with '/' to point to the root" in response.data["file_path"][0] + ) def _assert_directory_parent_dirs(self, project_identifier): """ @@ -608,17 +718,24 @@ def _assert_directory_parent_dirs(self, project_identifier): for d in Directory.objects.filter(project_identifier=project_identifier): dirs_dict[d.directory_path] = { - 'dir_id': d.id, - 'parent_dir_id': d.parent_directory and d.parent_directory.id or None + "dir_id": d.id, + "parent_dir_id": d.parent_directory and d.parent_directory.id or None, } for dir_path, ids in dirs_dict.items(): - if dir_path == '/': - self.assertEqual(ids['parent_dir_id'], None, 'root dir \'/\' should not have a parent directory') + if dir_path == "/": + self.assertEqual( + ids["parent_dir_id"], + None, + "root dir '/' should not have a parent directory", + ) continue expected_parent_dir_path = dirname(dir_path) - self.assertEqual(ids['parent_dir_id'], dirs_dict[expected_parent_dir_path]['dir_id'], - 'parent dir not as expected.') + self.assertEqual( + ids["parent_dir_id"], + dirs_dict[expected_parent_dir_path]["dir_id"], + "parent dir not as expected.", + ) return dirs_dict @@ -626,11 +743,14 @@ def _assert_file_parent_dirs(self, dirs_dict, response): """ Check files have parent dirs as expected. 
""" - for entry in response.data['success']: - f = entry['object'] - excpected_parent_dir_path = dirname(f['file_path']) - self.assertEqual(f['parent_directory']['id'], dirs_dict[excpected_parent_dir_path]['dir_id'], - 'parent dir not as expected.') + for entry in response.data["success"]: + f = entry["object"] + excpected_parent_dir_path = dirname(f["file_path"]) + self.assertEqual( + f["parent_directory"]["id"], + dirs_dict[excpected_parent_dir_path]["dir_id"], + "parent dir not as expected.", + ) def _form_complex_list_from_test_file(self): """ @@ -688,18 +808,20 @@ def _form_complex_list_from_test_file(self): ] template = self.test_new_data - template.pop('id', None) - template.pop('identifier', None) - template.pop('project_identifier', None) - template.pop('parent_directory', None) - template.pop('date_created', None) - template.pop('date_modified', None) - template.pop('service_created', None) + template.pop("id", None) + template.pop("identifier", None) + template.pop("project_identifier", None) + template.pop("parent_directory", None) + template.pop("date_created", None) + template.pop("date_modified", None) + template.pop("service_created", None) files = [] for i, d in enumerate(dir_data): files.append(deepcopy(template)) - files[-1].update(d, identifier='pid:urn:test:file:%d' % i, project_identifier='project_y') + files[-1].update( + d, identifier="pid:urn:test:file:%d" % i, project_identifier="project_y" + ) return files @@ -710,15 +832,15 @@ class FileApiWriteUpdateTests(FileApiWriteCommon): """ def test_update_file(self): - f = self.client.get('/rest/v2/files/1').data - f['file_format'] = 'csv' - response = self.client.put('/rest/v2/files/%s' % f['identifier'], f, format="json") + f = self.client.get("/rest/v2/files/1").data + f["file_format"] = "csv" + response = self.client.put("/rest/v2/files/%s" % f["identifier"], f, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) def test_prevent_file_path_update_after_create(self): - f = self.client.get('/rest/v2/files/1').data - f['file_path'] = '%s_bak' % f['file_path'] - response = self.client.put('/rest/v2/files/%s' % f['identifier'], f, format="json") + f = self.client.get("/rest/v2/files/1").data + f["file_path"] = "%s_bak" % f["file_path"] + response = self.client.put("/rest/v2/files/%s" % f["identifier"], f, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_update_file_error_required_fields(self): @@ -726,122 +848,151 @@ def test_update_file_error_required_fields(self): Field 'project_identifier' is missing, which should result in an error, since PUT replaces an object and requires all 'required' fields to be present. 
""" - self.test_new_data.pop('project_identifier') - response = self.client.put('/rest/v2/files/%s' % self.identifier, self.test_new_data, format="json") + self.test_new_data.pop("project_identifier") + response = self.client.put( + "/rest/v2/files/%s" % self.identifier, self.test_new_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('project_identifier' in response.data.keys(), True, - 'Error for field \'project_identifier\' is missing from response.data') + self.assertEqual( + "project_identifier" in response.data.keys(), + True, + "Error for field 'project_identifier' is missing from response.data", + ) def test_update_file_not_found(self): - response = self.client.put('/rest/v2/files/doesnotexist', self.test_new_data, format="json") + response = self.client.put("/rest/v2/files/doesnotexist", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_update_file_allowed_projects_ok(self): - f = self.client.get('/rest/v2/files/1').data - response = self.client.put('/rest/v2/files/%s?allowed_projects=%s' % (f['identifier'], f['project_identifier']), - f, format="json") + f = self.client.get("/rest/v2/files/1").data + response = self.client.put( + "/rest/v2/files/%s?allowed_projects=%s" % (f["identifier"], f["project_identifier"]), + f, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_file_allowed_projects_fail(self): - f = self.client.get('/rest/v2/files/1').data + f = self.client.get("/rest/v2/files/1").data response = self.client.put( - '/rest/v2/files/%s?allowed_projects=nopermission' % f['identifier'], f, format="json" + "/rest/v2/files/%s?allowed_projects=nopermission" % f["identifier"], + f, + format="json", ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_update_file_allowed_projects_not_dict(self): - f = self.client.get('/rest/v2/files/1').data - response = self.client.put('/rest/v2/files/%s?allowed_projects=%s' % (f['identifier'], f['project_identifier']), - [f], format="json") + f = self.client.get("/rest/v2/files/1").data + response = self.client.put( + "/rest/v2/files/%s?allowed_projects=%s" % (f["identifier"], f["project_identifier"]), + [f], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('json' in response.data['detail'][0], True, 'Error regarding datatype') + self.assertEqual("json" in response.data["detail"][0], True, "Error regarding datatype") # # update list operations PUT # def test_file_update_list(self): - f1 = self.client.get('/rest/v2/files/1').data - f2 = self.client.get('/rest/v2/files/2').data - new_file_format = 'changed-format' - new_file_format_2 = 'changed-format-2' - f1['file_format'] = new_file_format - f2['file_format'] = new_file_format_2 - - response = self.client.put('/rest/v2/files', [f1, f2], format="json") + f1 = self.client.get("/rest/v2/files/1").data + f2 = self.client.get("/rest/v2/files/2").data + new_file_format = "changed-format" + new_file_format_2 = "changed-format-2" + f1["file_format"] = new_file_format + f2["file_format"] = new_file_format_2 + + response = self.client.put("/rest/v2/files", [f1, f2], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) updated_file = File.objects.get(pk=1) self.assertEqual(updated_file.file_format, new_file_format) def test_file_update_list_error_one_fails(self): - f1 = self.client.get('/rest/v2/files/1').data - f2 = 
self.client.get('/rest/v2/files/2').data
-        new_file_format = 'changed-format'
-        f1['file_format'] = new_file_format
+        f1 = self.client.get("/rest/v2/files/1").data
+        f2 = self.client.get("/rest/v2/files/2").data
+        new_file_format = "changed-format"
+        f1["file_format"] = new_file_format

         # can't be null - should fail
-        f2['file_frozen'] = None
+        f2["file_frozen"] = None

-        response = self.client.put('/rest/v2/files', [f1, f2], format="json")
+        response = self.client.put("/rest/v2/files", [f1, f2], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['success']), 1, 'success list should be empty')
-        self.assertEqual(len(response.data['failed']), 1, 'there should have been one failed element')
-        self.assertEqual('file_frozen' in response.data['failed'][0]['errors'], True,
-                         'error should be about file_characteristics missing')
+        self.assertEqual(
+            len(response.data["success"]), 1, "success list should contain one element"
+        )
+        self.assertEqual(
+            len(response.data["failed"]), 1, "there should have been one failed element"
+        )
+        self.assertEqual(
+            "file_frozen" in response.data["failed"][0]["errors"],
+            True,
+            "error should be about file_frozen being null",
+        )

         updated_file = File.objects.get(pk=1)
         self.assertEqual(updated_file.file_format, new_file_format)

     def test_file_update_list_error_key_not_found(self):
-        f1 = self.client.get('/rest/v2/files/1').data
-        f2 = self.client.get('/rest/v2/files/2').data
-        new_file_format = 'changed-format'
-        new_file_format_2 = 'changed-format-2'
-        f1['file_format'] = new_file_format
-        f2['file_format'] = new_file_format_2
+        f1 = self.client.get("/rest/v2/files/1").data
+        f2 = self.client.get("/rest/v2/files/2").data
+        new_file_format = "changed-format"
+        new_file_format_2 = "changed-format-2"
+        f1["file_format"] = new_file_format
+        f2["file_format"] = new_file_format_2

         # has no lookup key - should fail
-        f2.pop('id')
-        f2.pop('identifier')
+        f2.pop("id")
+        f2.pop("identifier")

-        response = self.client.put('/rest/v2/files', [f1, f2], format="json")
+        response = self.client.put("/rest/v2/files", [f1, f2], format="json")
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data['success']), 1, 'success list should be empty')
-        self.assertEqual(len(response.data['failed']), 1, 'there should have been one failed element')
-        error_msg_of_failed_row = response.data['failed'][0]['errors']['detail'][0]
-        self.assertEqual('identifying keys' in error_msg_of_failed_row, True,
-                         'error should be about identifying keys missing')
+        self.assertEqual(
+            len(response.data["success"]), 1, "success list should contain one element"
+        )
+        self.assertEqual(
+            len(response.data["failed"]), 1, "there should have been one failed element"
+        )
+        error_msg_of_failed_row = response.data["failed"][0]["errors"]["detail"][0]
+        self.assertEqual(
+            "identifying keys" in error_msg_of_failed_row,
+            True,
+            "error should be about identifying keys missing",
+        )

         updated_file = File.objects.get(pk=1)
         self.assertEqual(updated_file.file_format, new_file_format)

     def test_file_update_list_allowed_projects_ok(self):
         # Both files in project 'project_x'
-        f1 = self.client.get('/rest/v2/files/1').data
-        f2 = self.client.get('/rest/v2/files/2').data
+        f1 = self.client.get("/rest/v2/files/1").data
+        f2 = self.client.get("/rest/v2/files/2").data

-        response = self.client.put('/rest/v2/files?allowed_projects=project_x,y,z', [f1, f2], format="json")
+        response = self.client.put(
+            
"/rest/v2/files?allowed_projects=project_x,y,z", [f1, f2], format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_file_update_list_allowed_projects_fail(self): # Files in projects 'project_x' and 'research_project_112' - f1 = self.client.get('/rest/v2/files/1').data - f2 = self.client.get('/rest/v2/files/39').data + f1 = self.client.get("/rest/v2/files/1").data + f2 = self.client.get("/rest/v2/files/39").data - response = self.client.put('/rest/v2/files?allowed_projects=project_x,y,z', [f1, f2], format="json") + response = self.client.put( + "/rest/v2/files?allowed_projects=project_x,y,z", [f1, f2], format="json" + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) def test_file_update_list_allowed_projects_empty_value(self): - f1 = self.client.get('/rest/v2/files/1').data - response = self.client.put('/rest/v2/files?allowed_projects=', [f1], format="json") + f1 = self.client.get("/rest/v2/files/1").data + response = self.client.put("/rest/v2/files?allowed_projects=", [f1], format="json") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) def test_file_update_list_allowed_projects_not_list(self): new_data_1 = {} - new_data_1['identifier'] = "pid:urn:1" - new_data_1['file_name'] = 'Nice_new_name' + new_data_1["identifier"] = "pid:urn:1" + new_data_1["file_name"] = "Nice_new_name" - res = self.client.patch('/rest/v2/files?allowed_projects=y,z,project_x', new_data_1, format="json") + res = self.client.patch( + "/rest/v2/files?allowed_projects=y,z,project_x", new_data_1, format="json" + ) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST, res.data) @@ -854,29 +1005,43 @@ def test_update_file_partial(self): new_data = { "file_name": "new_file_name", } - response = self.client.patch('/rest/v2/files/%s' % self.identifier, new_data, format="json") + response = self.client.patch("/rest/v2/files/%s" % self.identifier, new_data, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('file_name' in response.data.keys(), True) - self.assertEqual('file_path' in response.data.keys(), True, 'PATCH operation should return full content') - self.assertEqual(response.data['file_name'], 'new_file_name', 'Field file_name was not updated') + self.assertEqual("file_name" in response.data.keys(), True) + self.assertEqual( + "file_path" in response.data.keys(), + True, + "PATCH operation should return full content", + ) + self.assertEqual( + response.data["file_name"], + "new_file_name", + "Field file_name was not updated", + ) def test_update_partial_allowed_projects_ok(self): new_data = { "file_name": "new_file_name", } - response = self.client.patch('/rest/v2/files/%s?allowed_projects=%s' % (self.identifier, self.pidentifier), - new_data, format="json") + response = self.client.patch( + "/rest/v2/files/%s?allowed_projects=%s" % (self.identifier, self.pidentifier), + new_data, + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['file_name'], 'new_file_name', response.data) + self.assertEqual(response.data["file_name"], "new_file_name", response.data) def test_update_partial_allowed_projects_fail(self): new_data = { "file_name": "new_file_name", } - response = self.client.patch('/rest/v2/files/%s?allowed_projects=noproject' % self.identifier, - new_data, format="json") + response = self.client.patch( + "/rest/v2/files/%s?allowed_projects=noproject" % self.identifier, + new_data, + 
format="json", + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) @@ -884,79 +1049,105 @@ def test_update_partial_allowed_projects_not_dict(self): new_data = { "file_name": "new_file_name", } - response = self.client.patch('/rest/v2/files/%s?allowed_projects=%s' % (self.identifier, self.pidentifier), - [new_data], format="json") + response = self.client.patch( + "/rest/v2/files/%s?allowed_projects=%s" % (self.identifier, self.pidentifier), + [new_data], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual('json' in response.data['detail'][0], True, 'Error regarding datatype') + self.assertEqual("json" in response.data["detail"][0], True, "Error regarding datatype") # # update list operations PATCH # def test_file_partial_update_list(self): - new_project_identifier = 'changed-project-identifier' - new_project_identifier_2 = 'changed-project-identifier-2' + new_project_identifier = "changed-project-identifier" + new_project_identifier_2 = "changed-project-identifier-2" test_data = {} - test_data['id'] = 1 - test_data['project_identifier'] = new_project_identifier + test_data["id"] = 1 + test_data["project_identifier"] = new_project_identifier second_test_data = {} - second_test_data['id'] = 2 - second_test_data['project_identifier'] = new_project_identifier_2 + second_test_data["id"] = 2 + second_test_data["project_identifier"] = new_project_identifier_2 - response = self.client.patch('/rest/v2/files', [test_data, second_test_data], format="json") + response = self.client.patch("/rest/v2/files", [test_data, second_test_data], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual('success' in response.data, True, 'response.data should contain list of changed objects') - self.assertEqual(len(response.data['success']), 2, 'response.data should contain 2 changed objects') - self.assertEqual('file_characteristics' in response.data['success'][0]['object'], True, - 'response.data should contain full objects') + self.assertEqual( + "success" in response.data, + True, + "response.data should contain list of changed objects", + ) + self.assertEqual( + len(response.data["success"]), + 2, + "response.data should contain 2 changed objects", + ) + self.assertEqual( + "file_characteristics" in response.data["success"][0]["object"], + True, + "response.data should contain full objects", + ) updated_file = File.objects.get(pk=1) - self.assertEqual(updated_file.project_identifier, new_project_identifier, 'project_identifier did not update') + self.assertEqual( + updated_file.project_identifier, + new_project_identifier, + "project_identifier did not update", + ) def test_file_partial_update_list_allowed_projects_ok(self): new_data_1 = {} - new_data_1['identifier'] = "pid:urn:1" - new_data_1['file_name'] = 'Nice_new_name' + new_data_1["identifier"] = "pid:urn:1" + new_data_1["file_name"] = "Nice_new_name" new_data_2 = {} - new_data_2['identifier'] = "pid:urn:2" - new_data_2['file_name'] = 'Not_so_nice_name' + new_data_2["identifier"] = "pid:urn:2" + new_data_2["file_name"] = "Not_so_nice_name" res = self.client.patch( - '/rest/v2/files?allowed_projects=y,z,project_x', [new_data_1, new_data_2], format="json" + "/rest/v2/files?allowed_projects=y,z,project_x", + [new_data_1, new_data_2], + format="json", ) self.assertEqual(res.status_code, status.HTTP_200_OK, res.data) - self.assertEqual(res.data['success'][0]['object']['file_name'], 'Nice_new_name', 
res.data) + self.assertEqual(res.data["success"][0]["object"]["file_name"], "Nice_new_name", res.data) def test_file_partial_update_list_allowed_projects_fail(self): # Files in projects 'project_x' and 'research_project_112' - f1 = self.client.get('/rest/v2/files/1').data - f2 = self.client.get('/rest/v2/files/39').data + f1 = self.client.get("/rest/v2/files/1").data + f2 = self.client.get("/rest/v2/files/39").data - response = self.client.patch('/rest/v2/files?allowed_projects=project_x,y,z', [f1, f2], format="json") + response = self.client.patch( + "/rest/v2/files?allowed_projects=project_x,y,z", [f1, f2], format="json" + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) def test_file_partial_update_list_allowed_projects_not_list(self): new_data_1 = {} - new_data_1['identifier'] = "pid:urn:1" - new_data_1['file_name'] = 'Nice_new_name' + new_data_1["identifier"] = "pid:urn:1" + new_data_1["file_name"] = "Nice_new_name" - res = self.client.patch('/rest/v2/files?allowed_projects=y,z,project_x', new_data_1, format="json") + res = self.client.patch( + "/rest/v2/files?allowed_projects=y,z,project_x", new_data_1, format="json" + ) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST, res.data) def test_file_partial_update_list_allowed_projects_no_identifier(self): new_data_1 = {} - new_data_1['file_name'] = 'Nice_new_name' + new_data_1["file_name"] = "Nice_new_name" new_data_2 = {} - new_data_2['id'] = 23 - new_data_2['file_name'] = 'Not_so_nice_name' + new_data_2["id"] = 23 + new_data_2["file_name"] = "Not_so_nice_name" res = self.client.patch( - '/rest/v2/files?allowed_projects=y,z,project_x', [new_data_1, new_data_2], format="json" + "/rest/v2/files?allowed_projects=y,z,project_x", + [new_data_1, new_data_2], + format="json", ) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST, res.data) @@ -972,34 +1163,44 @@ class FileApiWriteDeleteTests(FileApiWriteCommon): def test_delete_single_file_ok(self): dir_count_before = Directory.objects.all().count() - response = self.client.delete('/rest/v2/files/1') + response = self.client.delete("/rest/v2/files/1") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('deleted_files_count' in response.data, True, response.data) - self.assertEqual(response.data['deleted_files_count'], 1, response.data) + self.assertEqual("deleted_files_count" in response.data, True, response.data) + self.assertEqual(response.data["deleted_files_count"], 1, response.data) dir_count_after = Directory.objects.all().count() - self.assertEqual(dir_count_before, dir_count_after, 'no dirs should have been deleted') + self.assertEqual(dir_count_before, dir_count_after, "no dirs should have been deleted") deleted_file = File.objects_unfiltered.get(pk=1) self._check_project_root_byte_size_and_file_count(deleted_file.project_identifier) - self.assertEqual(deleted_file.date_modified, deleted_file.file_deleted, 'date_modified should be updated') + self.assertEqual( + deleted_file.date_modified, + deleted_file.file_deleted, + "date_modified should be updated", + ) def test_delete_single_file_ok_destroy_leading_dirs(self): - project_identifier = 'project_z' + project_identifier = "project_z" test_data = deepcopy(self.test_new_data) - test_data['file_path'] = '/project_z/some/path/here/%s' % test_data['file_name'] - test_data['project_identifier'] = project_identifier - test_data['identifier'] = 'abc123' - response = self.client.post('/rest/v2/files', test_data, format='json') - 
self.assertEqual(Directory.objects.filter(project_identifier=project_identifier).exists(), True)
+        test_data["file_path"] = "/project_z/some/path/here/%s" % test_data["file_name"]
+        test_data["project_identifier"] = project_identifier
+        test_data["identifier"] = "abc123"
+        response = self.client.post("/rest/v2/files", test_data, format="json")
+        self.assertEqual(
+            Directory.objects.filter(project_identifier=project_identifier).exists(),
+            True,
+        )

-        response = self.client.delete('/rest/v2/files/%s' % response.data['id'])
+        response = self.client.delete("/rest/v2/files/%s" % response.data["id"])
         self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual('deleted_files_count' in response.data, True, response.data)
-        self.assertEqual(response.data['deleted_files_count'], 1, response.data)
+        self.assertEqual("deleted_files_count" in response.data, True, response.data)
+        self.assertEqual(response.data["deleted_files_count"], 1, response.data)

-        self.assertEqual(Directory.objects.filter(project_identifier=project_identifier).exists(), False)
+        self.assertEqual(
+            Directory.objects.filter(project_identifier=project_identifier).exists(),
+            False,
+        )

     def test_delete_single_file_404(self):
-        response = self.client.delete('/rest/v2/files/doesnotexist')
+        response = self.client.delete("/rest/v2/files/doesnotexist")
         self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

     def test_bulk_delete_files_identifiers_not_found(self):
@@ -1007,8 +1208,8 @@ def test_bulk_delete_files_identifiers_not_found(self):
         A bulk delete request to /files, but any of the identifiers provided are not found.
         Should return 404.
         """
-        identifiers = ['nope', 'doesnotexist', 'stillno']
-        response = self.client.delete('/rest/v2/files', identifiers, format="json")
+        identifiers = ["nope", "doesnotexist", "stillno"]
+        response = self.client.delete("/rest/v2/files", identifiers, format="json")
         self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.data)

     def test_bulk_delete_files_some_identifiers_not_found(self):
@@ -1017,13 +1218,15 @@ def test_bulk_delete_files_some_identifiers_not_found(self):
         Should be ok, delete those files that are found. Presumably those identifiers
         that were not found did not exist anyway, therefore no harm is done.
""" - identifiers = ['nope', 'doesnotexist', 'stillno'] + identifiers = ["nope", "doesnotexist", "stillno"] identifiers.append(File.objects.get(pk=1).identifier) - response = self.client.delete('/rest/v2/files', identifiers, format="json") + response = self.client.delete("/rest/v2/files", identifiers, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) removed = File.objects_unfiltered.get(pk=1).removed - self.assertEqual(removed, True, 'file should have been removed') - self._check_project_root_byte_size_and_file_count(File.objects_unfiltered.get(pk=1).project_identifier) + self.assertEqual(removed, True, "file should have been removed") + self._check_project_root_byte_size_and_file_count( + File.objects_unfiltered.get(pk=1).project_identifier + ) def test_bulk_delete_files_in_single_directory_1(self): """ @@ -1036,7 +1239,7 @@ def test_bulk_delete_files_in_single_directory_1(self): all_files_count_before = File.objects.all().count() file_ids = [f.id for f in Directory.objects.get(pk=3).files.all()] - response = self.client.delete('/rest/v2/files', file_ids, format="json") + response = self.client.delete("/rest/v2/files", file_ids, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) all_files_count_after = File.objects.all().count() @@ -1049,7 +1252,7 @@ def test_bulk_delete_files_in_single_directory_2(self): all_files_count_before = File.objects.all().count() file_ids = [f.id for f in Directory.objects.get(pk=4).files.all()] - response = self.client.delete('/rest/v2/files', file_ids, format="json") + response = self.client.delete("/rest/v2/files", file_ids, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) all_files_count_after = File.objects.all().count() @@ -1060,20 +1263,20 @@ def test_bulk_delete_file_list_one_file_id_missing(self): Otherwise complete set of files, but from one dir one file is missing. Should leave the one file intact, while preserving the directory tree. """ - all_files_count_before = File.objects.filter(project_identifier='project_x').count() - file_ids = [f.id for f in File.objects.filter(project_identifier='project_x')] + all_files_count_before = File.objects.filter(project_identifier="project_x").count() + file_ids = [f.id for f in File.objects.filter(project_identifier="project_x")] # everything except the last file should be removed file_ids.pop() - response = self.client.delete('/rest/v2/files', file_ids, format="json") + response = self.client.delete("/rest/v2/files", file_ids, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - all_files_after = File.objects.filter(project_identifier='project_x') + all_files_after = File.objects.filter(project_identifier="project_x") self.assertEqual(all_files_after.count(), all_files_count_before - len(file_ids)) expected_dirs_count = self._count_dirs_from_path(all_files_after[0].file_path) - actual_dirs_count = Directory.objects.filter(project_identifier='project_x').count() + actual_dirs_count = Directory.objects.filter(project_identifier="project_x").count() self.assertEqual(actual_dirs_count, expected_dirs_count) def test_bulk_delete_files_from_root(self): @@ -1082,16 +1285,23 @@ def test_bulk_delete_files_from_root(self): so the whole tree should end up being deleted. 
""" files_to_remove_count = 20 - file_ids = File.objects.filter(project_identifier='project_x').values_list('id', flat=True) + file_ids = File.objects.filter(project_identifier="project_x").values_list("id", flat=True) self.assertEqual(len(file_ids), files_to_remove_count) - response = self.client.delete('/rest/v2/files', file_ids, format="json") + response = self.client.delete("/rest/v2/files", file_ids, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('deleted_files_count', None), files_to_remove_count, response.data) + self.assertEqual( + response.data.get("deleted_files_count", None), + files_to_remove_count, + response.data, + ) - self._assert_files_available_and_removed('project_x', 0, files_to_remove_count) - self.assertEqual(Directory.objects_unfiltered.filter(project_identifier='project_x').count(), 0, - 'all dirs should have been permanently removed') + self._assert_files_available_and_removed("project_x", 0, files_to_remove_count) + self.assertEqual( + Directory.objects_unfiltered.filter(project_identifier="project_x").count(), + 0, + "all dirs should have been permanently removed", + ) def test_bulk_delete_sub_directory_1(self): """ @@ -1103,17 +1313,21 @@ def test_bulk_delete_sub_directory_1(self): file_ids += [f.id for f in Directory.objects.get(pk=6).files.all()] self.assertEqual(len(file_ids), files_to_remove_count) - response = self.client.delete('/rest/v2/files', file_ids, format="json") + response = self.client.delete("/rest/v2/files", file_ids, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('deleted_files_count', None), files_to_remove_count, response.data) + self.assertEqual( + response.data.get("deleted_files_count", None), + files_to_remove_count, + response.data, + ) - self._assert_files_available_and_removed('project_x', 5, files_to_remove_count) + self._assert_files_available_and_removed("project_x", 5, files_to_remove_count) # these dirs should still be left: # / # /project_x_FROZEN # /project_x_FROZEN/Experiment_X (has 5 files) - self.assertEqual(Directory.objects.filter(project_identifier='project_x').count(), 3) + self.assertEqual(Directory.objects.filter(project_identifier="project_x").count(), 3) def test_bulk_delete_sub_directory_2(self): """ @@ -1124,36 +1338,51 @@ def test_bulk_delete_sub_directory_2(self): file_ids = [f.id for f in Directory.objects.get(pk=6).files.all()] self.assertEqual(len(file_ids), files_to_remove_count) - response = self.client.delete('/rest/v2/files', file_ids, format="json") + response = self.client.delete("/rest/v2/files", file_ids, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('deleted_files_count', None), files_to_remove_count, response.data) + self.assertEqual( + response.data.get("deleted_files_count", None), + files_to_remove_count, + response.data, + ) - self._assert_files_available_and_removed('project_x', 10, files_to_remove_count) + self._assert_files_available_and_removed("project_x", 10, files_to_remove_count) # these dirs should still be left: # / # /project_x_FROZEN # /project_x_FROZEN/Experiment_X (5 files) # /project_x_FROZEN/Experiment_X/Phase_1 (5 files) - self.assertEqual(Directory.objects.filter(project_identifier='project_x').count(), 4) + self.assertEqual(Directory.objects.filter(project_identifier="project_x").count(), 4) # /project_x_FROZEN/Experiment_X/Phase_1/2017 <- this 
dir should be deleted, since # it only contained the 01-dir, which we specifically targeted for deletion - self.assertEqual(Directory.objects.filter( - project_identifier='project_x', - directory_path='/project_x_FROZEN/Experiment_X/Phase_1/2017' - ).count(), 0, 'dir should have been deleted') + self.assertEqual( + Directory.objects.filter( + project_identifier="project_x", + directory_path="/project_x_FROZEN/Experiment_X/Phase_1/2017", + ).count(), + 0, + "dir should have been deleted", + ) def _assert_files_available_and_removed(self, project_identifier, available, removed): """ After deleting files, check that the number of files retrievable by usual means is as expected, and that the number of files retrievable from objects_unfiltered with removed=True is as expected. """ - self.assertEqual(File.objects.filter(project_identifier=project_identifier).count(), available, - 'files should not be retrievable from removed=False scope') - self.assertEqual(File.objects_unfiltered.filter(project_identifier=project_identifier, removed=True).count(), - removed, - 'files should be retrievable from removed=True scope') + self.assertEqual( + File.objects.filter(project_identifier=project_identifier).count(), + available, + "files should not be retrievable from removed=False scope", + ) + self.assertEqual( + File.objects_unfiltered.filter( + project_identifier=project_identifier, removed=True + ).count(), + removed, + "files should be retrievable from removed=True scope", + ) def test_deleting_files_deprecates_datasets(self): for cr in CatalogRecord.objects.filter(deprecated=True): @@ -1162,33 +1391,37 @@ def test_deleting_files_deprecates_datasets(self): cr.force_save() datasets_with_file = CatalogRecord.objects.filter(files__id=1).count() - response = self.client.delete('/rest/v2/files/1') + response = self.client.delete("/rest/v2/files/1") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(CatalogRecord.objects.filter(deprecated=True).count(), datasets_with_file) class FileApiWriteRestoreTests(FileApiWriteCommon): - def test_restore_files_ok(self): """ Restore a few deleted files from directories that still contain other files. Restored files should be appended to previously existing files.
""" - response = self.client.delete('/rest/v2/files/1') - response = self.client.delete('/rest/v2/files/2') - response = self.client.delete('/rest/v2/files/3') + response = self.client.delete("/rest/v2/files/1") + response = self.client.delete("/rest/v2/files/2") + response = self.client.delete("/rest/v2/files/3") self.assertEqual(response.status_code, status.HTTP_200_OK) - deleted_files = File.objects_unfiltered.filter(pk__in=[1, 2, 3]) \ - .values('identifier', 'parent_directory_id') + deleted_files = File.objects_unfiltered.filter(pk__in=[1, 2, 3]).values( + "identifier", "parent_directory_id" + ) - response = self.client.post('/rest/v2/files/restore', [f['identifier'] for f in deleted_files], format='json') + response = self.client.post( + "/rest/v2/files/restore", + [f["identifier"] for f in deleted_files], + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('restored_files_count' in response.data, True, response.data) - self.assertEqual(response.data['restored_files_count'], 3, response.data) + self.assertEqual("restored_files_count" in response.data, True, response.data) + self.assertEqual(response.data["restored_files_count"], 3, response.data) # ensure restored files are using previously existing directories - old_parent_dirs = { f['parent_directory_id'] for f in deleted_files } + old_parent_dirs = {f["parent_directory_id"] for f in deleted_files} files = File.objects.filter(pk__in=[1, 2, 3]) for f in files: self.assertEqual(f.file_deleted, None) @@ -1201,20 +1434,25 @@ def test_restore_files_recreate_missing_directories(self): """ proj = File.objects.get(pk=1).project_identifier - response = self.client.get('/rest/files?project_identifier=%s&fields=identifier&pagination=false' - % proj, format='json') - file_identifiers = [ f['identifier'] for f in response.data ] + response = self.client.get( + "/rest/files?project_identifier=%s&fields=identifier&pagination=false" % proj, + format="json", + ) + file_identifiers = [f["identifier"] for f in response.data] - self.client.delete('/rest/v2/files', file_identifiers, format='json') + self.client.delete("/rest/v2/files", file_identifiers, format="json") - deleted_directory_ids = File.objects_unfiltered.filter(identifier__in=file_identifiers) \ - .values_list('parent_directory_id', flat=True) - old_parent_dirs = { id for id in deleted_directory_ids } + deleted_directory_ids = File.objects_unfiltered.filter( + identifier__in=file_identifiers + ).values_list("parent_directory_id", flat=True) + old_parent_dirs = {id for id in deleted_directory_ids} - response = self.client.post('/rest/v2/files/restore', file_identifiers, format='json') + response = self.client.post("/rest/v2/files/restore", file_identifiers, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual('restored_files_count' in response.data, True, response.data) - self.assertEqual(response.data['restored_files_count'], len(file_identifiers), response.data) + self.assertEqual("restored_files_count" in response.data, True, response.data) + self.assertEqual( + response.data["restored_files_count"], len(file_identifiers), response.data + ) # ensure restored files are using new directories files = File.objects.filter(identifier__in=file_identifiers) @@ -1222,15 +1460,21 @@ def test_restore_files_recreate_missing_directories(self): self.assertEqual(f.parent_directory_id in old_parent_dirs, False) def test_check_parameter_is_string_list(self): - response = self.client.post('/rest/v2/files/restore', ['a', 
'b', 1], format='json') + response = self.client.post("/rest/v2/files/restore", ["a", "b", 1], format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_check_files_belong_to_one_project(self): f1 = File.objects_unfiltered.get(pk=1) - f2 = File.objects_unfiltered.filter().exclude(project_identifier=f1.project_identifier).first() - response = self.client.delete('/rest/v2/files/%d' % f1.id) - response = self.client.delete('/rest/v2/files/%d' % f2.id) - response = self.client.post('/rest/v2/files/restore', [ f1.identifier, f2.identifier ], format='json') + f2 = ( + File.objects_unfiltered.filter() + .exclude(project_identifier=f1.project_identifier) + .first() + ) + response = self.client.delete("/rest/v2/files/%d" % f1.id) + response = self.client.delete("/rest/v2/files/%d" % f2.id) + response = self.client.post( + "/rest/v2/files/restore", [f1.identifier, f2.identifier], format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) @@ -1240,52 +1484,66 @@ class FileApiWriteXmlTests(FileApiWriteCommon): """ def test_xml_api(self): - content_type = 'application/xml' - data = 'tauhketta yeah' + content_type = "application/xml" + data = ( + 'tauhketta yeah' + ) # create - response = self.client.post('/rest/v2/files/1/xml?namespace=breh', data, content_type=content_type) + response = self.client.post( + "/rest/v2/files/1/xml?namespace=breh", data, content_type=content_type + ) self.assertEqual(response.status_code in (200, 201, 204), True) # update - response = self.client.put('/rest/v2/files/1/xml?namespace=breh', data, content_type=content_type) + response = self.client.put( + "/rest/v2/files/1/xml?namespace=breh", data, content_type=content_type + ) self.assertEqual(response.status_code in (200, 201, 204), True) # get updated again - response = self.client.get('/rest/v2/files/1/xml?namespace=breh', content_type=content_type, ) - self.assertEqual('updated stuff' in response.data, True) + response = self.client.get( + "/rest/v2/files/1/xml?namespace=breh", + content_type=content_type, + ) + self.assertEqual("updated stuff" in response.data, True) # delete - response = self.client.delete('/rest/v2/files/1/xml?namespace=breh', data, content_type=content_type) + response = self.client.delete( + "/rest/v2/files/1/xml?namespace=breh", data, content_type=content_type + ) self.assertEqual(response.status_code in (200, 201, 204), True) - response = self.client.delete('/rest/v2/files/1/xml?namespace=bruh', data, content_type=content_type) + response = self.client.delete( + "/rest/v2/files/1/xml?namespace=bruh", data, content_type=content_type + ) self.assertEqual(response.status_code in (200, 201, 204), True) # get list - response = self.client.get('/rest/v2/files/1/xml', content_type=content_type) + response = self.client.get("/rest/v2/files/1/xml", content_type=content_type) self.assertEqual(response.status_code in (200, 201, 204), True) class FileApiWriteEndUserAccess(FileApiWriteCommon): - def setUp(self): super().setUp() self.token = get_test_oidc_token() @@ -1293,110 +1551,108 @@ def setUp(self): @responses.activate def test_user_cant_create_files(self): - ''' + """ Ensure users are unable to create new files.
- ''' + """ # ensure user belongs to same project - self.token['group_names'].append('IDA01:%s' % self.test_new_data['project_identifier']) - self._use_http_authorization(method='bearer', token=self.token) + self.token["group_names"].append("IDA01:%s" % self.test_new_data["project_identifier"]) + self._use_http_authorization(method="bearer", token=self.token) - response = self.client.post('/rest/v2/files', self.test_new_data, format="json") + response = self.client.post("/rest/v2/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) @responses.activate def test_user_can_only_update_permitted_file_fields(self): - ''' + """ Ensure users are only able to modify permitted fields. - ''' + """ # ensure user belongs to same project proj = File.objects.get(pk=1).project_identifier - self.token['group_names'].append('IDA01:%s' % proj) - self._use_http_authorization(method='bearer', token=self.token) + self.token["group_names"].append("IDA01:%s" % proj) + self._use_http_authorization(method="bearer", token=self.token) - response = self.client.get('/rest/v2/files/1', format="json") + response = self.client.get("/rest/v2/files/1", format="json") file = response.data original_file = deepcopy(file) - file['byte_size'] = 200 - file['checksum']['value'] = 'changed' - file['parent_directory'] = 1 - file['file_frozen'] = '3' + file['file_frozen'][1:] - file['file_format'] = 'changed' - file['file_name'] = 'changed' - file['file_path'] = '/oh/no' - file['file_storage'] = 2 - file['file_uploaded'] = '3' + file['file_uploaded'][1:] - file['identifier'] = 'changed' - file['open_access'] = True - file['project_identifier'] = 'changed' - file['service_modified'] = 'changed' - file['service_created'] = 'changed' - file['removed'] = True + file["byte_size"] = 200 + file["checksum"]["value"] = "changed" + file["parent_directory"] = 1 + file["file_frozen"] = "3" + file["file_frozen"][1:] + file["file_format"] = "changed" + file["file_name"] = "changed" + file["file_path"] = "/oh/no" + file["file_storage"] = 2 + file["file_uploaded"] = "3" + file["file_uploaded"][1:] + file["identifier"] = "changed" + file["open_access"] = True + file["project_identifier"] = "changed" + file["service_modified"] = "changed" + file["service_created"] = "changed" + file["removed"] = True # the only field that should be changed - file['file_characteristics'] = { 'title': 'new title'} + file["file_characteristics"] = {"title": "new title"} - response = self.client.put('/rest/v2/files/1', file, format="json") + response = self.client.put("/rest/v2/files/1", file, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['file_characteristics']['title'], 'new title', response.data) + self.assertEqual(response.data["file_characteristics"]["title"], "new title", response.data) for key, value in response.data.items(): try: - if key in ('date_modified', 'file_modified'): + if key in ("date_modified", "file_modified"): # these fields are changed by metax continue - elif key == 'file_characteristics': + elif key == "file_characteristics": # the field that should have been changed by the user self.assertNotEqual(original_file[key], response.data[key]) else: # must not have changed self.assertEqual(original_file[key], response.data[key]) except KeyError as e: - if e.args[0] == 'user_modified': + if e.args[0] == "user_modified": # added by metax continue raise @responses.activate def 
test_user_can_update_files_in_their_projects(self): - ''' + """ Ensure users can edit files in projects they are a member of. - ''' - proj = File.objects.only('project_identifier').get(pk=1).project_identifier + """ + proj = File.objects.only("project_identifier").get(pk=1).project_identifier - response = self.client.get('/rest/v2/files?project_identifier=%s' % proj, - format="json") + response = self.client.get("/rest/v2/files?project_identifier=%s" % proj, format="json") - file = response.data['results'][0] + file = response.data["results"][0] - self.token['group_names'].append('IDA01:%s' % proj) - self._use_http_authorization(method='bearer', token=self.token) + self.token["group_names"].append("IDA01:%s" % proj) + self._use_http_authorization(method="bearer", token=self.token) - response = self.client.put('/rest/v2/files/%s' % file['id'], file, format="json") + response = self.client.put("/rest/v2/files/%s" % file["id"], file, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) - response = self.client.put('/rest/v2/files', [file], format="json") + response = self.client.put("/rest/v2/files", [file], format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) @responses.activate def test_user_cant_update_files_in_others_projects(self): - ''' + """ Ensure users cannot edit files in projects they are not a member of. - ''' - proj = File.objects.only('project_identifier').get(pk=1).project_identifier + """ + proj = File.objects.only("project_identifier").get(pk=1).project_identifier - response = self.client.get('/rest/v2/files?project_identifier=%s' % proj, - format="json") + response = self.client.get("/rest/v2/files?project_identifier=%s" % proj, format="json") - file = response.data['results'][0] + file = response.data["results"][0] - self.token['group_names'] = ['no_files_for_this_project'] - self._use_http_authorization(method='bearer', token=self.token) + self.token["group_names"] = ["no_files_for_this_project"] + self._use_http_authorization(method="bearer", token=self.token) - response = self.client.put('/rest/v2/files/%s' % file['id'], file, format="json") + response = self.client.put("/rest/v2/files/%s" % file["id"], file, format="json") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - response = self.client.put('/rest/v2/files', [file], format="json") + response = self.client.put("/rest/v2/files", [file], format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) @@ -1414,8 +1670,14 @@ def test_dryrun(self): Ensure query parameter ?dryrun=true returns the same result as the request normally would, but changes made during the request do not get saved in the db.
""" - response = self.client.post('/rest/v2/files?what&dryrun=true&other', self.test_new_data, format="json") + response = self.client.post( + "/rest/v2/files?what&dryrun=true&other", self.test_new_data, format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual('id' in response.data, True) - found = File.objects.filter(pk=response.data['id']).exists() - self.assertEqual(found, False, 'file should not get truly created when using parameter dryrun') + self.assertEqual("id" in response.data, True) + found = File.objects.filter(pk=response.data["id"]).exists() + self.assertEqual( + found, + False, + "file should not get truly created when using parameter dryrun", + ) diff --git a/src/metax_api/tests/api/rest/v2/views/filestorages/read.py b/src/metax_api/tests/api/rest/v2/views/filestorages/read.py index 0a57c688..bbb54665 100755 --- a/src/metax_api/tests/api/rest/v2/views/filestorages/read.py +++ b/src/metax_api/tests/api/rest/v2/views/filestorages/read.py @@ -14,13 +14,12 @@ class FileStorageApiReadBasicTests(APITestCase, TestClassUtils): - @classmethod def setUpClass(cls): """ Loaded only once for test cases inside this class. """ - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(FileStorageApiReadBasicTests, cls).setUpClass() def setUp(self): @@ -28,13 +27,17 @@ def setUp(self): def test_basic_get(self): fs = FileStorage.objects.get(pk=1) - response = self.client.get('/rest/v2/filestorages/%d' % fs.id) + response = self.client.get("/rest/v2/filestorages/%d" % fs.id) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get('/rest/v2/filestorages/%s' % fs.file_storage_json['identifier']) + response = self.client.get("/rest/v2/filestorages/%s" % fs.file_storage_json["identifier"]) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) def test_basic_list(self): - response = self.client.get('/rest/v2/filestorages') + response = self.client.get("/rest/v2/filestorages") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data['results']), FileStorage.objects.all().count(), response.data) + self.assertEqual( + len(response.data["results"]), + FileStorage.objects.all().count(), + response.data, + ) diff --git a/src/metax_api/tests/api/rest/v2/views/filestorages/write.py b/src/metax_api/tests/api/rest/v2/views/filestorages/write.py index 53b5adbb..e8f86d32 100755 --- a/src/metax_api/tests/api/rest/v2/views/filestorages/write.py +++ b/src/metax_api/tests/api/rest/v2/views/filestorages/write.py @@ -14,39 +14,40 @@ class FileStorageApiWriteCommon(APITestCase, TestClassUtils): - @classmethod def setUpClass(cls): """ Loaded only once for test cases inside this class. 
""" - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(FileStorageApiWriteCommon, cls).setUpClass() def setUp(self): - self.new_test_data = self._get_object_from_test_data('filestorage') - self.new_test_data.pop('id') - self.new_test_data['file_storage_json']['identifier'] = 'new-file-storage' + self.new_test_data = self._get_object_from_test_data("filestorage") + self.new_test_data.pop("id") + self.new_test_data["file_storage_json"]["identifier"] = "new-file-storage" self._use_http_authorization() class FileStorageApiWriteBasicTests(FileStorageApiWriteCommon): - def test_create(self): - response = self.client.post('/rest/v2/filestorages', self.new_test_data, format="json") + response = self.client.post("/rest/v2/filestorages", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_create_identifier_already_exists(self): - response = self.client.post('/rest/v2/filestorages', self.new_test_data, format="json") + response = self.client.post("/rest/v2/filestorages", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) - response = self.client.post('/rest/v2/filestorages', self.new_test_data, format="json") + response = self.client.post("/rest/v2/filestorages", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('already exists' in response.data['file_storage_json']['identifier'][0], - True, response.data) + self.assertEqual( + "already exists" in response.data["file_storage_json"]["identifier"][0], + True, + response.data, + ) def test_delete(self): - response = self.client.delete('/rest/v2/filestorages/1') + response = self.client.delete("/rest/v2/filestorages/1") self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) fs = FileStorage.objects_unfiltered.get(pk=1) - self.assertEqual(fs.removed, True, 'should be deleted') + self.assertEqual(fs.removed, True, "should be deleted") self.assertEqual(fs.date_removed, fs.date_modified) diff --git a/src/metax_api/tests/api/rest/v2/views/schemas/read.py b/src/metax_api/tests/api/rest/v2/views/schemas/read.py index 9e210a74..3fd372cf 100755 --- a/src/metax_api/tests/api/rest/v2/views/schemas/read.py +++ b/src/metax_api/tests/api/rest/v2/views/schemas/read.py @@ -12,25 +12,24 @@ class SchemaApiReadTests(APITestCase, TestClassUtils): - def test_read_schemas_list(self): - response = self.client.get('/rest/v2/schemas') + response = self.client.get("/rest/v2/schemas") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(response.data['count'] > 0) + self.assertTrue(response.data["count"] > 0) def test_read_schemas_list_html(self): - headers = {'HTTP_ACCEPT': 'text/html'} - response = self.client.get('/rest/v2/schemas', **headers) + headers = {"HTTP_ACCEPT": "text/html"} + response = self.client.get("/rest/v2/schemas", **headers) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(response._headers['content-type'][1].find('text/html') >= 0) + self.assertTrue(response._headers["content-type"][1].find("text/html") >= 0) def test_read_schema_retrieve_existing(self): - list_response = self.client.get('/rest/v2/schemas') + list_response = self.client.get("/rest/v2/schemas") self.assertEqual(list_response.status_code, status.HTTP_200_OK) - self.assertTrue(list_response.data['count'] > 0, 'No schemas available') - response = self.client.get('/rest/v2/schemas/%s' % 
list_response.data['results'][0]) + response = self.client.get("/rest/v2/schemas/%s" % list_response.data["results"][0]) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_read_schema_not_exists(self): - response = self.client.get('/rest/v2/schemas/thisshouldnotexist') + response = self.client.get("/rest/v2/schemas/thisshouldnotexist") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) diff --git a/src/metax_api/tests/api/rpc/base/views/common_rpc.py b/src/metax_api/tests/api/rpc/base/views/common_rpc.py index 1d0c5e7e..abbe3220 100755 --- a/src/metax_api/tests/api/rpc/base/views/common_rpc.py +++ b/src/metax_api/tests/api/rpc/base/views/common_rpc.py @@ -12,12 +12,11 @@ class CommonRPCTests(APITestCase, TestClassUtils): - def test_list_valid_methods(self): """ When an invalid (or mistyped) method name is attempted, the api should list valid method names for that RPC endpoint. """ - response = self.client.get('/rpc/datasets/nonexisting') + response = self.client.get("/rpc/datasets/nonexisting") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual('methods are: ' in response.data['detail'][0], True, response.content) + self.assertEqual("methods are: " in response.data["detail"][0], True, response.content) diff --git a/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py b/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py index 84d7bebe..84771c8d 100755 --- a/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py +++ b/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py @@ -20,14 +20,13 @@ class DatasetRPCTests(APITestCase, TestClassUtils): - @classmethod def setUpClass(cls): """ Loaded only once for test cases inside this class. """ super().setUpClass() - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) def setUp(self): super().setUp() @@ -40,81 +39,87 @@ def test_get_minimal_dataset_template(self): """ # query param type is missing, should return an error and a description of what to do.
- response = self.client.get('/rpc/datasets/get_minimal_dataset_template') + response = self.client.get("/rpc/datasets/get_minimal_dataset_template") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # test preventing typos - response = self.client.get('/rpc/datasets/get_minimal_dataset_template?type=wrong') + response = self.client.get("/rpc/datasets/get_minimal_dataset_template?type=wrong") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # test minimal dataset for service use - response = self.client.get('/rpc/datasets/get_minimal_dataset_template?type=service') + response = self.client.get("/rpc/datasets/get_minimal_dataset_template?type=service") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue('metadata_provider_org' in response.data) - self.assertTrue('metadata_provider_user' in response.data) - self._use_http_authorization(username='testuser') - response = self.client.post('/rest/datasets', response.data, format="json") + self.assertTrue("metadata_provider_org" in response.data) + self.assertTrue("metadata_provider_user" in response.data) + self._use_http_authorization(username="testuser") + response = self.client.post("/rest/datasets", response.data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) # test minimal dataset for end user use - response = self.client.get('/rpc/datasets/get_minimal_dataset_template?type=enduser') + response = self.client.get("/rpc/datasets/get_minimal_dataset_template?type=enduser") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue('metadata_provider_org' not in response.data) - self.assertTrue('metadata_provider_user' not in response.data) - self._use_http_authorization(method='bearer', token=get_test_oidc_token()) + self.assertTrue("metadata_provider_org" not in response.data) + self.assertTrue("metadata_provider_user" not in response.data) + self._use_http_authorization(method="bearer", token=get_test_oidc_token()) self._mock_token_validation_succeeds() - response = self.client.post('/rest/datasets', response.data, format="json") + response = self.client.post("/rest/datasets", response.data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_set_preservation_identifier(self): - self._set_http_authorization('service') + self._set_http_authorization("service") # Parameter 'identifier' is required - response = self.client.post('/rpc/datasets/set_preservation_identifier') + response = self.client.post("/rpc/datasets/set_preservation_identifier") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # Nonexisting identifier should return 404 - response = self.client.post('/rpc/datasets/set_preservation_identifier?identifier=nonexisting') + response = self.client.post( + "/rpc/datasets/set_preservation_identifier?identifier=nonexisting" + ) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) # Create ida data catalog - dc = self._get_object_from_test_data('datacatalog', requested_index=0) + dc = self._get_object_from_test_data("datacatalog", requested_index=0) dc_id = settings.IDA_DATA_CATALOG_IDENTIFIER - dc['catalog_json']['identifier'] = dc_id - self.client.post('/rest/datacatalogs', dc, format="json") + dc["catalog_json"]["identifier"] = dc_id + self.client.post("/rest/datacatalogs", dc, format="json") # Test OK ops # Create new ida cr without doi - cr_json = self.client.get('/rest/datasets/1').data - cr_json.pop('preservation_identifier', None) - 
cr_json.pop('identifier') - cr_json['research_dataset'].pop('preferred_identifier', None) - cr_json['data_catalog'] = dc_id - cr_json['research_dataset']['issued'] = '2018-01-01' - cr_json['research_dataset']['publisher'] = { - '@type': 'Organization', - 'name': { 'en': 'publisher' } + cr_json = self.client.get("/rest/datasets/1").data + cr_json.pop("preservation_identifier", None) + cr_json.pop("identifier") + cr_json["research_dataset"].pop("preferred_identifier", None) + cr_json["data_catalog"] = dc_id + cr_json["research_dataset"]["issued"] = "2018-01-01" + cr_json["research_dataset"]["publisher"] = { + "@type": "Organization", + "name": {"en": "publisher"}, } - response = self.client.post('/rest/datasets?pid_type=urn', cr_json, format="json") + response = self.client.post("/rest/datasets?pid_type=urn", cr_json, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - identifier = response.data['identifier'] + identifier = response.data["identifier"] # Verify rpc api returns the same doi as the one that is set as the dataset's preservation identifier response = self.client.post( f"/rpc/datasets/set_preservation_identifier?identifier={identifier}" ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response2 = self.client.get(f'/rest/datasets/{identifier}') + response2 = self.client.get(f"/rest/datasets/{identifier}") self.assertEqual(response2.status_code, status.HTTP_200_OK, response2.data) - self.assertEqual(response.data, response2.data['preservation_identifier'], response2.data) + self.assertEqual(response.data, response2.data["preservation_identifier"], response2.data) # Return 400 if request is not in correct datacite format - response2.data['research_dataset'].pop('issued') - response = self.client.put(f'/rest/datasets/{identifier}', response2.data, format="json") + response2.data["research_dataset"].pop("issued") + response = self.client.put(f"/rest/datasets/{identifier}", response2.data, format="json") self.assertEqual(response2.status_code, status.HTTP_200_OK, response2.data) - response = self.client.post(f'/rpc/datasets/set_preservation_identifier?identifier={identifier}') + response = self.client.post( + f"/rpc/datasets/set_preservation_identifier?identifier={identifier}" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) @@ -126,16 +131,16 @@ class ChangeCumulativeStateRPC(CatalogRecordApiWriteCommon): """ def _create_cumulative_dataset(self, state): - self.cr_test_data['cumulative_state'] = state + self.cr_test_data["cumulative_state"] = state - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data['cumulative_state'], state, response.data) + self.assertEqual(response.data["cumulative_state"], state, response.data) return response.data def _update_cr_cumulative_state(self, identifier, state, result=status.HTTP_204_NO_CONTENT): - url = '/rpc/datasets/change_cumulative_state?identifier=%s&cumulative_state=%d' + url = "/rpc/datasets/change_cumulative_state?identifier=%s&cumulative_state=%d" response = self.client.post(url % (identifier, state), format="json") self.assertEqual(response.status_code, result, response.data) @@ -143,15 +148,17 @@ def
_update_cr_cumulative_state(self, identifier, state, result=status.HTTP_204_ return response.data def _get_cr(self, identifier): - response = self.client.get('/rest/datasets/%s' % identifier, format="json") + response = self.client.get("/rest/datasets/%s" % identifier, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) return response.data def _assert_file_counts(self, new_version): - new_count = CatalogRecord.objects.get(pk=new_version['id']).files.count() - old_count = CatalogRecord.objects.get(pk=new_version['previous_dataset_version']['id']).files.count() - self.assertEqual(new_count, old_count, 'file count between versions should match') + new_count = CatalogRecord.objects.get(pk=new_version["id"]).files.count() + old_count = CatalogRecord.objects.get( + pk=new_version["previous_dataset_version"]["id"] + ).files.count() + self.assertEqual(new_count, old_count, "file count between versions should match") def test_transitions_from_NO(self): """ @@ -159,215 +166,248 @@ def test_transitions_from_NO(self): New version is created if non-cumulative dataset is marked actively cumulative. """ cr_orig = self._create_cumulative_dataset(0) - orig_preferred_identifier = cr_orig['research_dataset']['preferred_identifier'] + orig_preferred_identifier = cr_orig["research_dataset"]["preferred_identifier"] orig_record_count = CatalogRecord.objects.all().count() - self._update_cr_cumulative_state(cr_orig['identifier'], 2, status.HTTP_400_BAD_REQUEST) + self._update_cr_cumulative_state(cr_orig["identifier"], 2, status.HTTP_400_BAD_REQUEST) - self._update_cr_cumulative_state(cr_orig['identifier'], 1, status.HTTP_200_OK) + self._update_cr_cumulative_state(cr_orig["identifier"], 1, status.HTTP_200_OK) self.assertEqual(CatalogRecord.objects.all().count(), orig_record_count + 1) # get updated dataset - old_version = self._get_cr(cr_orig['identifier']) - self.assertEqual(old_version['cumulative_state'], 0, 'original status should not have changed') - self.assertTrue('next_dataset_version' in old_version, 'should have new dataset') + old_version = self._get_cr(cr_orig["identifier"]) + self.assertEqual(old_version["cumulative_state"], 0, "original status should not have changed") + self.assertTrue("next_dataset_version" in old_version, "should have new dataset") # cannot change old dataset cumulative_status - self._update_cr_cumulative_state(old_version['identifier'], 2, status.HTTP_400_BAD_REQUEST) + self._update_cr_cumulative_state(old_version["identifier"], 2, status.HTTP_400_BAD_REQUEST) # new version of the dataset should have new cumulative state - new_version = self._get_cr(old_version['next_dataset_version']['identifier']) - self.assertTrue(new_version['research_dataset']['preferred_identifier'] != orig_preferred_identifier) - self.assertEqual(new_version['cumulative_state'], 1, 'new version should have changed status') + new_version = self._get_cr(old_version["next_dataset_version"]["identifier"]) + self.assertTrue( + new_version["research_dataset"]["preferred_identifier"] != orig_preferred_identifier + ) + self.assertEqual( + new_version["cumulative_state"], 1, "new version should have changed status" + ) self._assert_file_counts(new_version) def test_transitions_from_YES(self): cr = self._create_cumulative_dataset(1) orig_record_count = CatalogRecord.objects.all().count() - self._update_cr_cumulative_state(cr['identifier'], 0, status.HTTP_400_BAD_REQUEST)
self.assertEqual(CatalogRecord.objects.all().count(), orig_record_count) # active to non-active cumulation is legal - self._update_cr_cumulative_state(cr['identifier'], 2) - cr = self._get_cr(cr['identifier']) - self.assertEqual(cr['cumulative_state'], 2, 'dataset should have changed status') + self._update_cr_cumulative_state(cr["identifier"], 2) + cr = self._get_cr(cr["identifier"]) + self.assertEqual(cr["cumulative_state"], 2, "dataset should have changed status") def test_correct_response_data(self): """ Tests that correct information is set to response. """ cr = self._create_cumulative_dataset(0) - return_data = self._update_cr_cumulative_state(cr['identifier'], 1, status.HTTP_200_OK) - self.assertTrue('new_version_created' in return_data, 'new_version_created should be returned') - new_version_identifier = return_data['new_version_created']['identifier'] - cr = self._get_cr(cr['identifier']) - self.assertEqual(cr['next_dataset_version']['identifier'], new_version_identifier) + return_data = self._update_cr_cumulative_state(cr["identifier"], 1, status.HTTP_200_OK) + self.assertTrue( + "new_version_created" in return_data, + "new_version_created should be returned", + ) + new_version_identifier = return_data["new_version_created"]["identifier"] + cr = self._get_cr(cr["identifier"]) + self.assertEqual(cr["next_dataset_version"]["identifier"], new_version_identifier) new_cr = self._get_cr(new_version_identifier) - return_data = self._update_cr_cumulative_state(new_cr['identifier'], 2) - self.assertEqual(return_data, None, 'when new version is not created, return should be None') + return_data = self._update_cr_cumulative_state(new_cr["identifier"], 2) + self.assertEqual( + return_data, None, "when new version is not created, return should be None" + ) class RefreshDirectoryContent(CatalogRecordApiWriteAssignFilesCommon): - url = '/rpc/datasets/refresh_directory_content?cr_identifier=%s&dir_identifier=%s' + url = "/rpc/datasets/refresh_directory_content?cr_identifier=%s&dir_identifier=%s" def _assert_rd_total_byte_size(self, file_size_before, file_size_after, expected_addition): self.assertEqual(file_size_after, file_size_before + expected_addition) def test_refresh_adds_new_files(self): - self._add_directory(self.cr_test_data, '/TestExperiment') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['identifier'] - dir_id = response.data['research_dataset']['directories'][0]['identifier'] - file_byte_size_before = response.data['research_dataset']['total_files_byte_size'] + cr_id = response.data["identifier"] + dir_id = response.data["research_dataset"]["directories"][0]["identifier"] + file_byte_size_before = response.data["research_dataset"]["total_files_byte_size"] # freeze two files to /TestExperiment/Directory_2 self._freeze_files_to_root() response = self.client.post(self.url % (cr_id, dir_id), format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['number_of_files_added'], 2) - - new_version = CatalogRecord.objects.get(id=response.data['new_version_created']['id']) - file_size_after = new_version.research_dataset['total_files_byte_size'] - self.assertEqual(new_version.files.count(), new_version.previous_dataset_version.files.count() + 2) - 
self._assert_rd_total_byte_size(file_byte_size_before, file_size_after, self._single_file_byte_size * 2) + self.assertEqual(response.data["number_of_files_added"], 2) + + new_version = CatalogRecord.objects.get(id=response.data["new_version_created"]["id"]) + file_size_after = new_version.research_dataset["total_files_byte_size"] + self.assertEqual( + new_version.files.count(), + new_version.previous_dataset_version.files.count() + 2, + ) + self._assert_rd_total_byte_size( + file_byte_size_before, file_size_after, self._single_file_byte_size * 2 + ) # freeze two files to /TestExperiment/Directory_2/Group_3 self._freeze_new_files() response = self.client.post(self.url % (new_version.identifier, dir_id), format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['number_of_files_added'], 2) + self.assertEqual(response.data["number_of_files_added"], 2) - new_version = CatalogRecord.objects.get(id=response.data['new_version_created']['id']) - self.assertEqual(new_version.files.count(), new_version.previous_dataset_version.files.count() + 2) + new_version = CatalogRecord.objects.get(id=response.data["new_version_created"]["id"]) + self.assertEqual( + new_version.files.count(), + new_version.previous_dataset_version.files.count() + 2, + ) def test_adding_parent_dir_allows_refreshes_to_child_dirs(self): """ When parent directory is added to dataset, refreshes to child directories are also possible. """ - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['identifier'] + cr_id = response.data["identifier"] self._freeze_new_files() - frozen_dir = Directory.objects.filter(directory_path='/TestExperiment/Directory_2/Group_3').first() + frozen_dir = Directory.objects.filter( + directory_path="/TestExperiment/Directory_2/Group_3" + ).first() response = self.client.post(self.url % (cr_id, frozen_dir.identifier), format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['number_of_files_added'], 2) + self.assertEqual(response.data["number_of_files_added"], 2) - new_version = CatalogRecord.objects.get(id=response.data['new_version_created']['id']) - self.assertEqual(new_version.files.count(), new_version.previous_dataset_version.files.count() + 2) + new_version = CatalogRecord.objects.get(id=response.data["new_version_created"]["id"]) + self.assertEqual( + new_version.files.count(), + new_version.previous_dataset_version.files.count() + 2, + ) def test_refresh_adds_new_files_multiple_locations(self): - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['identifier'] - dir_id = response.data['research_dataset']['directories'][0]['identifier'] + cr_id = response.data["identifier"] + dir_id = response.data["research_dataset"]["directories"][0]["identifier"] self._freeze_new_files() 
self._freeze_files_to_root() response = self.client.post(self.url % (cr_id, dir_id), format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['number_of_files_added'], 4) + self.assertEqual(response.data["number_of_files_added"], 4) - new_version = CatalogRecord.objects.get(id=response.data['new_version_created']['id']) - self.assertEqual(new_version.files.count(), new_version.previous_dataset_version.files.count() + 4) + new_version = CatalogRecord.objects.get(id=response.data["new_version_created"]["id"]) + self.assertEqual( + new_version.files.count(), + new_version.previous_dataset_version.files.count() + 4, + ) def test_refresh_adds_no_new_files_from_upper_dirs(self): """ Include parent/subdir and freeze files to parent. Should be no changes in the dataset. """ - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2/Group_2') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2/Group_2") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['identifier'] - dir_id = response.data['research_dataset']['directories'][0]['identifier'] + cr_id = response.data["identifier"] + dir_id = response.data["research_dataset"]["directories"][0]["identifier"] file_count_before = CatalogRecord.objects.get(identifier=cr_id).files.count() self._freeze_files_to_root() response = self.client.post(self.url % (cr_id, dir_id), format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['number_of_files_added'], 0) + self.assertEqual(response.data["number_of_files_added"], 0) cr_after = CatalogRecord.objects.get(identifier=cr_id) - self.assertEqual(cr_after.next_dataset_version, None, 'should not have new dataset version') - self.assertEqual(cr_after.files.count(), file_count_before, 'No new files should be added') + self.assertEqual(cr_after.next_dataset_version, None, "should not have new dataset version") + self.assertEqual(cr_after.files.count(), file_count_before, "No new files should be added") def test_refresh_with_cumulative_state_yes(self): """ When dataset has cumulation active, files are added to dataset but no new version is created. 
""" - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2') - self.cr_test_data['cumulative_state'] = 1 - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2") + self.cr_test_data["cumulative_state"] = 1 + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['identifier'] - dir_id = response.data['research_dataset']['directories'][0]['identifier'] + cr_id = response.data["identifier"] + dir_id = response.data["research_dataset"]["directories"][0]["identifier"] file_count_before = CatalogRecord.objects.get(identifier=cr_id).files.count() - file_byte_size_before = response.data['research_dataset']['total_files_byte_size'] + file_byte_size_before = response.data["research_dataset"]["total_files_byte_size"] self._freeze_new_files() self._freeze_files_to_root() response = self.client.post(self.url % (cr_id, dir_id), format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['number_of_files_added'], 4) + self.assertEqual(response.data["number_of_files_added"], 4) cr_after = CatalogRecord.objects.get(identifier=cr_id) - file_size_after = cr_after.research_dataset['total_files_byte_size'] - self.assertEqual(cr_after.next_dataset_version, None, 'should not have new dataset version') - self.assertEqual(len(cr_after.get_metadata_version_listing()), 2, 'new metadata version should be created') + file_size_after = cr_after.research_dataset["total_files_byte_size"] + self.assertEqual(cr_after.next_dataset_version, None, "should not have new dataset version") + self.assertEqual( + len(cr_after.get_metadata_version_listing()), + 2, + "new metadata version should be created", + ) self.assertEqual(cr_after.files.count(), file_count_before + 4) - self._assert_rd_total_byte_size(file_byte_size_before, file_size_after, self._single_file_byte_size * 4) + self._assert_rd_total_byte_size( + file_byte_size_before, file_size_after, self._single_file_byte_size * 4 + ) # check that added sub dir is found in catalog records internal variables - new_dir = \ - Directory.objects\ - .filter(directory_path__startswith='/TestExperiment/Directory_2/Group_3')\ - .first() - - self.assertTrue(str(new_dir.id) in cr_after._directory_data, 'New dir id should be found in cr') + new_dir = Directory.objects.filter( + directory_path__startswith="/TestExperiment/Directory_2/Group_3" + ).first() + + self.assertTrue( + str(new_dir.id) in cr_after._directory_data, + "New dir id should be found in cr", + ) self.assertEqual(new_dir.byte_size, self._single_file_byte_size * 2) def test_refreshing_deprecated_dataset_is_not_allowed(self): - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['identifier'] - dir_id = response.data['research_dataset']['directories'][0]['identifier'] + cr_id = response.data["identifier"] + dir_id = response.data["research_dataset"]["directories"][0]["identifier"] removed_file_id = CatalogRecord.objects.get(identifier=cr_id).files.all()[0].id - response = 
self.client.delete(f'/rest/files/{removed_file_id}') + response = self.client.delete(f"/rest/files/{removed_file_id}") self.assertEqual(response.status_code, status.HTTP_200_OK) - response = self.client.get(f'/rest/datasets/{cr_id}') + response = self.client.get(f"/rest/datasets/{cr_id}") self.assertEqual(response.status_code, status.HTTP_200_OK) depr_cr = response.data self._freeze_new_files() - response = self.client.post(self.url % (depr_cr['identifier'], dir_id), format="json") + response = self.client.post(self.url % (depr_cr["identifier"], dir_id), format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) def test_adding_files_from_non_assigned_dir_is_not_allowed(self): """ Only allow adding files from directories whose paths are included in the research dataset. """ - self._add_directory(self.cr_test_data, '/SecondExperiment/Data') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/SecondExperiment/Data") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['identifier'] + cr_id = response.data["identifier"] # create another dataset so that dir /SecondExperiment/Data_Config will be created - self._add_directory(self.cr_test_data, '/SecondExperiment/Data_Config') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/SecondExperiment/Data_Config") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - dir_id = response.data['research_dataset']['directories'][1]['identifier'] + dir_id = response.data["research_dataset"]["directories"][1]["identifier"] response = self.client.post(self.url % (cr_id, dir_id), format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertTrue('not included' in response.data['detail'][0], response.data) + self.assertTrue("not included" in response.data["detail"][0], response.data) class FixDeprecatedTests(CatalogRecordApiWriteAssignFilesCommon): @@ -380,10 +420,12 @@ def _get_next_dataset_version(self, identifier): """ Returns next dataset version for dataset """ - response = self.client.get('/rest/datasets/%s' % identifier) + response = self.client.get("/rest/datasets/%s" % identifier) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertTrue('next_dataset_version' in response.data, 'new dataset should be created') - response = self.client.get('/rest/datasets/%s' % response.data['next_dataset_version']['identifier']) + self.assertTrue("next_dataset_version" in response.data, "new dataset should be created") + response = self.client.get( + "/rest/datasets/%s" % response.data["next_dataset_version"]["identifier"] + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) return response.data @@ -394,12 +436,17 @@ def _check_new_dataset_version(self, identifier, file_count_before, deleted_file files included. Research_dataset must be checked separately.
""" new_cr_version = self._get_next_dataset_version(identifier) - new_version_files = CatalogRecord.objects.get(pk=new_cr_version['id']).files.all() - self.assertEqual(new_cr_version['deprecated'], False, 'deprecated flag should be fixed') - self.assertEqual(new_version_files.count(), file_count_before - len(deleted_file_ids), - 'new file count should be on smaller than before') - self.assertTrue(all(d not in new_version_files.values_list('id', flat=True) for d in deleted_file_ids), - 'Deleted files should not be found in new version') + new_version_files = CatalogRecord.objects.get(pk=new_cr_version["id"]).files.all() + self.assertEqual(new_cr_version["deprecated"], False, "deprecated flag should be fixed") + self.assertEqual( + new_version_files.count(), + file_count_before - len(deleted_file_ids), + "new file count should be on smaller than before", + ) + self.assertTrue( + all(d not in new_version_files.values_list("id", flat=True) for d in deleted_file_ids), + "Deleted files should not be found in new version", + ) return new_cr_version @@ -407,10 +454,11 @@ def _delete_files_from_directory_path(self, path): """ Deletes files from sub directories as well """ - deleted_file_ids = [ id for id in File.objects - .filter(file_path__startswith=path) - .values_list('id', flat=True) ] - response = self.client.delete('/rest/files', deleted_file_ids, format="json") + deleted_file_ids = [ + id + for id in File.objects.filter(file_path__startswith=path).values_list("id", flat=True) + ] + response = self.client.delete("/rest/files", deleted_file_ids, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) return deleted_file_ids @@ -420,129 +468,187 @@ def test_fix_deprecated_files(self): # delete file from dataset deleted_file = File.objects.get(pk=1) - response = self.client.delete('/rest/files/%s' % deleted_file.identifier) + response = self.client.delete("/rest/files/%s" % deleted_file.identifier) self.assertEqual(response.status_code, status.HTTP_200_OK) - response = self.client.get('/rest/datasets/1') + response = self.client.get("/rest/datasets/1") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(response.data['deprecated'], 'dataset should be deprecated') - identifier = response.data['identifier'] + self.assertTrue(response.data["deprecated"], "dataset should be deprecated") + identifier = response.data["identifier"] # fix deprecated dataset - response = self.client.post('/rpc/datasets/fix_deprecated?identifier=%s' % identifier) + response = self.client.post("/rpc/datasets/fix_deprecated?identifier=%s" % identifier) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # ensure that new dataset version is not deprecated, dataset files contain only the non-removed file # and removed file is deleted from research_dataset - new_cr_version = self._check_new_dataset_version(identifier, file_count_before, [deleted_file.id]) - rd_filenames = [ f['identifier'] for f in new_cr_version['research_dataset']['files'] ] - self.assertTrue(deleted_file.identifier not in rd_filenames, 'deleted file should not be in research_dataset') + new_cr_version = self._check_new_dataset_version( + identifier, file_count_before, [deleted_file.id] + ) + rd_filenames = [f["identifier"] for f in new_cr_version["research_dataset"]["files"]] + self.assertTrue( + deleted_file.identifier not in rd_filenames, + "deleted file should not be in research_dataset", + ) def test_fix_deprecated_directories(self): """ This test adds parent directory of two files to a 
dataset and deletes the files """ # add/describe the parent directory of the newly added file to the dataset - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2/Group_2/Group_2_deeper') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2/Group_2/Group_2_deeper") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) cr_with_dir = response.data - file_count_before = CatalogRecord.objects.get(identifier=cr_with_dir['identifier']).files.count() + file_count_before = CatalogRecord.objects.get( + identifier=cr_with_dir["identifier"] + ).files.count() # delete/unfreeze the files contained by described directory - deleted_file_ids = self._delete_files_from_directory_path('/TestExperiment/Directory_2/Group_2/Group_2_deeper') + deleted_file_ids = self._delete_files_from_directory_path( + "/TestExperiment/Directory_2/Group_2/Group_2_deeper" + ) # fix deprecated dataset - response = self.client.post('/rpc/datasets/fix_deprecated?identifier=%s' % cr_with_dir['identifier']) + response = self.client.post( + "/rpc/datasets/fix_deprecated?identifier=%s" % cr_with_dir["identifier"] + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # ensure old dataset is unchanged - response = self.client.get('/rest/datasets/%s' % cr_with_dir['identifier']) + response = self.client.get("/rest/datasets/%s" % cr_with_dir["identifier"]) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data['research_dataset']['directories']), 1, - 'old dataset version directories should still contain the removed directory') - self.assertTrue(response.data['deprecated'], 'old dataset version deprecated flag should not be changed') + self.assertEqual( + len(response.data["research_dataset"]["directories"]), + 1, + "old dataset version directories should still contain the removed directory", + ) + self.assertTrue( + response.data["deprecated"], + "old dataset version deprecated flag should not be changed", + ) # ensure the new dataset is correct - new_cr_version = self._check_new_dataset_version(cr_with_dir['identifier'], file_count_before, - deleted_file_ids) - self.assertTrue('directories' not in new_cr_version['research_dataset']) + new_cr_version = self._check_new_dataset_version( + cr_with_dir["identifier"], file_count_before, deleted_file_ids + ) + self.assertTrue("directories" not in new_cr_version["research_dataset"]) def test_fix_deprecated_nested_directories_1(self): """ This test adds parent directory to dataset and then deletes all files from sub directory. research_dataset should be unchanged and file count should be smaller for new version. 
""" - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2/Group_2') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2/Group_2") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) cr_before = response.data - file_count_before = CatalogRecord.objects.get(identifier=cr_before['identifier']).files.count() + file_count_before = CatalogRecord.objects.get( + identifier=cr_before["identifier"] + ).files.count() # delete/unfreeze the files contained by described directory - deleted_file_ids = self._delete_files_from_directory_path('/TestExperiment/Directory_2/Group_2/Group_2_deeper') + deleted_file_ids = self._delete_files_from_directory_path( + "/TestExperiment/Directory_2/Group_2/Group_2_deeper" + ) # fix deprecated dataset - response = self.client.post('/rpc/datasets/fix_deprecated?identifier=%s' % cr_before['identifier']) + response = self.client.post( + "/rpc/datasets/fix_deprecated?identifier=%s" % cr_before["identifier"] + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # ensure the new dataset is correct - new_cr_version = self._check_new_dataset_version(cr_before['identifier'], file_count_before, deleted_file_ids) - self.assertEqual(cr_before['research_dataset'].get('files'), - new_cr_version['research_dataset'].get('files'), 'should be no difference in research_dataset.files') - self.assertEqual(cr_before['research_dataset'].get('directories'), - new_cr_version['research_dataset'].get('directories'), 'should be no difference in research_dataset.dirs') + new_cr_version = self._check_new_dataset_version( + cr_before["identifier"], file_count_before, deleted_file_ids + ) + self.assertEqual( + cr_before["research_dataset"].get("files"), + new_cr_version["research_dataset"].get("files"), + "should be no difference in research_dataset.files", + ) + self.assertEqual( + cr_before["research_dataset"].get("directories"), + new_cr_version["research_dataset"].get("directories"), + "should be no difference in research_dataset.dirs", + ) def test_fix_deprecated_nested_directories_2(self): """ This test adds parent and sub directory to dataset and then deletes all files from sub directory. research_dataset and file count should change for new version. 
""" - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2/Group_2') - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2/Group_2/Group_2_deeper') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2/Group_2") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2/Group_2/Group_2_deeper") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) cr_before = response.data - file_count_before = CatalogRecord.objects.get(identifier=cr_before['identifier']).files.count() + file_count_before = CatalogRecord.objects.get( + identifier=cr_before["identifier"] + ).files.count() - deleted_file_ids = self._delete_files_from_directory_path('/TestExperiment/Directory_2/Group_2/Group_2_deeper') + deleted_file_ids = self._delete_files_from_directory_path( + "/TestExperiment/Directory_2/Group_2/Group_2_deeper" + ) # fix deprecated dataset - response = self.client.post('/rpc/datasets/fix_deprecated?identifier=%s' % cr_before['identifier']) + response = self.client.post( + "/rpc/datasets/fix_deprecated?identifier=%s" % cr_before["identifier"] + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # ensure the new dataset is correct - new_cr_version = self._check_new_dataset_version(cr_before['identifier'], file_count_before, deleted_file_ids) + new_cr_version = self._check_new_dataset_version( + cr_before["identifier"], file_count_before, deleted_file_ids + ) # description field conveniently has the dir path for directories which are saved by _add_directory - rd_dirpaths = [ d['description'] for d in new_cr_version['research_dataset']['directories'] ] - self.assertTrue('/TestExperiment/Directory_2/Group_2/Group_2_deeper' not in rd_dirpaths) + rd_dirpaths = [d["description"] for d in new_cr_version["research_dataset"]["directories"]] + self.assertTrue("/TestExperiment/Directory_2/Group_2/Group_2_deeper" not in rd_dirpaths) def test_fix_deprecated_nested_directories_3(self): """ This test adds parent and sub directory to dataset and then deletes all files from sub directory. research_dataset and file count should change for new version. 
""" - self._add_directory(self.cr_test_data, '/TestExperiment/Directory_2/Group_2') - self._add_file(self.cr_test_data, '/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_11.txt') - self._add_file(self.cr_test_data, '/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_12.txt') - self._add_file(self.cr_test_data, '/TestExperiment/Directory_2/Group_2/file_09.txt') - self._add_file(self.cr_test_data, '/TestExperiment/Directory_2/Group_2/file_10.txt') - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2/Group_2") + self._add_file( + self.cr_test_data, + "/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_11.txt", + ) + self._add_file( + self.cr_test_data, + "/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_12.txt", + ) + self._add_file(self.cr_test_data, "/TestExperiment/Directory_2/Group_2/file_09.txt") + self._add_file(self.cr_test_data, "/TestExperiment/Directory_2/Group_2/file_10.txt") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) cr_before = response.data - file_count_before = CatalogRecord.objects.get(identifier=cr_before['identifier']).files.count() + file_count_before = CatalogRecord.objects.get( + identifier=cr_before["identifier"] + ).files.count() # delete/unfreeze the files contained by described directory - deleted_file_ids = self._delete_files_from_directory_path('/TestExperiment/Directory_2/Group_2/Group_2_deeper') + deleted_file_ids = self._delete_files_from_directory_path( + "/TestExperiment/Directory_2/Group_2/Group_2_deeper" + ) # fix deprecated dataset - response = self.client.post('/rpc/datasets/fix_deprecated?identifier=%s' % cr_before['identifier']) + response = self.client.post( + "/rpc/datasets/fix_deprecated?identifier=%s" % cr_before["identifier"] + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # ensure the new dataset is correct - new_cr_version = self._check_new_dataset_version(cr_before['identifier'], file_count_before, deleted_file_ids) - rd_dirpaths = [ d['description'] for d in new_cr_version['research_dataset']['directories'] ] - rd_filepaths = [ f['description'] for f in new_cr_version['research_dataset']['files'] ] - self.assertTrue('/TestExperiment/Directory_2/Group_2/Group_2_deeper' not in rd_dirpaths) - self.assertTrue('/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_11.txt' not in rd_filepaths) - self.assertTrue('/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_12.txt' not in rd_filepaths) + new_cr_version = self._check_new_dataset_version( + cr_before["identifier"], file_count_before, deleted_file_ids + ) + rd_dirpaths = [d["description"] for d in new_cr_version["research_dataset"]["directories"]] + rd_filepaths = [f["description"] for f in new_cr_version["research_dataset"]["files"]] + self.assertTrue("/TestExperiment/Directory_2/Group_2/Group_2_deeper" not in rd_dirpaths) + self.assertTrue( + "/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_11.txt" not in rd_filepaths + ) + self.assertTrue( + "/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_12.txt" not in rd_filepaths + ) diff --git a/src/metax_api/tests/api/rpc/base/views/file_rpc.py b/src/metax_api/tests/api/rpc/base/views/file_rpc.py index ab780219..1e22da54 100755 --- a/src/metax_api/tests/api/rpc/base/views/file_rpc.py +++ b/src/metax_api/tests/api/rpc/base/views/file_rpc.py @@ -14,15 +14,15 @@ class 
FileRPCTests(APITestCase, TestClassUtils): - def setUp(self): """ Reloaded for every test case """ super().setUp() - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) self._use_http_authorization() + class DeleteProjectTests(FileRPCTests): """ @@ -32,39 +32,49 @@ class DeleteProjectTests(FileRPCTests): def test_wrong_parameters(self): # correct user, no project identifier - response = self.client.post('/rpc/files/delete_project') + response = self.client.post("/rpc/files/delete_project") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # nonexisting project identifier: - response = self.client.post('/rpc/files/delete_project?project_identifier=non_existing') - self.assertEqual(response.data['deleted_files_count'], 0) + response = self.client.post("/rpc/files/delete_project?project_identifier=non_existing") + self.assertEqual(response.data["deleted_files_count"], 0) # wrong request method - response = self.client.delete('/rpc/files/delete_project?project_identifier=research_project_112') + response = self.client.delete( + "/rpc/files/delete_project?project_identifier=research_project_112" + ) self.assertEqual(response.status_code, 501) # wrong user - self._use_http_authorization('api_auth_user') - response = self.client.post('/rpc/files/delete_project?project_identifier=research_project_112') + self._use_http_authorization("api_auth_user") + response = self.client.post( + "/rpc/files/delete_project?project_identifier=research_project_112" + ) # self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_known_project_identifier(self): - response = self.client.post('/rpc/files/delete_project?project_identifier=research_project_112') + response = self.client.post( + "/rpc/files/delete_project?project_identifier=research_project_112" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_files_are_marked_deleted(self): - files_count_before = File.objects.filter(project_identifier='research_project_112').count() - response = self.client.post('/rpc/files/delete_project?project_identifier=research_project_112') - self.assertEqual(files_count_before, response.data['deleted_files_count']) + files_count_before = File.objects.filter(project_identifier="research_project_112").count() + response = self.client.post( + "/rpc/files/delete_project?project_identifier=research_project_112" + ) + self.assertEqual(files_count_before, response.data["deleted_files_count"]) def test_directories_are_deleted(self): - self.client.post('/rpc/files/delete_project?project_identifier=research_project_112') - directories_count_after = Directory.objects.filter(project_identifier='research_project_112').count() + self.client.post("/rpc/files/delete_project?project_identifier=research_project_112") + directories_count_after = Directory.objects.filter( + project_identifier="research_project_112" + ).count() self.assertEqual(directories_count_after, 0) def test_datasets_are_marked_deprecated(self): - file_ids = File.objects.filter(project_identifier='project_x').values_list('id', flat=True) - related_dataset = CatalogRecord.objects.filter(files__in=file_ids).distinct('id')[0] - self.client.post('/rpc/files/delete_project?project_identifier=project_x') - response = self.client.get('/rest/datasets/%s' % related_dataset.identifier) - self.assertEqual(response.data['deprecated'], True) + file_ids = File.objects.filter(project_identifier="project_x").values_list("id", flat=True) + related_dataset = 
CatalogRecord.objects.filter(files__in=file_ids).distinct("id")[0] + self.client.post("/rpc/files/delete_project?project_identifier=project_x") + response = self.client.get("/rest/datasets/%s" % related_dataset.identifier) + self.assertEqual(response.data["deprecated"], True) diff --git a/src/metax_api/tests/api/rpc/base/views/statistic_rpc.py b/src/metax_api/tests/api/rpc/base/views/statistic_rpc.py index 623f4302..e330e325 100755 --- a/src/metax_api/tests/api/rpc/base/views/statistic_rpc.py +++ b/src/metax_api/tests/api/rpc/base/views/statistic_rpc.py @@ -20,23 +20,22 @@ class StatisticRPCCommon(APITestCase, TestClassUtils): - @classmethod def setUpClass(cls): """ Loaded only once for test cases inside this class. """ super().setUpClass() - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) def setUp(self): super().setUp() - self._use_http_authorization(username='metax') + self._use_http_authorization(username="metax") self._setup_testdata() def _setup_testdata(self): - test_orgs = ['org_1', 'org_2', 'org_3'] - access_types = [ v for k, v in ACCESS_TYPES.items() ] + test_orgs = ["org_1", "org_2", "org_3"] + access_types = [v for k, v in ACCESS_TYPES.items()] # ensure testdata has something sensible to test against. # todo may be feasible to move these to generate_test_data without breaking all tests ? @@ -53,72 +52,74 @@ def _setup_testdata(self): # distribute records between access types if cr.id % 5 == 0: - cr.research_dataset['access_rights']['access_type']['identifier'] = access_types[4] + cr.research_dataset["access_rights"]["access_type"]["identifier"] = access_types[4] elif cr.id % 4 == 0: - cr.research_dataset['access_rights']['access_type']['identifier'] = access_types[3] + cr.research_dataset["access_rights"]["access_type"]["identifier"] = access_types[3] elif cr.id % 3 == 0: - cr.research_dataset['access_rights']['access_type']['identifier'] = access_types[2] + cr.research_dataset["access_rights"]["access_type"]["identifier"] = access_types[2] elif cr.id % 2 == 0: - cr.research_dataset['access_rights']['access_type']['identifier'] = access_types[1] + cr.research_dataset["access_rights"]["access_type"]["identifier"] = access_types[1] else: - cr.research_dataset['access_rights']['access_type']['identifier'] = access_types[0] + cr.research_dataset["access_rights"]["access_type"]["identifier"] = access_types[0] # distribute records between some creation months - date = '20%s-%s-13' + date = "20%s-%s-13" if cr.id % 8 == 0: - cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ('18', '06')) + cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ("18", "06")) elif cr.id % 7 == 0: - cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ('18', '07')) + cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ("18", "07")) elif cr.id % 6 == 0: - cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ('18', '10')) + cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ("18", "10")) elif cr.id % 5 == 0: - cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ('18', '11')) + cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ("18", "11")) elif cr.id % 4 == 0: - cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ('18', '12')) + cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ("18", "12")) elif cr.id % 3 == 0: - cr.date_created = 
parse_timestamp_string_to_tz_aware_datetime(date % ('19', '01')) + cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ("19", "01")) elif cr.id % 2 == 0: - cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ('19', '02')) + cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ("19", "02")) else: - cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ('19', '03')) + cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ("19", "03")) # set some records as "created through end user api" if cr.id % 10 == 0: cr.service_created = None - cr.user_created = 'abc%d@fairdataid' % cr.id + cr.user_created = "abc%d@fairdataid" % cr.id cr.force_save() # create a few files which do not belong to any datasets - response = self.client.get('/rest/files/1', format='json') + response = self.client.get("/rest/files/1", format="json") files = [] for i in range(5): f = deepcopy(response.data) - del f['id'] - f['identifier'] += 'unique' + str(i) - f['file_name'] += str(i) - f['file_path'] += str(i) - f['project_identifier'] = 'prj123' + del f["id"] + f["identifier"] += "unique" + str(i) + f["file_name"] += str(i) + f["file_path"] += str(i) + f["project_identifier"] = "prj123" files.append(f) - response = self.client.post('/rest/files', files, format='json') + response = self.client.post("/rest/files", files, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - response = self.client.get('/rest/directories/update_byte_sizes_and_file_counts', format='json') + response = self.client.get( + "/rest/directories/update_byte_sizes_and_file_counts", format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get('/rest/datasets/update_cr_total_files_byte_sizes', format='json') + response = self.client.get("/rest/datasets/update_cr_total_files_byte_sizes", format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # create legacy datacatalog - dc = DataCatalog.objects.filter(catalog_json__research_dataset_schema='ida').first() - dc.catalog_json['identifier'] = settings.LEGACY_CATALOGS[0] + dc = DataCatalog.objects.filter(catalog_json__research_dataset_schema="ida").first() + dc.catalog_json["identifier"] = settings.LEGACY_CATALOGS[0] dc_json = { "catalog_record_services_create": "testuser,api_auth_user,metax", "catalog_record_services_edit": "testuser,api_auth_user,metax", - "catalog_json": dc.catalog_json + "catalog_json": dc.catalog_json, } - response = self.client.post('/rest/datacatalogs', dc_json, format="json") + response = self.client.post("/rest/datacatalogs", dc_json, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def _set_deprecated_dataset(self, id=1): @@ -136,16 +137,12 @@ def _create_legacy_dataset(self): Creates one new legacy dataset and returns its id """ legacy_dataset = { - "data_catalog": { - "identifier": settings.LEGACY_CATALOGS[0] - }, + "data_catalog": {"identifier": settings.LEGACY_CATALOGS[0]}, "metadata_owner_org": "some_org_id", "metadata_provider_org": "some_org_id", "metadata_provider_user": "some_user_id", "research_dataset": { - "title": { - "en": "Test Dataset Title" - }, + "title": {"en": "Test Dataset Title"}, "description": { "en": "A descriptive description describing the contents of this dataset. Must be descriptive." 
}, @@ -154,54 +151,44 @@ def _create_legacy_dataset(self): "identifier": "pid:urn:1", "title": "File Title", "description": "informative description", - "use_category": { - "identifier": "method" - } + "use_category": {"identifier": "method"}, }, { "identifier": "pid:urn:3", "title": "File Title", "description": "informative description", - "use_category": { - "identifier": "method" - } - } + "use_category": {"identifier": "method"}, + }, ], "creator": [ { "name": "Teppo Testaaja", "@type": "Person", "member_of": { - "name": { - "fi": "Testiorganisaatio" - }, - "@type": "Organization" - } + "name": {"fi": "Testiorganisaatio"}, + "@type": "Organization", + }, } ], "access_rights": { "access_type": { "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", - "pref_label": { - "fi": "Avoin", - "en": "Open", - "und": "Avoin" - }, - "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type" + "pref_label": {"fi": "Avoin", "en": "Open", "und": "Avoin"}, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", } }, - "preferred_identifier": "uniikkinen_aidentifaijeri" - } + "preferred_identifier": "uniikkinen_aidentifaijeri", + }, } - response = self.client.post('/rest/datasets', legacy_dataset, format="json") + response = self.client.post("/rest/datasets", legacy_dataset, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - return response.data['id'] + return response.data["id"] def _create_new_dataset(self, dataset_json): - response = self.client.post('/rest/datasets', dataset_json, format="json") + response = self.client.post("/rest/datasets", dataset_json, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - new_cr_id = response.data['id'] + new_cr_id = response.data["id"] return new_cr_id @@ -210,16 +197,16 @@ def _create_new_dataset_version(self, id=1): Finds the latest version of given dataset, deletes one file from it and updates. Does not check if there are files to be deleted. """ - cr = self.client.get(f'/rest/datasets/{id}?include_legacy', format='json').data - while cr.get('next_dataset_version', False): - id = cr['next_dataset_version']['id'] - cr = self.client.get(f'/rest/datasets/{id}?include_legacy', format='json').data + cr = self.client.get(f"/rest/datasets/{id}?include_legacy", format="json").data + while cr.get("next_dataset_version", False): + id = cr["next_dataset_version"]["id"] + cr = self.client.get(f"/rest/datasets/{id}?include_legacy", format="json").data - cr['research_dataset']['files'].pop() - response = self.client.put(f'/rest/datasets/{id}?include_legacy', cr, format="json") + cr["research_dataset"]["files"].pop() + response = self.client.put(f"/rest/datasets/{id}?include_legacy", cr, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - return response.data['next_dataset_version']['id'] + return response.data["next_dataset_version"]["id"] def _set_dataset_creation_date(self, cr_id, date): """ @@ -234,10 +221,9 @@ def _get_catalog_record_size(self, id): """ Returns the size of given record id """ - size = CatalogRecord\ - .objects_unfiltered\ - .get(id=id)\ - .research_dataset.get('total_files_byte_size', 0) + size = CatalogRecord.objects_unfiltered.get(id=id).research_dataset.get( + "total_files_byte_size", 0 + ) return size @@ -246,7 +232,7 @@ def _get_byte_size_of_month(self, date): Returns the byte size for given date. 
date is in format 'YYYY-MM' """ query = CatalogRecord.objects_unfiltered.filter(date_created__startswith=date) - list_of_sizes = [ cr.research_dataset.get('total_files_byte_size', 0) for cr in query ] + list_of_sizes = [cr.research_dataset.get("total_files_byte_size", 0) for cr in query] return sum(list_of_sizes) @@ -255,7 +241,7 @@ def _get_total_byte_size(self): Returns byte size of all datasets in database """ query = CatalogRecord.objects_unfiltered.all() - list_of_sizes = [cr.research_dataset.get('total_files_byte_size', 0) for cr in query] + list_of_sizes = [cr.research_dataset.get("total_files_byte_size", 0) for cr in query] return sum(list_of_sizes) @@ -269,7 +255,9 @@ def _get_dataset_count_after(self, date): """ Return the total count after the date provided (inclusive). date is in format 'YYYY-MM-DD' """ - return CatalogRecord.objects_unfiltered.filter(date_created__gte=f'{date}T00:00:00+03:00').count() + return CatalogRecord.objects_unfiltered.filter( + date_created__gte=f"{date}T00:00:00+03:00" + ).count() def _get_total_dataset_count(self): """ @@ -284,7 +272,7 @@ def _set_cr_datacatalog(self, cr_id, catalog_id): def _set_dataset_as_draft(self, cr_id): cr = CatalogRecord.objects.get(pk=cr_id) - cr.state = 'draft' + cr.state = "draft" cr.force_save() def _set_cr_organization(self, cr_id, org): @@ -304,37 +292,39 @@ def test_count_datasets_single(self): Tests single parameters for api. Empty removed and legacy parameters returns true AND false matches """ total_count = CatalogRecord.objects_unfiltered.count() - response = self.client.get('/rpc/statistics/count_datasets').data - self.assertEqual(total_count, response['count'], response) + response = self.client.get("/rpc/statistics/count_datasets").data + self.assertEqual(total_count, response["count"], response) # test removed -parameter self._set_removed_dataset(id=2) - response = self.client.get('/rpc/statistics/count_datasets?removed=true').data - self.assertEqual(response['count'], 1, response) + response = self.client.get("/rpc/statistics/count_datasets?removed=true").data + self.assertEqual(response["count"], 1, response) - response = self.client.get('/rpc/statistics/count_datasets?removed=false').data - self.assertEqual(response['count'], total_count - 1, response) + response = self.client.get("/rpc/statistics/count_datasets?removed=false").data + self.assertEqual(response["count"], total_count - 1, response) # test legacy -parameter self._create_legacy_dataset() total_count = CatalogRecord.objects_unfiltered.count() - response = self.client.get('/rpc/statistics/count_datasets?legacy=true').data - self.assertEqual(response['count'], 1, response) + response = self.client.get("/rpc/statistics/count_datasets?legacy=true").data + self.assertEqual(response["count"], 1, response) - response = self.client.get('/rpc/statistics/count_datasets?legacy=false').data - self.assertEqual(response['count'], total_count - 1, response) + response = self.client.get("/rpc/statistics/count_datasets?legacy=false").data + self.assertEqual(response["count"], total_count - 1, response) # test latest -parameter self._create_new_dataset_version() self._create_new_dataset_version() total_count = CatalogRecord.objects_unfiltered.count() - response = self.client.get('/rpc/statistics/count_datasets?latest=false').data # returns all - self.assertEqual(response['count'], total_count, response) + response = self.client.get( + "/rpc/statistics/count_datasets?latest=false" + ).data # returns all + self.assertEqual(response["count"], total_count, response) - 
with_param = self.client.get('/rpc/statistics/count_datasets?latest=true').data - without_param = self.client.get('/rpc/statistics/count_datasets').data # default is true - self.assertEqual(with_param['count'], total_count - 2, with_param) - self.assertEqual(with_param['count'], without_param['count'], with_param) + with_param = self.client.get("/rpc/statistics/count_datasets?latest=true").data + without_param = self.client.get("/rpc/statistics/count_datasets").data # default is true + self.assertEqual(with_param["count"], total_count - 2, with_param) + self.assertEqual(with_param["count"], without_param["count"], with_param) def test_count_datasets_removed_latest(self): second_ver = self._create_new_dataset_version() @@ -343,21 +333,27 @@ def test_count_datasets_removed_latest(self): self._set_removed_dataset(id=second_ver) self._set_removed_dataset(id=2) - rem_lat = self.client.get('/rpc/statistics/count_datasets?removed=true&latest=true').data - rem_not_lat = self.client.get('/rpc/statistics/count_datasets?removed=true&latest=false').data + rem_lat = self.client.get("/rpc/statistics/count_datasets?removed=true&latest=true").data + rem_not_lat = self.client.get( + "/rpc/statistics/count_datasets?removed=true&latest=false" + ).data - self.assertEqual(rem_lat['count'], 1, 'Only latest versions should be checked') # id=2 - self.assertEqual(rem_not_lat['count'], 3, 'Only the prev versions should be removed') + self.assertEqual(rem_lat["count"], 1, "Only latest versions should be checked") # id=2 + self.assertEqual(rem_not_lat["count"], 3, "Only the prev versions should be removed") # create new dataset with 2 versions - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self._create_new_dataset_version(response.data['id']) + self._create_new_dataset_version(response.data["id"]) - not_rem_lat = self.client.get('/rpc/statistics/count_datasets?removed=false&latest=true').data - not_rem_not_lat = self.client.get('/rpc/statistics/count_datasets?removed=false&latest=false').data + not_rem_lat = self.client.get( + "/rpc/statistics/count_datasets?removed=false&latest=true" + ).data + not_rem_not_lat = self.client.get( + "/rpc/statistics/count_datasets?removed=false&latest=false" + ).data - self.assertEqual(not_rem_lat['count'], not_rem_not_lat['count'] - 1) + self.assertEqual(not_rem_lat["count"], not_rem_not_lat["count"] - 1) def test_count_datasets_removed_legacy(self): self._create_legacy_dataset() @@ -366,15 +362,21 @@ def test_count_datasets_removed_legacy(self): self._set_removed_dataset(leg_cr) total_count = CatalogRecord.objects_unfiltered.count() - rem_leg = self.client.get('/rpc/statistics/count_datasets?removed=true&legacy=true').data - rem_not_leg = self.client.get('/rpc/statistics/count_datasets?removed=true&legacy=false').data - not_rem_leg = self.client.get('/rpc/statistics/count_datasets?removed=false&legacy=true').data - not_rem_not_leg = self.client.get('/rpc/statistics/count_datasets?removed=false&legacy=false').data - - self.assertEqual(rem_leg['count'], 1) - self.assertEqual(rem_not_leg['count'], 0) - self.assertEqual(not_rem_leg['count'], 2) - self.assertEqual(not_rem_not_leg['count'], total_count - 3) + rem_leg = self.client.get("/rpc/statistics/count_datasets?removed=true&legacy=true").data + rem_not_leg = self.client.get( + "/rpc/statistics/count_datasets?removed=true&legacy=false" + 
).data + not_rem_leg = self.client.get( + "/rpc/statistics/count_datasets?removed=false&legacy=true" + ).data + not_rem_not_leg = self.client.get( + "/rpc/statistics/count_datasets?removed=false&legacy=false" + ).data + + self.assertEqual(rem_leg["count"], 1) + self.assertEqual(rem_not_leg["count"], 0) + self.assertEqual(not_rem_leg["count"], 2) + self.assertEqual(not_rem_not_leg["count"], total_count - 3) def test_count_datasets_latest_legacy(self): leg_cr = self._create_legacy_dataset() @@ -382,46 +384,51 @@ def test_count_datasets_latest_legacy(self): self._create_new_dataset_version(leg_cr) total_count = CatalogRecord.objects_unfiltered.count() - leg_lat = self.client.get('/rpc/statistics/count_datasets?legacy=true&latest=true').data - leg_not_lat = self.client.get('/rpc/statistics/count_datasets?legacy=true&latest=false').data - not_leg_not_lat = self.client.get('/rpc/statistics/count_datasets?legacy=false&latest=false').data + leg_lat = self.client.get("/rpc/statistics/count_datasets?legacy=true&latest=true").data + leg_not_lat = self.client.get( + "/rpc/statistics/count_datasets?legacy=true&latest=false" + ).data + not_leg_not_lat = self.client.get( + "/rpc/statistics/count_datasets?legacy=false&latest=false" + ).data - self.assertEqual(leg_lat['count'], 1) - self.assertEqual(leg_not_lat['count'], 3) - self.assertEqual(not_leg_not_lat['count'], total_count - 3) + self.assertEqual(leg_lat["count"], 1) + self.assertEqual(leg_not_lat["count"], 3) + self.assertEqual(not_leg_not_lat["count"], total_count - 3) def test_count_datasets_from_date(self): total_count = CatalogRecord.objects_unfiltered.count() - june_count = self._get_dataset_count_of_month('2018-06') - july_count = self._get_dataset_count_of_month('2018-07') + june_count = self._get_dataset_count_of_month("2018-06") + july_count = self._get_dataset_count_of_month("2018-07") - res = self.client.get('/rpc/statistics/count_datasets?from_date=2018-07-01').data - self.assertEqual(res['count'], total_count - june_count) + res = self.client.get("/rpc/statistics/count_datasets?from_date=2018-07-01").data + self.assertEqual(res["count"], total_count - june_count) # datasets are created on 13th so this should include august count - res = self.client.get('/rpc/statistics/count_datasets?from_date=2018-08-13').data - self.assertEqual(res['count'], total_count - june_count - july_count) + res = self.client.get("/rpc/statistics/count_datasets?from_date=2018-08-13").data + self.assertEqual(res["count"], total_count - june_count - july_count) def test_count_datasets_to_date(self): total_count = CatalogRecord.objects_unfiltered.count() - after_jan_count = self._get_dataset_count_after('2019-01-01') - after_feb_count = self._get_dataset_count_after('2019-02-14') + after_jan_count = self._get_dataset_count_after("2019-01-01") + after_feb_count = self._get_dataset_count_after("2019-02-14") + + res = self.client.get("/rpc/statistics/count_datasets?to_date=2019-01-01").data + self.assertEqual(res["count"], total_count - after_jan_count) - res = self.client.get('/rpc/statistics/count_datasets?to_date=2019-01-01').data - self.assertEqual(res['count'], total_count - after_jan_count) + res = self.client.get("/rpc/statistics/count_datasets?to_date=2019-02-13").data + self.assertEqual(res["count"], total_count - after_feb_count) - res = self.client.get('/rpc/statistics/count_datasets?to_date=2019-02-13').data - self.assertEqual(res['count'], total_count - after_feb_count) class StatisticRPCAllDatasetsCumulative(StatisticRPCCommon, 
CatalogRecordApiWriteCommon): """ Test suite for all_datasets_cumulative. Test only optional parameters removed, legacy and latest for now. """ - url = '/rpc/statistics/all_datasets_cumulative' - dateparam_all = 'from_date=2018-06&to_date=2019-03' + url = "/rpc/statistics/all_datasets_cumulative" + dateparam_all = "from_date=2018-06&to_date=2019-03" def test_all_datasets_cumulative(self): """ @@ -435,50 +442,58 @@ def test_all_datasets_cumulative(self): """ # test bad query parameters - response = self.client.get(f'{self.url}') - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, 'from_date and to_date are required') + response = self.client.get(f"{self.url}") + self.assertEqual( + response.status_code, + status.HTTP_400_BAD_REQUEST, + "from_date and to_date are required", + ) - response = self.client.get(f'{self.url}?from_date=2019-11&to_date=bad_parameter') - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, 'date format is YYYY-MM') + response = self.client.get(f"{self.url}?from_date=2019-11&to_date=bad_parameter") + self.assertEqual( + response.status_code, status.HTTP_400_BAD_REQUEST, "date format is YYYY-MM" + ) - response = self.client.get(f'{self.url}?from_date=2019-11&to_date=2019-11-15') - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, 'date format is YYYY-MM') + response = self.client.get(f"{self.url}?from_date=2019-11&to_date=2019-11-15") + self.assertEqual( + response.status_code, status.HTTP_400_BAD_REQUEST, "date format is YYYY-MM" + ) # test the basic functionality - june_size = self._get_byte_size_of_month('2018-06') - june_count = self._get_dataset_count_of_month('2018-06') + june_size = self._get_byte_size_of_month("2018-06") + june_count = self._get_dataset_count_of_month("2018-06") - july_size = self._get_byte_size_of_month('2018-07') - july_count = self._get_dataset_count_of_month('2018-07') + july_size = self._get_byte_size_of_month("2018-07") + july_count = self._get_dataset_count_of_month("2018-07") - march_size = self._get_byte_size_of_month('2019-03') - march_count = self._get_dataset_count_of_month('2019-03') + march_size = self._get_byte_size_of_month("2019-03") + march_count = self._get_dataset_count_of_month("2019-03") total_count = self._get_total_dataset_count() total_size = self._get_total_byte_size() - response = self.client.get(f'{self.url}?{self.dateparam_all}').data + response = self.client.get(f"{self.url}?{self.dateparam_all}").data # ensure the counts and byte sizes are calculated correctly - self.assertEqual(response[0]['count'], june_count, response) - self.assertEqual(response[0]['ida_byte_size'], june_size, response) - self.assertEqual(response[0]['count_cumulative'], june_count, response) - self.assertEqual(response[0]['ida_byte_size_cumulative'], june_size, response) + self.assertEqual(response[0]["count"], june_count, response) + self.assertEqual(response[0]["ida_byte_size"], june_size, response) + self.assertEqual(response[0]["count_cumulative"], june_count, response) + self.assertEqual(response[0]["ida_byte_size_cumulative"], june_size, response) - self.assertEqual(response[1]['count'], july_count, response) - self.assertEqual(response[1]['ida_byte_size'], july_size, response) - self.assertEqual(response[1]['count_cumulative'], june_count + july_count, response) - self.assertEqual(response[1]['ida_byte_size_cumulative'], june_size + july_size, response) + self.assertEqual(response[1]["count"], july_count, response) + self.assertEqual(response[1]["ida_byte_size"], july_size, response) + 
self.assertEqual(response[1]["count_cumulative"], june_count + july_count, response) + self.assertEqual(response[1]["ida_byte_size_cumulative"], june_size + july_size, response) - self.assertEqual(response[-1]['count'], march_count, response) - self.assertEqual(response[-1]['ida_byte_size'], march_size, response) - self.assertEqual(response[-1]['count_cumulative'], total_count, response) - self.assertEqual(response[-1]['ida_byte_size_cumulative'], total_size, response) + self.assertEqual(response[-1]["count"], march_count, response) + self.assertEqual(response[-1]["ida_byte_size"], march_size, response) + self.assertEqual(response[-1]["count_cumulative"], total_count, response) + self.assertEqual(response[-1]["ida_byte_size_cumulative"], total_size, response) # test that only datasets from beginning of from_date is counted - response = self.client.get(f'{self.url}?from_date=2018-07&to_date=2019-03').data + response = self.client.get(f"{self.url}?from_date=2018-07&to_date=2019-03").data - self.assertEqual(response[-1]['count_cumulative'], total_count - june_count, response) - self.assertEqual(response[-1]['ida_byte_size_cumulative'], total_size - june_size, response) + self.assertEqual(response[-1]["count_cumulative"], total_count - june_count, response) + self.assertEqual(response[-1]["ida_byte_size_cumulative"], total_size - june_size, response) def test_all_datasets_cumulative_single(self): """ @@ -488,92 +503,116 @@ def test_all_datasets_cumulative_single(self): total_size = self._get_total_byte_size() # test removed -parameter - june_size = self._get_byte_size_of_month('2018-06') - june_count = self._get_dataset_count_of_month('2018-06') + june_size = self._get_byte_size_of_month("2018-06") + june_count = self._get_dataset_count_of_month("2018-06") - self._set_removed_dataset(id=8) # belongs to 2018-06, i.e. the first interval + self._set_removed_dataset(id=8) # belongs to 2018-06, i.e. 
the first interval removed_size = self._get_catalog_record_size(id=8) - response = self.client.get(f'{self.url}?{self.dateparam_all}&removed=true').data + response = self.client.get(f"{self.url}?{self.dateparam_all}&removed=true").data # ensure that only the first month (2018-06) contains dataset and that cumulative is calculated correctly - self.assertEqual(response[0]['count'], 1, response) - self.assertEqual(response[0]['ida_byte_size'], removed_size, response) - self.assertEqual(response[0]['count_cumulative'], 1, response) - self.assertEqual(response[0]['ida_byte_size_cumulative'], removed_size, response) + self.assertEqual(response[0]["count"], 1, response) + self.assertEqual(response[0]["ida_byte_size"], removed_size, response) + self.assertEqual(response[0]["count_cumulative"], 1, response) + self.assertEqual(response[0]["ida_byte_size_cumulative"], removed_size, response) - self.assertEqual(response[-1]['count_cumulative'], 1, response) - self.assertEqual(response[-1]['ida_byte_size_cumulative'], removed_size, response) + self.assertEqual(response[-1]["count_cumulative"], 1, response) + self.assertEqual(response[-1]["ida_byte_size_cumulative"], removed_size, response) - response = self.client.get(f'{self.url}?{self.dateparam_all}&removed=false').data + response = self.client.get(f"{self.url}?{self.dateparam_all}&removed=false").data # ensure that the correct dataset is missing from results - self.assertEqual(response[0]['count'], june_count - 1, response) - self.assertEqual(response[0]['ida_byte_size'], june_size - removed_size, response) - self.assertEqual(response[-1]['count_cumulative'], total_count - 1, response) - self.assertEqual(response[-1]['ida_byte_size_cumulative'], total_size - removed_size, response) + self.assertEqual(response[0]["count"], june_count - 1, response) + self.assertEqual(response[0]["ida_byte_size"], june_size - removed_size, response) + self.assertEqual(response[-1]["count_cumulative"], total_count - 1, response) + self.assertEqual( + response[-1]["ida_byte_size_cumulative"], + total_size - removed_size, + response, + ) # test legacy -parameter - leg_cr = self._create_legacy_dataset() # legacy cr belongs to 2019-03, i.e. the last interval - self._set_dataset_creation_date(leg_cr, '2019-03-13') + leg_cr = ( + self._create_legacy_dataset() + ) # legacy cr belongs to 2019-03, i.e. 
the last interval + self._set_dataset_creation_date(leg_cr, "2019-03-13") legacy_size = self._get_catalog_record_size(id=leg_cr) total_count = self._get_total_dataset_count() total_size = self._get_total_byte_size() - march_size = self._get_byte_size_of_month('2019-03') - march_count = self._get_dataset_count_of_month('2019-03') + march_size = self._get_byte_size_of_month("2019-03") + march_count = self._get_dataset_count_of_month("2019-03") - response = self.client.get(f'{self.url}?{self.dateparam_all}&legacy=true').data + response = self.client.get(f"{self.url}?{self.dateparam_all}&legacy=true").data - self.assertEqual(response[-1]['count'], 1, response) - self.assertEqual(response[-1]['ida_byte_size'], legacy_size, response) - self.assertEqual(response[-1]['count_cumulative'], 1, response) - self.assertEqual(response[-1]['ida_byte_size_cumulative'], legacy_size, response) + self.assertEqual(response[-1]["count"], 1, response) + self.assertEqual(response[-1]["ida_byte_size"], legacy_size, response) + self.assertEqual(response[-1]["count_cumulative"], 1, response) + self.assertEqual(response[-1]["ida_byte_size_cumulative"], legacy_size, response) - response = self.client.get(f'{self.url}?{self.dateparam_all}&legacy=false').data + response = self.client.get(f"{self.url}?{self.dateparam_all}&legacy=false").data - self.assertEqual(response[-1]['count'], march_count - 1, response) - self.assertEqual(response[-1]['ida_byte_size'], march_size - legacy_size, response) - self.assertEqual(response[-1]['count_cumulative'], total_count - 1, response) - self.assertEqual(response[-1]['ida_byte_size_cumulative'], total_size - legacy_size, response) + self.assertEqual(response[-1]["count"], march_count - 1, response) + self.assertEqual(response[-1]["ida_byte_size"], march_size - legacy_size, response) + self.assertEqual(response[-1]["count_cumulative"], total_count - 1, response) + self.assertEqual( + response[-1]["ida_byte_size_cumulative"], total_size - legacy_size, response + ) # test latest -parameter # new versions will belong to 2019-03, i.e. 
the last interval second = self._create_new_dataset_version() - self._set_dataset_creation_date(second, '2019-03-17') + self._set_dataset_creation_date(second, "2019-03-17") old_ver_size = self._get_catalog_record_size(id=1) total_count = self._get_total_dataset_count() total_size = self._get_total_byte_size() - march_size = self._get_byte_size_of_month('2019-03') - march_count = self._get_dataset_count_of_month('2019-03') - - response = self.client.get(f'{self.url}?{self.dateparam_all}&latest=false').data # returns all - self.assertEqual(response[-1]['count'], march_count, response) - self.assertEqual(response[-1]['ida_byte_size'], march_size, response) - self.assertEqual(response[-1]['count_cumulative'], total_count, response) - self.assertEqual(response[-1]['ida_byte_size_cumulative'], total_size, response) - - with_param = self.client.get(f'{self.url}?{self.dateparam_all}&latest=true').data - self.assertEqual(with_param[-1]['count'], march_count - 1, with_param) - self.assertEqual(with_param[-1]['ida_byte_size'], march_size - old_ver_size, with_param) - self.assertEqual(with_param[-1]['count_cumulative'], total_count - 1, with_param) - self.assertEqual(with_param[-1]['ida_byte_size_cumulative'], total_size - old_ver_size, response) + march_size = self._get_byte_size_of_month("2019-03") + march_count = self._get_dataset_count_of_month("2019-03") + + response = self.client.get( + f"{self.url}?{self.dateparam_all}&latest=false" + ).data # returns all + self.assertEqual(response[-1]["count"], march_count, response) + self.assertEqual(response[-1]["ida_byte_size"], march_size, response) + self.assertEqual(response[-1]["count_cumulative"], total_count, response) + self.assertEqual(response[-1]["ida_byte_size_cumulative"], total_size, response) + + with_param = self.client.get(f"{self.url}?{self.dateparam_all}&latest=true").data + self.assertEqual(with_param[-1]["count"], march_count - 1, with_param) + self.assertEqual(with_param[-1]["ida_byte_size"], march_size - old_ver_size, with_param) + self.assertEqual(with_param[-1]["count_cumulative"], total_count - 1, with_param) + self.assertEqual( + with_param[-1]["ida_byte_size_cumulative"], + total_size - old_ver_size, + response, + ) # ensure that default value(true) is working as expected - without_param = self.client.get(f'{self.url}?{self.dateparam_all}').data - self.assertEqual(with_param[-1]['count'], without_param[-1]['count'], with_param) - self.assertEqual(with_param[-1]['ida_byte_size'], without_param[-1]['ida_byte_size'], with_param) - self.assertEqual(with_param[-1]['count_cumulative'], without_param[-1]['count_cumulative'], with_param) + without_param = self.client.get(f"{self.url}?{self.dateparam_all}").data + self.assertEqual(with_param[-1]["count"], without_param[-1]["count"], with_param) + self.assertEqual( + with_param[-1]["ida_byte_size"], + without_param[-1]["ida_byte_size"], + with_param, + ) self.assertEqual( - with_param[-1]['ida_byte_size_cumulative'], without_param[-1]['ida_byte_size_cumulative'], with_param + with_param[-1]["count_cumulative"], + without_param[-1]["count_cumulative"], + with_param, + ) + self.assertEqual( + with_param[-1]["ida_byte_size_cumulative"], + without_param[-1]["ida_byte_size_cumulative"], + with_param, ) def test_all_datasets_cumulative_removed_latest(self): second = self._create_new_dataset_version() - self._set_dataset_creation_date(second, '2019-03-11') + self._set_dataset_creation_date(second, "2019-03-11") self._set_removed_dataset(id=1) self._set_removed_dataset(id=second) @@ -582,61 
+621,81 @@ def test_all_datasets_cumulative_removed_latest(self):
         removed_size = self._get_catalog_record_size(id=1) + latest_size
         removed_count = 2

-        rem_lat = self.client.get(f'{self.url}?{self.dateparam_all}&removed=true&latest=true').data
+        rem_lat = self.client.get(f"{self.url}?{self.dateparam_all}&removed=true&latest=true").data

-        self.assertEqual(rem_lat[-1]['count'], 1, rem_lat) # id=second
-        self.assertEqual(rem_lat[-1]['ida_byte_size'], latest_size, rem_lat)
-        self.assertEqual(rem_lat[-1]['count_cumulative'], 1, rem_lat)
-        self.assertEqual(rem_lat[-1]['ida_byte_size_cumulative'], latest_size, rem_lat)
+        self.assertEqual(rem_lat[-1]["count"], 1, rem_lat)  # id=second
+        self.assertEqual(rem_lat[-1]["ida_byte_size"], latest_size, rem_lat)
+        self.assertEqual(rem_lat[-1]["count_cumulative"], 1, rem_lat)
+        self.assertEqual(rem_lat[-1]["ida_byte_size_cumulative"], latest_size, rem_lat)

-        rem_not_lat = self.client.get(f'{self.url}?{self.dateparam_all}&removed=true&latest=false').data
+        rem_not_lat = self.client.get(
+            f"{self.url}?{self.dateparam_all}&removed=true&latest=false"
+        ).data

-        self.assertEqual(rem_not_lat[-1]['count'], removed_count, rem_not_lat) # id=second
-        self.assertEqual(rem_not_lat[-1]['ida_byte_size'], removed_size, rem_not_lat)
-        self.assertEqual(rem_not_lat[-1]['count_cumulative'], removed_count, rem_not_lat)
-        self.assertEqual(rem_not_lat[-1]['ida_byte_size_cumulative'], removed_size, rem_not_lat)
+        self.assertEqual(rem_not_lat[-1]["count"], removed_count, rem_not_lat)  # id=1 and id=second
+        self.assertEqual(rem_not_lat[-1]["ida_byte_size"], removed_size, rem_not_lat)
+        self.assertEqual(rem_not_lat[-1]["count_cumulative"], removed_count, rem_not_lat)
+        self.assertEqual(rem_not_lat[-1]["ida_byte_size_cumulative"], removed_size, rem_not_lat)

         # create new dataset with two versions, which will not be deleted
         new_cr_id = self._create_new_dataset(self.cr_test_data)
-        self._set_dataset_creation_date(new_cr_id, '2019-01-02')
+        self._set_dataset_creation_date(new_cr_id, "2019-01-02")

         new_cr_ver = self._create_new_dataset_version(new_cr_id)
-        self._set_dataset_creation_date(new_cr_ver, '2019-01-06')
+        self._set_dataset_creation_date(new_cr_ver, "2019-01-06")

         old_version_size = self._get_catalog_record_size(id=new_cr_id)

-        jan_count = self._get_dataset_count_of_month('2019-01')
-        jan_size = self._get_byte_size_of_month('2019-01')
+        jan_count = self._get_dataset_count_of_month("2019-01")
+        jan_size = self._get_byte_size_of_month("2019-01")

         total_count = self._get_total_dataset_count()
         total_size = self._get_total_byte_size()

-        not_rem_lat = self.client.get(f'{self.url}?{self.dateparam_all}&removed=false&latest=true').data
+        not_rem_lat = self.client.get(
+            f"{self.url}?{self.dateparam_all}&removed=false&latest=true"
+        ).data

         # missing the removed dataset from before and dataset id='new_cr_id'
-        self.assertEqual(not_rem_lat[-3]['count'], jan_count - 1, not_rem_lat)
-        self.assertEqual(not_rem_lat[-3]['ida_byte_size'], jan_size - old_version_size, not_rem_lat)
-        self.assertEqual(not_rem_lat[-1]['count_cumulative'], total_count - removed_count - 1, not_rem_lat)
-        self.assertEqual(not_rem_lat[-1]['ida_byte_size_cumulative'], total_size - removed_size - old_version_size,
-            not_rem_lat)
+        self.assertEqual(not_rem_lat[-3]["count"], jan_count - 1, not_rem_lat)
+        self.assertEqual(not_rem_lat[-3]["ida_byte_size"], jan_size - old_version_size, not_rem_lat)
+        self.assertEqual(
+            not_rem_lat[-1]["count_cumulative"],
+            total_count - removed_count - 1,
+            not_rem_lat,
+        )
+        self.assertEqual(
+
not_rem_lat[-1]["ida_byte_size_cumulative"], + total_size - removed_size - old_version_size, + not_rem_lat, + ) - not_rem_not_lat = self.client.get(f'{self.url}?{self.dateparam_all}&removed=false&latest=false').data + not_rem_not_lat = self.client.get( + f"{self.url}?{self.dateparam_all}&removed=false&latest=false" + ).data - self.assertEqual(not_rem_not_lat[-3]['count'], jan_count, not_rem_not_lat) - self.assertEqual(not_rem_not_lat[-3]['ida_byte_size'], jan_size, not_rem_not_lat) - self.assertEqual(not_rem_not_lat[-1]['count_cumulative'], total_count - removed_count, - not_rem_not_lat) - self.assertEqual(not_rem_not_lat[-1]['ida_byte_size_cumulative'], total_size - removed_size, not_rem_not_lat) + self.assertEqual(not_rem_not_lat[-3]["count"], jan_count, not_rem_not_lat) + self.assertEqual(not_rem_not_lat[-3]["ida_byte_size"], jan_size, not_rem_not_lat) + self.assertEqual( + not_rem_not_lat[-1]["count_cumulative"], + total_count - removed_count, + not_rem_not_lat, + ) + self.assertEqual( + not_rem_not_lat[-1]["ida_byte_size_cumulative"], + total_size - removed_size, + not_rem_not_lat, + ) def test_all_datasets_cumulative_removed_legacy(self): leg_cr_1 = self._create_legacy_dataset() - self._set_dataset_creation_date(leg_cr_1, '2018-07-03') + self._set_dataset_creation_date(leg_cr_1, "2018-07-03") leg_cr_2 = self._create_legacy_dataset() - self._set_dataset_creation_date(leg_cr_2, '2019-02-08') + self._set_dataset_creation_date(leg_cr_2, "2019-02-08") self._set_removed_dataset(leg_cr_2) - self._set_removed_dataset(id=8) # belongs to first interval, i.e. 2018-06 + self._set_removed_dataset(id=8) # belongs to first interval, i.e. 2018-06 leg_non_rem_size = self._get_catalog_record_size(leg_cr_1) leg_removed_size = self._get_catalog_record_size(leg_cr_2) @@ -648,48 +707,70 @@ def test_all_datasets_cumulative_removed_legacy(self): total_count = self._get_total_dataset_count() total_size = self._get_total_byte_size() - june_count = self._get_dataset_count_of_month('2018-06') - june_size = self._get_byte_size_of_month('2018-06') + june_count = self._get_dataset_count_of_month("2018-06") + june_size = self._get_byte_size_of_month("2018-06") - feb_count = self._get_dataset_count_of_month('2019-02') - feb_size = self._get_byte_size_of_month('2019-02') + feb_count = self._get_dataset_count_of_month("2019-02") + feb_size = self._get_byte_size_of_month("2019-02") - rem_leg = self.client.get(f'{self.url}?{self.dateparam_all}&removed=true&legacy=true').data + rem_leg = self.client.get(f"{self.url}?{self.dateparam_all}&removed=true&legacy=true").data - self.assertEqual(rem_leg[-2]['count'], 1, rem_leg) - self.assertEqual(rem_leg[-2]['ida_byte_size'], leg_removed_size, rem_leg) - self.assertEqual(rem_leg[-1]['count_cumulative'], 1, rem_leg) - self.assertEqual(rem_leg[-1]['ida_byte_size_cumulative'], leg_removed_size, rem_leg) + self.assertEqual(rem_leg[-2]["count"], 1, rem_leg) + self.assertEqual(rem_leg[-2]["ida_byte_size"], leg_removed_size, rem_leg) + self.assertEqual(rem_leg[-1]["count_cumulative"], 1, rem_leg) + self.assertEqual(rem_leg[-1]["ida_byte_size_cumulative"], leg_removed_size, rem_leg) - rem_not_leg = self.client.get(f'{self.url}?{self.dateparam_all}&removed=true&legacy=false').data + rem_not_leg = self.client.get( + f"{self.url}?{self.dateparam_all}&removed=true&legacy=false" + ).data - self.assertEqual(rem_not_leg[0]['count'], 1, rem_not_leg) - self.assertEqual(rem_not_leg[0]['ida_byte_size'], removed_size, rem_not_leg) - self.assertEqual(rem_not_leg[-1]['count_cumulative'], 1, 
rem_not_leg) - self.assertEqual(rem_not_leg[-1]['ida_byte_size_cumulative'], removed_size, rem_not_leg) + self.assertEqual(rem_not_leg[0]["count"], 1, rem_not_leg) + self.assertEqual(rem_not_leg[0]["ida_byte_size"], removed_size, rem_not_leg) + self.assertEqual(rem_not_leg[-1]["count_cumulative"], 1, rem_not_leg) + self.assertEqual(rem_not_leg[-1]["ida_byte_size_cumulative"], removed_size, rem_not_leg) - not_rem_leg = self.client.get(f'{self.url}?{self.dateparam_all}&removed=false&legacy=true').data + not_rem_leg = self.client.get( + f"{self.url}?{self.dateparam_all}&removed=false&legacy=true" + ).data - self.assertEqual(not_rem_leg[1]['count'], 1, not_rem_leg) - self.assertEqual(not_rem_leg[1]['ida_byte_size'], leg_non_rem_size, not_rem_leg) - self.assertEqual(not_rem_leg[-1]['count_cumulative'], 1, not_rem_leg) - self.assertEqual(not_rem_leg[-1]['ida_byte_size_cumulative'], leg_non_rem_size, not_rem_leg) + self.assertEqual(not_rem_leg[1]["count"], 1, not_rem_leg) + self.assertEqual(not_rem_leg[1]["ida_byte_size"], leg_non_rem_size, not_rem_leg) + self.assertEqual(not_rem_leg[-1]["count_cumulative"], 1, not_rem_leg) + self.assertEqual(not_rem_leg[-1]["ida_byte_size_cumulative"], leg_non_rem_size, not_rem_leg) - not_rem_not_leg = self.client.get(f'{self.url}?{self.dateparam_all}&removed=false&legacy=false').data + not_rem_not_leg = self.client.get( + f"{self.url}?{self.dateparam_all}&removed=false&legacy=false" + ).data - self.assertEqual(not_rem_not_leg[0]['count'], june_count - 1, not_rem_not_leg) - self.assertEqual(not_rem_not_leg[0]['ida_byte_size'], june_size - removed_size, not_rem_not_leg) - self.assertEqual(not_rem_not_leg[-2]['count'], feb_count - 1, not_rem_not_leg) - self.assertEqual(not_rem_not_leg[-2]['ida_byte_size'], feb_size - leg_removed_size, not_rem_not_leg) - self.assertEqual(not_rem_not_leg[-1]['count_cumulative'], total_count - rem_leg_count, not_rem_not_leg) - self.assertEqual(not_rem_not_leg[-1]['ida_byte_size_cumulative'], total_size - rem_leg_size, not_rem_not_leg) + self.assertEqual(not_rem_not_leg[0]["count"], june_count - 1, not_rem_not_leg) + self.assertEqual( + not_rem_not_leg[0]["ida_byte_size"], + june_size - removed_size, + not_rem_not_leg, + ) + self.assertEqual(not_rem_not_leg[-2]["count"], feb_count - 1, not_rem_not_leg) + self.assertEqual( + not_rem_not_leg[-2]["ida_byte_size"], + feb_size - leg_removed_size, + not_rem_not_leg, + ) + self.assertEqual( + not_rem_not_leg[-1]["count_cumulative"], + total_count - rem_leg_count, + not_rem_not_leg, + ) + self.assertEqual( + not_rem_not_leg[-1]["ida_byte_size_cumulative"], + total_size - rem_leg_size, + not_rem_not_leg, + ) def test_all_datasets_cumulative_latest_legacy(self): leg_cr = self._create_legacy_dataset() - self._set_dataset_creation_date(leg_cr, '2019-03-08') + self._set_dataset_creation_date(leg_cr, "2019-03-08") second = self._create_new_dataset_version(leg_cr) - self._set_dataset_creation_date(second, '2019-03-12') + self._set_dataset_creation_date(second, "2019-03-12") leg_cr_size = self._get_catalog_record_size(id=leg_cr) second_size = self._get_catalog_record_size(id=second) @@ -700,113 +781,154 @@ def test_all_datasets_cumulative_latest_legacy(self): total_count = self._get_total_dataset_count() total_size = self._get_total_byte_size() - march_count = self._get_dataset_count_of_month('2019-03') - march_size = self._get_byte_size_of_month('2019-03') + march_count = self._get_dataset_count_of_month("2019-03") + march_size = self._get_byte_size_of_month("2019-03") - leg_lat = 
self.client.get(f'{self.url}?{self.dateparam_all}&legacy=true&latest=true').data + leg_lat = self.client.get(f"{self.url}?{self.dateparam_all}&legacy=true&latest=true").data - self.assertEqual(leg_lat[-1]['count'], 1, leg_lat) - self.assertEqual(leg_lat[-1]['ida_byte_size'], second_size, leg_lat) - self.assertEqual(leg_lat[-1]['count_cumulative'], 1, leg_lat) - self.assertEqual(leg_lat[-1]['ida_byte_size_cumulative'], second_size, leg_lat) + self.assertEqual(leg_lat[-1]["count"], 1, leg_lat) + self.assertEqual(leg_lat[-1]["ida_byte_size"], second_size, leg_lat) + self.assertEqual(leg_lat[-1]["count_cumulative"], 1, leg_lat) + self.assertEqual(leg_lat[-1]["ida_byte_size_cumulative"], second_size, leg_lat) - leg_not_lat = self.client.get(f'{self.url}?{self.dateparam_all}&legacy=true&latest=false').data + leg_not_lat = self.client.get( + f"{self.url}?{self.dateparam_all}&legacy=true&latest=false" + ).data - self.assertEqual(leg_not_lat[-1]['count'], legacy_count, leg_not_lat) - self.assertEqual(leg_not_lat[-1]['ida_byte_size'], legacy_size, leg_not_lat) - self.assertEqual(leg_not_lat[-1]['count_cumulative'], legacy_count, leg_not_lat) - self.assertEqual(leg_not_lat[-1]['ida_byte_size_cumulative'], legacy_size, leg_not_lat) + self.assertEqual(leg_not_lat[-1]["count"], legacy_count, leg_not_lat) + self.assertEqual(leg_not_lat[-1]["ida_byte_size"], legacy_size, leg_not_lat) + self.assertEqual(leg_not_lat[-1]["count_cumulative"], legacy_count, leg_not_lat) + self.assertEqual(leg_not_lat[-1]["ida_byte_size_cumulative"], legacy_size, leg_not_lat) - not_leg_not_lat = self.client.get(f'{self.url}?{self.dateparam_all}&legacy=false&latest=false').data + not_leg_not_lat = self.client.get( + f"{self.url}?{self.dateparam_all}&legacy=false&latest=false" + ).data - self.assertEqual(not_leg_not_lat[-1]['count'], march_count - legacy_count, not_leg_not_lat) - self.assertEqual(not_leg_not_lat[-1]['ida_byte_size'], march_size - legacy_size, not_leg_not_lat) - self.assertEqual(not_leg_not_lat[-1]['count_cumulative'], total_count - legacy_count, not_leg_not_lat) - self.assertEqual(not_leg_not_lat[-1]['ida_byte_size_cumulative'], total_size - legacy_size, not_leg_not_lat) + self.assertEqual(not_leg_not_lat[-1]["count"], march_count - legacy_count, not_leg_not_lat) + self.assertEqual( + not_leg_not_lat[-1]["ida_byte_size"], + march_size - legacy_size, + not_leg_not_lat, + ) + self.assertEqual( + not_leg_not_lat[-1]["count_cumulative"], + total_count - legacy_count, + not_leg_not_lat, + ) + self.assertEqual( + not_leg_not_lat[-1]["ida_byte_size_cumulative"], + total_size - legacy_size, + not_leg_not_lat, + ) class StatisticRPCforDrafts(StatisticRPCCommon, CatalogRecordApiWriteCommon): """ Tests that drafts are not taken into account when calculating statistics """ + def test_count_datasets_api_for_drafts(self): """ Tests that rpc/statistics/count_datasets returns only count of published datasets """ - response_1 = self.client.get('/rpc/statistics/count_datasets').data + response_1 = self.client.get("/rpc/statistics/count_datasets").data self._set_dataset_as_draft(1) - self.assertEqual(CatalogRecord.objects.get(pk=1).state, 'draft', - 'Dataset with id=1 should have changed state to draft') + self.assertEqual( + CatalogRecord.objects.get(pk=1).state, + "draft", + "Dataset with id=1 should have changed state to draft", + ) - response_2 = self.client.get('/rpc/statistics/count_datasets').data - self.assertNotEqual(response_1['count'], response_2['count'], - 'Drafts should not be returned in count_datasets api') + 
response_2 = self.client.get("/rpc/statistics/count_datasets").data
+        self.assertNotEqual(
+            response_1["count"],
+            response_2["count"],
+            "Drafts should not be returned in count_datasets api",
+        )

     def test_all_datasets_cumulative_for_drafts(self):
         """
         Tests that /rpc/statistics/all_datasets_cumulative returns only published datasets
         """
-        url = '/rpc/statistics/all_datasets_cumulative?from_date=2019-06&to_date=2019-06'
+        url = "/rpc/statistics/all_datasets_cumulative?from_date=2019-06&to_date=2019-06"

-        self._set_dataset_creation_date(1, '2019-06-15')
+        self._set_dataset_creation_date(1, "2019-06-15")
         response_1 = self.client.get(url).data

         self._set_dataset_as_draft(1)
         response_2 = self.client.get(url).data

         # ensure the counts and byte sizes are calculated without drafts
-        self.assertNotEqual(response_1[0]['count'], response_2[0]['count'],
-            'Count for June should reduce by one as dataset id=1 was set as draft')
-        self.assertNotEqual(response_1[0]['ida_byte_size'], response_2[0]['ida_byte_size'],
-            'Byte size for June should reduce by one as dataset id=1 was set as draft')
+        self.assertNotEqual(
+            response_1[0]["count"],
+            response_2[0]["count"],
+            "Count for June should reduce by one as dataset id=1 was set as draft",
+        )
+        self.assertNotEqual(
+            response_1[0]["ida_byte_size"],
+            response_2[0]["ida_byte_size"],
+            "Byte size for June should decrease as dataset id=1 was set as draft",
+        )

     def test_catalog_datasets_cumulative_for_drafts(self):
         """
         Tests that /rpc/statistics/catalog_datasets_cumulative returns only published datasets
         """
-        url = '/rpc/statistics/catalog_datasets_cumulative?from_date=2019-06-01&to_date=2019-06-30'
+        url = "/rpc/statistics/catalog_datasets_cumulative?from_date=2019-06-01&to_date=2019-06-30"
         catalog = "urn:nbn:fi:att:2955e904-e3dd-4d7e-99f1-3fed446f96d3"

-        self._set_dataset_creation_date(1, '2019-06-15')
-        self._set_cr_datacatalog(1, catalog) # Adds id=1 to catalog
+        self._set_dataset_creation_date(1, "2019-06-15")
+        self._set_cr_datacatalog(1, catalog)  # Adds id=1 to catalog

-        count_1 = self.client.get(url).data[catalog]['open'][0]['count']
-        total_1 = self.client.get(url).data[catalog]['total']
+        count_1 = self.client.get(url).data[catalog]["open"][0]["count"]
+        total_1 = self.client.get(url).data[catalog]["total"]

         self._set_dataset_as_draft(1)

-        count_2 = self.client.get(url).data[catalog]['open'][0]['count']
-        total_2 = self.client.get(url).data[catalog]['total']
+        count_2 = self.client.get(url).data[catalog]["open"][0]["count"]
+        total_2 = self.client.get(url).data[catalog]["total"]

         # ensure the count and total are calculated without drafts
-        self.assertNotEqual(count_1, count_2, 'Count should reduce by one as dataset id=1 was set as draft')
-        self.assertNotEqual(total_1, total_2, 'Total should reduce by one as dataset id=1 was set as draft')
+        self.assertNotEqual(
+            count_1,
+            count_2,
+            "Count should reduce by one as dataset id=1 was set as draft",
+        )
+        self.assertNotEqual(
+            total_1,
+            total_2,
+            "Total should reduce by one as dataset id=1 was set as draft",
+        )

     def test_end_user_datasets_cumulative_for_drafts(self):
-        ''' End user api should return only published data '''
-        url = '/rpc/statistics/end_user_datasets_cumulative?from_date=2019-06-01&to_date=2019-06-30'
+        """ End user api should return only published data """
+        url = "/rpc/statistics/end_user_datasets_cumulative?from_date=2019-06-01&to_date=2019-06-30"

-        self._set_dataset_creation_date(10, '2019-06-15')
-        count_1 = self.client.get(url).data[0]['count']
+        self._set_dataset_creation_date(10, "2019-06-15")
+        count_1 = self.client.get(url).data[0]["count"]
         self._set_dataset_as_draft(10)
-        count_2 = self.client.get(url).data[0]['count']
+        count_2 = self.client.get(url).data[0]["count"]

         # ensure the count is calculated without drafts
-        self.assertNotEqual(count_1, count_2, 'Count should be reduced by one after setting id=10 as draft')
+        self.assertNotEqual(
+            count_1,
+            count_2,
+            "Count should be reduced by one after setting id=10 as draft",
+        )

     def test_organization_datasets_cumulative_for_drafts(self):
-        ''' Organization api should return only published data '''
+        """ Organization api should return only published data """
         url = "/rpc/statistics/organization_datasets_cumulative?from_date=2019-06-01&to_date=2019-06-30"

-        self._set_dataset_creation_date(1, '2019-06-15')
-        self._set_cr_organization(1, 'org_2')
-        total_1 = self.client.get(url).data['org_2']['total']
+        self._set_dataset_creation_date(1, "2019-06-15")
+        self._set_cr_organization(1, "org_2")
+        total_1 = self.client.get(url).data["org_2"]["total"]

         self._set_dataset_as_draft(1)
-        total_2 = self.client.get(url).data['org_2']['total']
+        total_2 = self.client.get(url).data["org_2"]["total"]

         # ensure the totals are calculated without drafts
-        self.assertNotEqual(total_1, total_2, 'Count be reduced by one after setting id=1 as draft')
\ No newline at end of file
+        self.assertNotEqual(
+            total_1, total_2, "Total should be reduced by one after setting id=1 as draft"
+        )
diff --git a/src/metax_api/tests/api/rpc/v2/views/common_rpc.py b/src/metax_api/tests/api/rpc/v2/views/common_rpc.py
index be010ab5..5a56dd59 100755
--- a/src/metax_api/tests/api/rpc/v2/views/common_rpc.py
+++ b/src/metax_api/tests/api/rpc/v2/views/common_rpc.py
@@ -12,12 +12,11 @@
 class CommonRPCTests(APITestCase, TestClassUtils):
-
     def test_list_valid_methods(self):
         """
         When an invalid (or mistyped) method name is attempted, the api should list valid method
         names for that RPC endpoint.
         """
-        response = self.client.get('/rpc/v2/datasets/nonexisting')
+        response = self.client.get("/rpc/v2/datasets/nonexisting")
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertEqual('methods are: ' in response.data['detail'][0], True, response.content)
+        self.assertEqual("methods are: " in response.data["detail"][0], True, response.content)
diff --git a/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py b/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py
index b85a949c..d04adf54 100755
--- a/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py
+++ b/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py
@@ -19,14 +19,13 @@
 class DatasetRPCTests(APITestCase, TestClassUtils):
-
     @classmethod
     def setUpClass(cls):
         """
         Loaded only once for test cases inside this class.
         """
         super().setUpClass()
-        call_command('loaddata', test_data_file_path, verbosity=0)
+        call_command("loaddata", test_data_file_path, verbosity=0)

     def setUp(self):
         super().setUp()
@@ -39,81 +38,87 @@ def test_get_minimal_dataset_template(self):
         """
         # query param type is missing, should return an error and a description of what to do.
- response = self.client.get('/rpc/v2/datasets/get_minimal_dataset_template') + response = self.client.get("/rpc/v2/datasets/get_minimal_dataset_template") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # test preventing typos - response = self.client.get('/rpc/v2/datasets/get_minimal_dataset_template?type=wrong') + response = self.client.get("/rpc/v2/datasets/get_minimal_dataset_template?type=wrong") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # test minimal dataset for service use - response = self.client.get('/rpc/v2/datasets/get_minimal_dataset_template?type=service') + response = self.client.get("/rpc/v2/datasets/get_minimal_dataset_template?type=service") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue('metadata_provider_org' in response.data) - self.assertTrue('metadata_provider_user' in response.data) - self._use_http_authorization(username='testuser') - response = self.client.post('/rest/v2/datasets', response.data, format="json") + self.assertTrue("metadata_provider_org" in response.data) + self.assertTrue("metadata_provider_user" in response.data) + self._use_http_authorization(username="testuser") + response = self.client.post("/rest/v2/datasets", response.data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) # test minimal dataset for end user use - response = self.client.get('/rpc/v2/datasets/get_minimal_dataset_template?type=enduser') + response = self.client.get("/rpc/v2/datasets/get_minimal_dataset_template?type=enduser") self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue('metadata_provider_org' not in response.data) - self.assertTrue('metadata_provider_user' not in response.data) - self._use_http_authorization(method='bearer', token=get_test_oidc_token()) + self.assertTrue("metadata_provider_org" not in response.data) + self.assertTrue("metadata_provider_user" not in response.data) + self._use_http_authorization(method="bearer", token=get_test_oidc_token()) self._mock_token_validation_succeeds() - response = self.client.post('/rest/v2/datasets', response.data, format="json") + response = self.client.post("/rest/v2/datasets", response.data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_set_preservation_identifier(self): - self._set_http_authorization('service') + self._set_http_authorization("service") # Parameter 'identifier' is required - response = self.client.post('/rpc/v2/datasets/set_preservation_identifier') + response = self.client.post("/rpc/v2/datasets/set_preservation_identifier") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # Nonexisting identifier should return 404 - response = self.client.post('/rpc/v2/datasets/set_preservation_identifier?identifier=nonexisting') + response = self.client.post( + "/rpc/v2/datasets/set_preservation_identifier?identifier=nonexisting" + ) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) # Create ida data catalog - dc = self._get_object_from_test_data('datacatalog', requested_index=0) + dc = self._get_object_from_test_data("datacatalog", requested_index=0) dc_id = settings.IDA_DATA_CATALOG_IDENTIFIER - dc['catalog_json']['identifier'] = dc_id - self.client.post('/rest/v2/datacatalogs', dc, format="json") + dc["catalog_json"]["identifier"] = dc_id + self.client.post("/rest/v2/datacatalogs", dc, format="json") # Test OK ops # Create new ida cr without doi - cr_json = self.client.get('/rest/v2/datasets/1').data - 
cr_json.pop('preservation_identifier', None)
-        cr_json.pop('identifier')
-        cr_json['research_dataset'].pop('preferred_identifier', None)
-        cr_json['data_catalog'] = dc_id
-        cr_json['research_dataset']['issued'] = '2018-01-01'
-        cr_json['research_dataset']['publisher'] = {
-            '@type': 'Organization',
-            'name': { 'en': 'publisher' }
+        cr_json = self.client.get("/rest/v2/datasets/1").data
+        cr_json.pop("preservation_identifier", None)
+        cr_json.pop("identifier")
+        cr_json["research_dataset"].pop("preferred_identifier", None)
+        cr_json["data_catalog"] = dc_id
+        cr_json["research_dataset"]["issued"] = "2018-01-01"
+        cr_json["research_dataset"]["publisher"] = {
+            "@type": "Organization",
+            "name": {"en": "publisher"},
         }
-        response = self.client.post('/rest/v2/datasets?pid_type=urn', cr_json, format="json")
+        response = self.client.post("/rest/v2/datasets?pid_type=urn", cr_json, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        identifier = response.data['identifier']
+        identifier = response.data["identifier"]

         # Verify rpc api returns the same doi as the one that is set to the dataset's preservation identifier
-        response = self.client.post(f'/rpc/v2/datasets/set_preservation_identifier?identifier={identifier}')
+        response = self.client.post(
+            f"/rpc/v2/datasets/set_preservation_identifier?identifier={identifier}"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        response2 = self.client.get(f'/rest/v2/datasets/{identifier}')
+        response2 = self.client.get(f"/rest/v2/datasets/{identifier}")
         self.assertEqual(response2.status_code, status.HTTP_200_OK, response2.data)
-        self.assertEqual(response.data, response2.data['preservation_identifier'], response2.data)
+        self.assertEqual(response.data, response2.data["preservation_identifier"], response2.data)

         # Return 400 if request is not correct datacite format
-        response2.data['research_dataset'].pop('issued')
-        response = self.client.put(f'/rest/v2/datasets/{identifier}', response2.data, format="json")
+        response2.data["research_dataset"].pop("issued")
+        response = self.client.put(f"/rest/v2/datasets/{identifier}", response2.data, format="json")
-        self.assertEqual(response2.status_code, status.HTTP_200_OK, response2.data)
+        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        response = self.client.post(f'/rpc/v2/datasets/set_preservation_identifier?identifier={identifier}')
+        response = self.client.post(
+            f"/rpc/v2/datasets/set_preservation_identifier?identifier={identifier}"
+        )
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
@@ -125,40 +130,49 @@ class ChangeCumulativeStateRPC(CatalogRecordApiWriteCommon):
     """

     def _create_cumulative_dataset(self, state):
-        self.cr_test_data['cumulative_state'] = state
+        self.cr_test_data["cumulative_state"] = state

-        response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json")
+        response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json")
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual(response.data['cumulative_state'], state, response.data)
+        self.assertEqual(response.data["cumulative_state"], state, response.data)

         return response.data

     def _update_cr_cumulative_state(self, identifier, state, result=status.HTTP_204_NO_CONTENT):
         response = self.client.post(
-            '/rpc/v2/datasets/change_cumulative_state?identifier=%s&cumulative_state=%d' % (identifier, state),
-            format="json"
+            "/rpc/v2/datasets/change_cumulative_state?identifier=%s&cumulative_state=%d"
+            % (identifier,
state), + format="json", ) self.assertEqual(response.status_code, result, response.data) return response.data def _get_cr(self, identifier): - response = self.client.get('/rest/v2/datasets/%s' % identifier, format="json") + response = self.client.get("/rest/v2/datasets/%s" % identifier, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) return response.data def _assert_file_counts(self, new_version): - new_count = CatalogRecordV2.objects.get(pk=new_version['id']).files.count() - old_count = CatalogRecordV2.objects.get(pk=new_version['previous_dataset_version']['id']).files.count() - self.assertEqual(new_count, old_count, 'file count between versions should match') + new_count = CatalogRecordV2.objects.get(pk=new_version["id"]).files.count() + old_count = CatalogRecordV2.objects.get( + pk=new_version["previous_dataset_version"]["id"] + ).files.count() + self.assertEqual(new_count, old_count, "file count between versions should match") def test_transitions_from_NO(self): """ If dataset is published, and it has files, state can't be changed from NO to anything. """ cr_orig = self._create_cumulative_dataset(0) - self._update_cr_cumulative_state(cr_orig['identifier'], CR.CUMULATIVE_STATE_YES, status.HTTP_400_BAD_REQUEST) - self._update_cr_cumulative_state(cr_orig['identifier'], CR.CUMULATIVE_STATE_CLOSED, status.HTTP_400_BAD_REQUEST) + self._update_cr_cumulative_state( + cr_orig["identifier"], CR.CUMULATIVE_STATE_YES, status.HTTP_400_BAD_REQUEST + ) + self._update_cr_cumulative_state( + cr_orig["identifier"], + CR.CUMULATIVE_STATE_CLOSED, + status.HTTP_400_BAD_REQUEST, + ) def test_transitions_from_YES(self): """ @@ -166,13 +180,20 @@ def test_transitions_from_YES(self): """ cr = self._create_cumulative_dataset(1) orig_record_count = CatalogRecordV2.objects.all().count() - self._update_cr_cumulative_state(cr['identifier'], CR.CUMULATIVE_STATE_NO, status.HTTP_400_BAD_REQUEST) + self._update_cr_cumulative_state( + cr["identifier"], CR.CUMULATIVE_STATE_NO, status.HTTP_400_BAD_REQUEST + ) self.assertEqual(CatalogRecordV2.objects.all().count(), orig_record_count) # active to non-active cumulation is legal - self._update_cr_cumulative_state(cr['identifier'], CR.CUMULATIVE_STATE_CLOSED) - cr = self._get_cr(cr['identifier']) - self.assertEqual(cr['cumulative_state'], CR.CUMULATIVE_STATE_CLOSED, 'dataset should have changed status') + self._update_cr_cumulative_state(cr["identifier"], CR.CUMULATIVE_STATE_CLOSED) + cr = self._get_cr(cr["identifier"]) + self.assertEqual( + cr["cumulative_state"], + CR.CUMULATIVE_STATE_CLOSED, + "dataset should have changed status", + ) + class CatalogRecordVersionHandling(CatalogRecordApiWriteCommon): @@ -185,49 +206,67 @@ def test_create_new_version(self): """ A new dataset version can be created for datasets in data catalogs that support versioning. 
""" - response = self.client.post('/rpc/v2/datasets/create_new_version?identifier=1', format="json") + response = self.client.post( + "/rpc/v2/datasets/create_new_version?identifier=1", format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - next_version_identifier = response.data.get('identifier') + next_version_identifier = response.data.get("identifier") - response = self.client.get('/rest/v2/datasets/1', format="json") + response = self.client.get("/rest/v2/datasets/1", format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('next_dataset_version', {}).get('identifier'), next_version_identifier) + self.assertEqual( + response.data.get("next_dataset_version", {}).get("identifier"), + next_version_identifier, + ) - response2 = self.client.get('/rest/v2/datasets/%s' % next_version_identifier, format="json") + response2 = self.client.get("/rest/v2/datasets/%s" % next_version_identifier, format="json") self.assertEqual(response2.status_code, status.HTTP_200_OK, response2.data) self.assertEqual( - response2.data.get('previous_dataset_version', {}).get('identifier'), response.data['identifier'] + response2.data.get("previous_dataset_version", {}).get("identifier"), + response.data["identifier"], ) def test_delete_new_version_draft(self): """ Ensure a new version that is created into draft state can be deleted, and is permanently deleted. """ - response = self.client.post('/rpc/v2/datasets/create_new_version?identifier=1', format="json") + response = self.client.post( + "/rpc/v2/datasets/create_new_version?identifier=1", format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - next_version_identifier = response.data.get('identifier') + next_version_identifier = response.data.get("identifier") - response = self.client.delete('/rest/v2/datasets/%s' % next_version_identifier, format="json") + response = self.client.delete( + "/rest/v2/datasets/%s" % next_version_identifier, format="json" + ) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) - num_found = CatalogRecordV2.objects_unfiltered.filter(identifier=next_version_identifier).count() - self.assertEqual(num_found, 0, 'draft should have been permanently deleted') + num_found = CatalogRecordV2.objects_unfiltered.filter( + identifier=next_version_identifier + ).count() + self.assertEqual(num_found, 0, "draft should have been permanently deleted") self.assertEqual(CatalogRecordV2.objects.get(pk=1).next_dataset_version, None) def test_version_already_exists(self): """ If a dataset already has a next version, then a new version cannot be created. 
""" - response = self.client.post('/rpc/v2/datasets/create_new_version?identifier=1', format="json") + response = self.client.post( + "/rpc/v2/datasets/create_new_version?identifier=1", format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) response = self.client.post( - '/rpc/v2/datasets/create_new_version?identifier=1', format="json" + "/rpc/v2/datasets/create_new_version?identifier=1", format="json" ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual('already has a next version' in response.data['detail'][0], True, response.data) + self.assertEqual( + "already has a next version" in response.data["detail"][0], + True, + response.data, + ) def test_new_version_removes_deprecated_files(self): """ @@ -236,40 +275,48 @@ def test_new_version_removes_deprecated_files(self): """ original_cr = CatalogRecordV2.objects.get(pk=1) - response = self.client.delete('/rest/v2/files/1', format="json") + response = self.client.delete("/rest/v2/files/1", format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.post('/rpc/v2/datasets/create_new_version?identifier=1', format="json") + response = self.client.post( + "/rpc/v2/datasets/create_new_version?identifier=1", format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - new_cr = CatalogRecordV2.objects.get(pk=response.data['id']) + new_cr = CatalogRecordV2.objects.get(pk=response.data["id"]) self.assertEqual(new_cr.deprecated, False) - self.assertTrue(len(new_cr.research_dataset['files']) < len(original_cr.research_dataset['files'])) - self.assertTrue(new_cr.files.count() < original_cr.files(manager='objects_unfiltered').count()) + self.assertTrue( + len(new_cr.research_dataset["files"]) < len(original_cr.research_dataset["files"]) + ) + self.assertTrue( + new_cr.files.count() < original_cr.files(manager="objects_unfiltered").count() + ) def test_version_from_draft(self): """ New versions cannot be created from drafts """ - response = self.client.post('/rest/v2/datasets?draft', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets?draft", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - dft_id = response.data['identifier'] + dft_id = response.data["identifier"] - response = self.client.post(f'/rpc/v2/datasets/create_new_version?identifier={dft_id}', format="json") + response = self.client.post( + f"/rpc/v2/datasets/create_new_version?identifier={dft_id}", format="json" + ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertTrue('draft' in response.data['detail'][0], response.data) + self.assertTrue("draft" in response.data["detail"][0], response.data) def test_draft_blocks_version_creation(self): """ Don't allow new versions if there are unmerged drafts for a dataset """ - response = self.client.post('/rpc/v2/datasets/create_draft?identifier=1') + response = self.client.post("/rpc/v2/datasets/create_draft?identifier=1") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - response = self.client.post('/rpc/v2/datasets/create_new_version?identifier=1') + response = self.client.post("/rpc/v2/datasets/create_new_version?identifier=1") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertTrue('unmerged draft' in response.data['detail'][0], response.data) + 
self.assertTrue("unmerged draft" in response.data["detail"][0], response.data) @responses.activate def test_authorization(self): @@ -278,45 +325,54 @@ def test_authorization(self): """ # service use should be OK - response = self.client.post('/rpc/v2/datasets/create_new_version?identifier=2', format="json") + response = self.client.post( + "/rpc/v2/datasets/create_new_version?identifier=2", format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) # test with end user, should fail self.token = get_test_oidc_token(new_proxy=True) self._mock_token_validation_succeeds() - self._use_http_authorization(method='bearer', token=self.token) + self._use_http_authorization(method="bearer", token=self.token) - response = self.client.post('/rpc/v2/datasets/create_new_version?identifier=1', format="json") + response = self.client.post( + "/rpc/v2/datasets/create_new_version?identifier=1", format="json" + ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) # change owner, try again. should be OK cr = CatalogRecordV2.objects.get(pk=1) - cr.metadata_provider_user = self.token['CSCUserName'] + cr.metadata_provider_user = self.token["CSCUserName"] cr.editor = None cr.force_save() - response = self.client.post('/rpc/v2/datasets/create_new_version?identifier=1', format="json") + response = self.client.post( + "/rpc/v2/datasets/create_new_version?identifier=1", format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) class CatalogRecordPublishing(CatalogRecordApiWriteCommon): - def test_publish_new_dataset_draft(self): - response = self.client.post('/rest/v2/datasets?draft', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets?draft", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data['id'] + cr_id = response.data["id"] - response = self.client.post('/rpc/v2/datasets/publish_dataset?identifier=%d' % cr_id, format="json") + response = self.client.post( + "/rpc/v2/datasets/publish_dataset?identifier=%d" % cr_id, format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertTrue(response.data.get('preferred_identifier') is not None) + self.assertTrue(response.data.get("preferred_identifier") is not None) - response = self.client.get('/rest/v2/datasets/%d' % cr_id, format="json") + response = self.client.get("/rest/v2/datasets/%d" % cr_id, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data['state'], 'published') + self.assertEqual(response.data["state"], "published") self.assertEqual( - response.data['research_dataset']['preferred_identifier'] == response.data['identifier'], False + response.data["research_dataset"]["preferred_identifier"] + == response.data["identifier"], + False, ) @responses.activate @@ -328,36 +384,37 @@ def test_authorization(self): """ # test with service - response = self.client.post('/rest/v2/datasets?draft', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets?draft", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) response = self.client.post( - '/rpc/v2/datasets/publish_dataset?identifier=%d' % response.data['id'], format="json" + "/rpc/v2/datasets/publish_dataset?identifier=%d" % response.data["id"], + format="json", ) self.assertEqual(response.status_code, 
status.HTTP_200_OK, response.data) # test with end user self.token = get_test_oidc_token(new_proxy=True) self._mock_token_validation_succeeds() - self._use_http_authorization(method='bearer', token=self.token) + self._use_http_authorization(method="bearer", token=self.token) self.create_end_user_data_catalogs() - self.cr_test_data['data_catalog'] = settings.END_USER_ALLOWED_DATA_CATALOGS[0] - self.cr_test_data['research_dataset'].pop('files', None) - self.cr_test_data['research_dataset'].pop('directories', None) + self.cr_test_data["data_catalog"] = settings.END_USER_ALLOWED_DATA_CATALOGS[0] + self.cr_test_data["research_dataset"].pop("files", None) + self.cr_test_data["research_dataset"].pop("directories", None) - response = self.client.post('/rest/v2/datasets?draft', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets?draft", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) response = self.client.post( - '/rpc/v2/datasets/publish_dataset?identifier=%d' % response.data['id'], format="json" + "/rpc/v2/datasets/publish_dataset?identifier=%d" % response.data["id"], + format="json", ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) class CatalogRecordV1APIs(CatalogRecordApiWriteCommon): - def test_ensure_v1_apis_dont_work(self): - for endpoint in ('fix_deprecated', 'refresh_directory_content'): - response = self.client.post(f'/rpc/v2/datasets/{endpoint}', format="json") + for endpoint in ("fix_deprecated", "refresh_directory_content"): + response = self.client.post(f"/rpc/v2/datasets/{endpoint}", format="json") self.assertEqual(response.status_code, 501, response.data) diff --git a/src/metax_api/tests/api/rpc/v2/views/file_rpc.py b/src/metax_api/tests/api/rpc/v2/views/file_rpc.py index fce29e46..eb09a185 100755 --- a/src/metax_api/tests/api/rpc/v2/views/file_rpc.py +++ b/src/metax_api/tests/api/rpc/v2/views/file_rpc.py @@ -14,15 +14,15 @@ class FileRPCTests(APITestCase, TestClassUtils): - def setUp(self): """ Reloaded for every test case """ super().setUp() - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) self._use_http_authorization() + class DeleteProjectTests(FileRPCTests): """ @@ -32,42 +32,52 @@ class DeleteProjectTests(FileRPCTests): def test_wrong_parameters(self): # correct user, no project identifier - response = self.client.post('/rpc/v2/files/delete_project') + response = self.client.post("/rpc/v2/files/delete_project") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # nonexisting project identifier: - response = self.client.post('/rpc/v2/files/delete_project?project_identifier=non_existing') - self.assertEqual(response.data['deleted_files_count'], 0) + response = self.client.post("/rpc/v2/files/delete_project?project_identifier=non_existing") + self.assertEqual(response.data["deleted_files_count"], 0) # wrong request method - response = self.client.delete('/rpc/v2/files/delete_project?project_identifier=research_project_112') + response = self.client.delete( + "/rpc/v2/files/delete_project?project_identifier=research_project_112" + ) self.assertEqual(response.status_code, 501) # wrong user - self._use_http_authorization('api_auth_user') - response = self.client.post('/rpc/v2/files/delete_project?project_identifier=research_project_112') + self._use_http_authorization("api_auth_user") + response = self.client.post( + 
"/rpc/v2/files/delete_project?project_identifier=research_project_112" + ) # self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_known_project_identifier(self): - response = self.client.post('/rpc/v2/files/delete_project?project_identifier=research_project_112') + response = self.client.post( + "/rpc/v2/files/delete_project?project_identifier=research_project_112" + ) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_files_are_marked_deleted(self): - files_count_before = File.objects.filter(project_identifier='research_project_112').count() - response = self.client.post('/rpc/v2/files/delete_project?project_identifier=research_project_112') - self.assertEqual(files_count_before, response.data['deleted_files_count']) + files_count_before = File.objects.filter(project_identifier="research_project_112").count() + response = self.client.post( + "/rpc/v2/files/delete_project?project_identifier=research_project_112" + ) + self.assertEqual(files_count_before, response.data["deleted_files_count"]) def test_directories_are_deleted(self): - self.client.post('/rpc/v2/files/delete_project?project_identifier=research_project_112') - directories_count_after = Directory.objects.filter(project_identifier='research_project_112').count() + self.client.post("/rpc/v2/files/delete_project?project_identifier=research_project_112") + directories_count_after = Directory.objects.filter( + project_identifier="research_project_112" + ).count() self.assertEqual(directories_count_after, 0) def test_datasets_are_marked_deprecated(self): - file_ids = File.objects.filter(project_identifier='project_x').values_list('id', flat=True) - related_dataset = CatalogRecordV2.objects.filter(files__in=file_ids).distinct('id')[0] + file_ids = File.objects.filter(project_identifier="project_x").values_list("id", flat=True) + related_dataset = CatalogRecordV2.objects.filter(files__in=file_ids).distinct("id")[0] - response = self.client.post('/rpc/v2/files/delete_project?project_identifier=project_x') + response = self.client.post("/rpc/v2/files/delete_project?project_identifier=project_x") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get('/rest/v2/datasets/%s' % related_dataset.identifier) - self.assertEqual(response.data['deprecated'], True) + response = self.client.get("/rest/v2/datasets/%s" % related_dataset.identifier) + self.assertEqual(response.data["deprecated"], True) diff --git a/src/metax_api/tests/api/rpc/v2/views/statistic_rpc.py b/src/metax_api/tests/api/rpc/v2/views/statistic_rpc.py index da448abf..674388b7 100755 --- a/src/metax_api/tests/api/rpc/v2/views/statistic_rpc.py +++ b/src/metax_api/tests/api/rpc/v2/views/statistic_rpc.py @@ -20,23 +20,22 @@ class StatisticRPCCommon(APITestCase, TestClassUtils): - @classmethod def setUpClass(cls): """ Loaded only once for test cases inside this class. """ super().setUpClass() - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) def setUp(self): super().setUp() - self._use_http_authorization(username='metax') + self._use_http_authorization(username="metax") self._setup_testdata() def _setup_testdata(self): - test_orgs = ['org_1', 'org_2', 'org_3'] - access_types = [ v for k, v in ACCESS_TYPES.items() ] + test_orgs = ["org_1", "org_2", "org_3"] + access_types = [v for k, v in ACCESS_TYPES.items()] # ensure testdata has something sensible to test against. 
# todo may be feasible to move these to generate_test_data without breaking all tests ? @@ -53,72 +52,76 @@ def _setup_testdata(self): # distribute records between access types if cr.id % 5 == 0: - cr.research_dataset['access_rights']['access_type']['identifier'] = access_types[4] + cr.research_dataset["access_rights"]["access_type"]["identifier"] = access_types[4] elif cr.id % 4 == 0: - cr.research_dataset['access_rights']['access_type']['identifier'] = access_types[3] + cr.research_dataset["access_rights"]["access_type"]["identifier"] = access_types[3] elif cr.id % 3 == 0: - cr.research_dataset['access_rights']['access_type']['identifier'] = access_types[2] + cr.research_dataset["access_rights"]["access_type"]["identifier"] = access_types[2] elif cr.id % 2 == 0: - cr.research_dataset['access_rights']['access_type']['identifier'] = access_types[1] + cr.research_dataset["access_rights"]["access_type"]["identifier"] = access_types[1] else: - cr.research_dataset['access_rights']['access_type']['identifier'] = access_types[0] + cr.research_dataset["access_rights"]["access_type"]["identifier"] = access_types[0] # distribute records between some creation months - date = '20%s-%s-13' + date = "20%s-%s-13" if cr.id % 8 == 0: - cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ('18', '06')) + cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ("18", "06")) elif cr.id % 7 == 0: - cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ('18', '07')) + cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ("18", "07")) elif cr.id % 6 == 0: - cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ('18', '10')) + cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ("18", "10")) elif cr.id % 5 == 0: - cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ('18', '11')) + cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ("18", "11")) elif cr.id % 4 == 0: - cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ('18', '12')) + cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ("18", "12")) elif cr.id % 3 == 0: - cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ('19', '01')) + cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ("19", "01")) elif cr.id % 2 == 0: - cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ('19', '02')) + cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ("19", "02")) else: - cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ('19', '03')) + cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date % ("19", "03")) # set some records as "created through end user api" if cr.id % 10 == 0: cr.service_created = None - cr.user_created = 'abc%d@fairdataid' % cr.id + cr.user_created = "abc%d@fairdataid" % cr.id cr.force_save() # create a few files which do not belong to any datasets - response = self.client.get('/rest/v2/files/1', format='json') + response = self.client.get("/rest/v2/files/1", format="json") files = [] for i in range(5): f = deepcopy(response.data) - del f['id'] - f['identifier'] += 'unique' + str(i) - f['file_name'] += str(i) - f['file_path'] += str(i) - f['project_identifier'] = 'prj123' + del f["id"] + f["identifier"] += "unique" + str(i) + f["file_name"] += str(i) + f["file_path"] += str(i) + f["project_identifier"] = "prj123" files.append(f) - response = self.client.post('/rest/v2/files', files, 
format='json') + response = self.client.post("/rest/v2/files", files, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - response = self.client.get('/rest/v2/directories/update_byte_sizes_and_file_counts', format='json') + response = self.client.get( + "/rest/v2/directories/update_byte_sizes_and_file_counts", format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get('/rest/v2/datasets/update_cr_total_files_byte_sizes', format='json') + response = self.client.get( + "/rest/v2/datasets/update_cr_total_files_byte_sizes", format="json" + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) # create legacy datacatalog - dc = DataCatalog.objects.filter(catalog_json__research_dataset_schema='ida').first() - dc.catalog_json['identifier'] = settings.LEGACY_CATALOGS[0] + dc = DataCatalog.objects.filter(catalog_json__research_dataset_schema="ida").first() + dc.catalog_json["identifier"] = settings.LEGACY_CATALOGS[0] dc_json = { "catalog_record_services_create": "testuser,api_auth_user,metax", "catalog_record_services_edit": "testuser,api_auth_user,metax", - "catalog_json": dc.catalog_json + "catalog_json": dc.catalog_json, } - response = self.client.post('/rest/v2/datacatalogs', dc_json, format="json") + response = self.client.post("/rest/v2/datacatalogs", dc_json, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) def _set_deprecated_dataset(self, id=1): @@ -136,16 +139,12 @@ def _create_legacy_dataset(self): Creates one new legacy dataset and returns its id """ legacy_dataset = { - "data_catalog": { - "identifier": settings.LEGACY_CATALOGS[0] - }, + "data_catalog": {"identifier": settings.LEGACY_CATALOGS[0]}, "metadata_owner_org": "some_org_id", "metadata_provider_org": "some_org_id", "metadata_provider_user": "some_user_id", "research_dataset": { - "title": { - "en": "Test Dataset Title" - }, + "title": {"en": "Test Dataset Title"}, "description": { "en": "A descriptive description describing the contents of this dataset. Must be descriptive." 
}, @@ -154,54 +153,44 @@ def _create_legacy_dataset(self): "identifier": "pid:urn:1", "title": "File Title", "description": "informative description", - "use_category": { - "identifier": "method" - } + "use_category": {"identifier": "method"}, }, { "identifier": "pid:urn:3", "title": "File Title", "description": "informative description", - "use_category": { - "identifier": "method" - } - } + "use_category": {"identifier": "method"}, + }, ], "creator": [ { "name": "Teppo Testaaja", "@type": "Person", "member_of": { - "name": { - "fi": "Testiorganisaatio" - }, - "@type": "Organization" - } + "name": {"fi": "Testiorganisaatio"}, + "@type": "Organization", + }, } ], "access_rights": { "access_type": { "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", - "pref_label": { - "fi": "Avoin", - "en": "Open", - "und": "Avoin" - }, - "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type" + "pref_label": {"fi": "Avoin", "en": "Open", "und": "Avoin"}, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", } }, - "preferred_identifier": "uniikkinen_aidentifaijeri" - } + "preferred_identifier": "uniikkinen_aidentifaijeri", + }, } - response = self.client.post('/rest/v2/datasets', legacy_dataset, format="json") + response = self.client.post("/rest/v2/datasets", legacy_dataset, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - return response.data['id'] + return response.data["id"] def _create_new_dataset(self, dataset_json): - response = self.client.post('/rest/v2/datasets', dataset_json, format="json") + response = self.client.post("/rest/v2/datasets", dataset_json, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - new_cr_id = response.data['id'] + new_cr_id = response.data["id"] return new_cr_id @@ -209,20 +198,25 @@ def _create_new_dataset_version(self, id=1): """ Finds the latest version of given dataset, and creates a new version of it. 
""" - response = self.client.get(f'/rest/v2/datasets/{id}?include_legacy', format='json') + response = self.client.get(f"/rest/v2/datasets/{id}?include_legacy", format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) cr = response.data - while cr.get('next_dataset_version', False): - id = cr['next_dataset_version']['id'] - response = self.client.get(f'/rest/v2/datasets/{id}?include_legacy', format='json') + while cr.get("next_dataset_version", False): + id = cr["next_dataset_version"]["id"] + response = self.client.get(f"/rest/v2/datasets/{id}?include_legacy", format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) cr = response.data - response = self.client.post(f'/rpc/v2/datasets/create_new_version?identifier={cr["id"]}', format='json') + response = self.client.post( + f'/rpc/v2/datasets/create_new_version?identifier={cr["id"]}', format="json" + ) self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - new_version_id = response.data['id'] + new_version_id = response.data["id"] - response = self.client.post(f'/rpc/v2/datasets/publish_dataset?identifier={new_version_id}', format='json') + response = self.client.post( + f"/rpc/v2/datasets/publish_dataset?identifier={new_version_id}", + format="json", + ) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) return new_version_id @@ -240,10 +234,9 @@ def _get_catalog_record_size(self, id): """ Returns the size of given record id """ - size = CatalogRecord\ - .objects_unfiltered\ - .get(id=id)\ - .research_dataset.get('total_files_byte_size', 0) + size = CatalogRecord.objects_unfiltered.get(id=id).research_dataset.get( + "total_files_byte_size", 0 + ) return size @@ -252,7 +245,7 @@ def _get_byte_size_of_month(self, date): Returns the byte size for given date. date is in format 'YYYY-MM' """ query = CatalogRecord.objects_unfiltered.filter(date_created__startswith=date) - list_of_sizes = [ cr.research_dataset.get('total_files_byte_size', 0) for cr in query ] + list_of_sizes = [cr.research_dataset.get("total_files_byte_size", 0) for cr in query] return sum(list_of_sizes) @@ -261,7 +254,7 @@ def _get_total_byte_size(self): Returns byte size of all datasets in database """ query = CatalogRecord.objects_unfiltered.all() - list_of_sizes = [cr.research_dataset.get('total_files_byte_size', 0) for cr in query] + list_of_sizes = [cr.research_dataset.get("total_files_byte_size", 0) for cr in query] return sum(list_of_sizes) @@ -269,7 +262,9 @@ def _get_dataset_count_after(self, date): """ Return the total count after the date provided (inclusive). date is in format 'YYYY-MM-DD' """ - return CatalogRecord.objects_unfiltered.filter(date_created__gte=f'{date}T00:00:00+03:00').count() + return CatalogRecord.objects_unfiltered.filter( + date_created__gte=f"{date}T00:00:00+03:00" + ).count() def _get_dataset_count_of_month(self, date): """ @@ -290,7 +285,7 @@ def _set_cr_datacatalog(self, cr_id, catalog_id): def _set_dataset_as_draft(self, cr_id): cr = CatalogRecord.objects.get(pk=cr_id) - cr.state = 'draft' + cr.state = "draft" cr.force_save() def _set_cr_organization(self, cr_id, org): @@ -310,37 +305,39 @@ def test_count_datasets_single(self): Tests single parameters for api. 
Empty removed and legacy parameters returns true AND false matches """ total_count = CatalogRecord.objects_unfiltered.count() - response = self.client.get('/rpc/v2/statistics/count_datasets').data - self.assertEqual(total_count, response['count'], response) + response = self.client.get("/rpc/v2/statistics/count_datasets").data + self.assertEqual(total_count, response["count"], response) # test removed -parameter self._set_removed_dataset(id=2) - response = self.client.get('/rpc/v2/statistics/count_datasets?removed=true').data - self.assertEqual(response['count'], 1, response) + response = self.client.get("/rpc/v2/statistics/count_datasets?removed=true").data + self.assertEqual(response["count"], 1, response) - response = self.client.get('/rpc/v2/statistics/count_datasets?removed=false').data - self.assertEqual(response['count'], total_count - 1, response) + response = self.client.get("/rpc/v2/statistics/count_datasets?removed=false").data + self.assertEqual(response["count"], total_count - 1, response) # test legacy -parameter self._create_legacy_dataset() total_count = CatalogRecord.objects_unfiltered.count() - response = self.client.get('/rpc/v2/statistics/count_datasets?legacy=true').data - self.assertEqual(response['count'], 1, response) + response = self.client.get("/rpc/v2/statistics/count_datasets?legacy=true").data + self.assertEqual(response["count"], 1, response) - response = self.client.get('/rpc/v2/statistics/count_datasets?legacy=false').data - self.assertEqual(response['count'], total_count - 1, response) + response = self.client.get("/rpc/v2/statistics/count_datasets?legacy=false").data + self.assertEqual(response["count"], total_count - 1, response) # test latest -parameter self._create_new_dataset_version() self._create_new_dataset_version() total_count = CatalogRecord.objects_unfiltered.count() - response = self.client.get('/rpc/v2/statistics/count_datasets?latest=false').data # returns all - self.assertEqual(response['count'], total_count, response) + response = self.client.get( + "/rpc/v2/statistics/count_datasets?latest=false" + ).data # returns all + self.assertEqual(response["count"], total_count, response) - with_param = self.client.get('/rpc/v2/statistics/count_datasets?latest=true').data - without_param = self.client.get('/rpc/v2/statistics/count_datasets').data # default is true - self.assertEqual(with_param['count'], total_count - 2, with_param) - self.assertEqual(with_param['count'], without_param['count'], with_param) + with_param = self.client.get("/rpc/v2/statistics/count_datasets?latest=true").data + without_param = self.client.get("/rpc/v2/statistics/count_datasets").data # default is true + self.assertEqual(with_param["count"], total_count - 2, with_param) + self.assertEqual(with_param["count"], without_param["count"], with_param) def test_count_datasets_removed_latest(self): second_ver = self._create_new_dataset_version() @@ -349,21 +346,27 @@ def test_count_datasets_removed_latest(self): self._set_removed_dataset(id=second_ver) self._set_removed_dataset(id=2) - rem_lat = self.client.get('/rpc/v2/statistics/count_datasets?removed=true&latest=true').data - rem_not_lat = self.client.get('/rpc/v2/statistics/count_datasets?removed=true&latest=false').data + rem_lat = self.client.get("/rpc/v2/statistics/count_datasets?removed=true&latest=true").data + rem_not_lat = self.client.get( + "/rpc/v2/statistics/count_datasets?removed=true&latest=false" + ).data - self.assertEqual(rem_lat['count'], 1, 'Only latest versions should be checked') # id=2 - 
self.assertEqual(rem_not_lat['count'], 3, 'Only the prev versions should be removed') + self.assertEqual(rem_lat["count"], 1, "Only latest versions should be checked") # id=2 + self.assertEqual(rem_not_lat["count"], 3, "Only the prev versions should be removed") # create new dataset with 2 versions - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self._create_new_dataset_version(response.data['id']) + self._create_new_dataset_version(response.data["id"]) - not_rem_lat = self.client.get('/rpc/v2/statistics/count_datasets?removed=false&latest=true').data - not_rem_not_lat = self.client.get('/rpc/v2/statistics/count_datasets?removed=false&latest=false').data + not_rem_lat = self.client.get( + "/rpc/v2/statistics/count_datasets?removed=false&latest=true" + ).data + not_rem_not_lat = self.client.get( + "/rpc/v2/statistics/count_datasets?removed=false&latest=false" + ).data - self.assertEqual(not_rem_lat['count'], not_rem_not_lat['count'] - 1) + self.assertEqual(not_rem_lat["count"], not_rem_not_lat["count"] - 1) def test_count_datasets_removed_legacy(self): self._create_legacy_dataset() @@ -372,15 +375,21 @@ def test_count_datasets_removed_legacy(self): self._set_removed_dataset(leg_cr) total_count = CatalogRecord.objects_unfiltered.count() - rem_leg = self.client.get('/rpc/v2/statistics/count_datasets?removed=true&legacy=true').data - rem_not_leg = self.client.get('/rpc/v2/statistics/count_datasets?removed=true&legacy=false').data - not_rem_leg = self.client.get('/rpc/v2/statistics/count_datasets?removed=false&legacy=true').data - not_rem_not_leg = self.client.get('/rpc/v2/statistics/count_datasets?removed=false&legacy=false').data - - self.assertEqual(rem_leg['count'], 1) - self.assertEqual(rem_not_leg['count'], 0) - self.assertEqual(not_rem_leg['count'], 2) - self.assertEqual(not_rem_not_leg['count'], total_count - 3) + rem_leg = self.client.get("/rpc/v2/statistics/count_datasets?removed=true&legacy=true").data + rem_not_leg = self.client.get( + "/rpc/v2/statistics/count_datasets?removed=true&legacy=false" + ).data + not_rem_leg = self.client.get( + "/rpc/v2/statistics/count_datasets?removed=false&legacy=true" + ).data + not_rem_not_leg = self.client.get( + "/rpc/v2/statistics/count_datasets?removed=false&legacy=false" + ).data + + self.assertEqual(rem_leg["count"], 1) + self.assertEqual(rem_not_leg["count"], 0) + self.assertEqual(not_rem_leg["count"], 2) + self.assertEqual(not_rem_not_leg["count"], total_count - 3) def test_count_datasets_latest_legacy(self): leg_cr = self._create_legacy_dataset() @@ -388,38 +397,42 @@ def test_count_datasets_latest_legacy(self): self._create_new_dataset_version(leg_cr) total_count = CatalogRecord.objects_unfiltered.count() - leg_lat = self.client.get('/rpc/v2/statistics/count_datasets?legacy=true&latest=true').data - leg_not_lat = self.client.get('/rpc/v2/statistics/count_datasets?legacy=true&latest=false').data - not_leg_not_lat = self.client.get('/rpc/v2/statistics/count_datasets?legacy=false&latest=false').data + leg_lat = self.client.get("/rpc/v2/statistics/count_datasets?legacy=true&latest=true").data + leg_not_lat = self.client.get( + "/rpc/v2/statistics/count_datasets?legacy=true&latest=false" + ).data + not_leg_not_lat = self.client.get( + "/rpc/v2/statistics/count_datasets?legacy=false&latest=false" + ).data - self.assertEqual(leg_lat['count'], 1) - 
self.assertEqual(leg_not_lat['count'], 3) - self.assertEqual(not_leg_not_lat['count'], total_count - 3) + self.assertEqual(leg_lat["count"], 1) + self.assertEqual(leg_not_lat["count"], 3) + self.assertEqual(not_leg_not_lat["count"], total_count - 3) def test_count_datasets_from_date(self): total_count = CatalogRecord.objects_unfiltered.count() - june_count = self._get_dataset_count_of_month('2018-06') - july_count = self._get_dataset_count_of_month('2018-07') - aug_count = self._get_dataset_count_of_month('2018-08') + june_count = self._get_dataset_count_of_month("2018-06") + july_count = self._get_dataset_count_of_month("2018-07") + aug_count = self._get_dataset_count_of_month("2018-08") - res = self.client.get('/rpc/v2/statistics/count_datasets?from_date=2018-07-01').data - self.assertEqual(res['count'], total_count - june_count) + res = self.client.get("/rpc/v2/statistics/count_datasets?from_date=2018-07-01").data + self.assertEqual(res["count"], total_count - june_count) - res = self.client.get('/rpc/v2/statistics/count_datasets?from_date=2018-09-02').data - self.assertEqual(res['count'], total_count - june_count - july_count - aug_count) + res = self.client.get("/rpc/v2/statistics/count_datasets?from_date=2018-09-02").data + self.assertEqual(res["count"], total_count - june_count - july_count - aug_count) def test_count_datasets_to_date(self): total_count = CatalogRecord.objects_unfiltered.count() - after_jan_count = self._get_dataset_count_after('2019-01-02') - after_feb_count = self._get_dataset_count_after('2019-02-02') + after_jan_count = self._get_dataset_count_after("2019-01-02") + after_feb_count = self._get_dataset_count_after("2019-02-02") - res = self.client.get('/rpc/v2/statistics/count_datasets?to_date=2019-01-01').data - self.assertEqual(res['count'], total_count - after_jan_count) + res = self.client.get("/rpc/v2/statistics/count_datasets?to_date=2019-01-01").data + self.assertEqual(res["count"], total_count - after_jan_count) - res = self.client.get('/rpc/v2/statistics/count_datasets?to_date=2019-02-01').data - self.assertEqual(res['count'], total_count - after_feb_count) + res = self.client.get("/rpc/v2/statistics/count_datasets?to_date=2019-02-01").data + self.assertEqual(res["count"], total_count - after_feb_count) class StatisticRPCAllDatasetsCumulative(StatisticRPCCommon, CatalogRecordApiWriteCommon): @@ -427,8 +440,8 @@ class StatisticRPCAllDatasetsCumulative(StatisticRPCCommon, CatalogRecordApiWrit Test suite for all_datasets_cumulative. Test only optional parameters removed, legacy and latest for now. 
""" - url = '/rpc/v2/statistics/all_datasets_cumulative' - dateparam_all = 'from_date=2018-06&to_date=2019-03' + url = "/rpc/v2/statistics/all_datasets_cumulative" + dateparam_all = "from_date=2018-06&to_date=2019-03" def test_all_datasets_cumulative(self): """ @@ -442,50 +455,58 @@ def test_all_datasets_cumulative(self): """ # test bad query parameters - response = self.client.get(f'{self.url}') - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, 'from_date and to_date are required') + response = self.client.get(f"{self.url}") + self.assertEqual( + response.status_code, + status.HTTP_400_BAD_REQUEST, + "from_date and to_date are required", + ) - response = self.client.get(f'{self.url}?from_date=2019-11&to_date=bad_parameter') - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, 'date format is YYYY-MM') + response = self.client.get(f"{self.url}?from_date=2019-11&to_date=bad_parameter") + self.assertEqual( + response.status_code, status.HTTP_400_BAD_REQUEST, "date format is YYYY-MM" + ) - response = self.client.get(f'{self.url}?from_date=2019-11&to_date=2019-11-15') - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, 'date format is YYYY-MM') + response = self.client.get(f"{self.url}?from_date=2019-11&to_date=2019-11-15") + self.assertEqual( + response.status_code, status.HTTP_400_BAD_REQUEST, "date format is YYYY-MM" + ) # test the basic functionality - june_size = self._get_byte_size_of_month('2018-06') - june_count = self._get_dataset_count_of_month('2018-06') + june_size = self._get_byte_size_of_month("2018-06") + june_count = self._get_dataset_count_of_month("2018-06") - july_size = self._get_byte_size_of_month('2018-07') - july_count = self._get_dataset_count_of_month('2018-07') + july_size = self._get_byte_size_of_month("2018-07") + july_count = self._get_dataset_count_of_month("2018-07") - march_size = self._get_byte_size_of_month('2019-03') - march_count = self._get_dataset_count_of_month('2019-03') + march_size = self._get_byte_size_of_month("2019-03") + march_count = self._get_dataset_count_of_month("2019-03") total_count = self._get_total_dataset_count() total_size = self._get_total_byte_size() - response = self.client.get(f'{self.url}?{self.dateparam_all}').data + response = self.client.get(f"{self.url}?{self.dateparam_all}").data # ensure the counts and byte sizes are calculated correctly - self.assertEqual(response[0]['count'], june_count, response) - self.assertEqual(response[0]['ida_byte_size'], june_size, response) - self.assertEqual(response[0]['count_cumulative'], june_count, response) - self.assertEqual(response[0]['ida_byte_size_cumulative'], june_size, response) + self.assertEqual(response[0]["count"], june_count, response) + self.assertEqual(response[0]["ida_byte_size"], june_size, response) + self.assertEqual(response[0]["count_cumulative"], june_count, response) + self.assertEqual(response[0]["ida_byte_size_cumulative"], june_size, response) - self.assertEqual(response[1]['count'], july_count, response) - self.assertEqual(response[1]['ida_byte_size'], july_size, response) - self.assertEqual(response[1]['count_cumulative'], june_count + july_count, response) - self.assertEqual(response[1]['ida_byte_size_cumulative'], june_size + july_size, response) + self.assertEqual(response[1]["count"], july_count, response) + self.assertEqual(response[1]["ida_byte_size"], july_size, response) + self.assertEqual(response[1]["count_cumulative"], june_count + july_count, response) + 
self.assertEqual(response[1]["ida_byte_size_cumulative"], june_size + july_size, response) - self.assertEqual(response[-1]['count'], march_count, response) - self.assertEqual(response[-1]['ida_byte_size'], march_size, response) - self.assertEqual(response[-1]['count_cumulative'], total_count, response) - self.assertEqual(response[-1]['ida_byte_size_cumulative'], total_size, response) + self.assertEqual(response[-1]["count"], march_count, response) + self.assertEqual(response[-1]["ida_byte_size"], march_size, response) + self.assertEqual(response[-1]["count_cumulative"], total_count, response) + self.assertEqual(response[-1]["ida_byte_size_cumulative"], total_size, response) # test that only datasets from beginning of from_date is counted - response = self.client.get(f'{self.url}?from_date=2018-07&to_date=2019-03').data + response = self.client.get(f"{self.url}?from_date=2018-07&to_date=2019-03").data - self.assertEqual(response[-1]['count_cumulative'], total_count - june_count, response) - self.assertEqual(response[-1]['ida_byte_size_cumulative'], total_size - june_size, response) + self.assertEqual(response[-1]["count_cumulative"], total_count - june_count, response) + self.assertEqual(response[-1]["ida_byte_size_cumulative"], total_size - june_size, response) def test_all_datasets_cumulative_single(self): """ @@ -495,92 +516,116 @@ def test_all_datasets_cumulative_single(self): total_size = self._get_total_byte_size() # test removed -parameter - june_size = self._get_byte_size_of_month('2018-06') - june_count = self._get_dataset_count_of_month('2018-06') + june_size = self._get_byte_size_of_month("2018-06") + june_count = self._get_dataset_count_of_month("2018-06") - self._set_removed_dataset(id=8) # belongs to 2018-06, i.e. the first interval + self._set_removed_dataset(id=8) # belongs to 2018-06, i.e. 
the first interval removed_size = self._get_catalog_record_size(id=8) - response = self.client.get(f'{self.url}?{self.dateparam_all}&removed=true').data + response = self.client.get(f"{self.url}?{self.dateparam_all}&removed=true").data # ensure that only the first month (2018-06) contains dataset and that cumulative is calculated correctly - self.assertEqual(response[0]['count'], 1, response) - self.assertEqual(response[0]['ida_byte_size'], removed_size, response) - self.assertEqual(response[0]['count_cumulative'], 1, response) - self.assertEqual(response[0]['ida_byte_size_cumulative'], removed_size, response) + self.assertEqual(response[0]["count"], 1, response) + self.assertEqual(response[0]["ida_byte_size"], removed_size, response) + self.assertEqual(response[0]["count_cumulative"], 1, response) + self.assertEqual(response[0]["ida_byte_size_cumulative"], removed_size, response) - self.assertEqual(response[-1]['count_cumulative'], 1, response) - self.assertEqual(response[-1]['ida_byte_size_cumulative'], removed_size, response) + self.assertEqual(response[-1]["count_cumulative"], 1, response) + self.assertEqual(response[-1]["ida_byte_size_cumulative"], removed_size, response) - response = self.client.get(f'{self.url}?{self.dateparam_all}&removed=false').data + response = self.client.get(f"{self.url}?{self.dateparam_all}&removed=false").data # ensure that the correct dataset is missing from results - self.assertEqual(response[0]['count'], june_count - 1, response) - self.assertEqual(response[0]['ida_byte_size'], june_size - removed_size, response) - self.assertEqual(response[-1]['count_cumulative'], total_count - 1, response) - self.assertEqual(response[-1]['ida_byte_size_cumulative'], total_size - removed_size, response) + self.assertEqual(response[0]["count"], june_count - 1, response) + self.assertEqual(response[0]["ida_byte_size"], june_size - removed_size, response) + self.assertEqual(response[-1]["count_cumulative"], total_count - 1, response) + self.assertEqual( + response[-1]["ida_byte_size_cumulative"], + total_size - removed_size, + response, + ) # test legacy -parameter - leg_cr = self._create_legacy_dataset() # legacy cr belongs to 2019-03, i.e. the last interval - self._set_dataset_creation_date(leg_cr, '2019-03-13') + leg_cr = ( + self._create_legacy_dataset() + ) # legacy cr belongs to 2019-03, i.e. 
the last interval + self._set_dataset_creation_date(leg_cr, "2019-03-13") legacy_size = self._get_catalog_record_size(id=leg_cr) total_count = self._get_total_dataset_count() total_size = self._get_total_byte_size() - march_size = self._get_byte_size_of_month('2019-03') - march_count = self._get_dataset_count_of_month('2019-03') + march_size = self._get_byte_size_of_month("2019-03") + march_count = self._get_dataset_count_of_month("2019-03") - response = self.client.get(f'{self.url}?{self.dateparam_all}&legacy=true').data + response = self.client.get(f"{self.url}?{self.dateparam_all}&legacy=true").data - self.assertEqual(response[-1]['count'], 1, response) - self.assertEqual(response[-1]['ida_byte_size'], legacy_size, response) - self.assertEqual(response[-1]['count_cumulative'], 1, response) - self.assertEqual(response[-1]['ida_byte_size_cumulative'], legacy_size, response) + self.assertEqual(response[-1]["count"], 1, response) + self.assertEqual(response[-1]["ida_byte_size"], legacy_size, response) + self.assertEqual(response[-1]["count_cumulative"], 1, response) + self.assertEqual(response[-1]["ida_byte_size_cumulative"], legacy_size, response) - response = self.client.get(f'{self.url}?{self.dateparam_all}&legacy=false').data + response = self.client.get(f"{self.url}?{self.dateparam_all}&legacy=false").data - self.assertEqual(response[-1]['count'], march_count - 1, response) - self.assertEqual(response[-1]['ida_byte_size'], march_size - legacy_size, response) - self.assertEqual(response[-1]['count_cumulative'], total_count - 1, response) - self.assertEqual(response[-1]['ida_byte_size_cumulative'], total_size - legacy_size, response) + self.assertEqual(response[-1]["count"], march_count - 1, response) + self.assertEqual(response[-1]["ida_byte_size"], march_size - legacy_size, response) + self.assertEqual(response[-1]["count_cumulative"], total_count - 1, response) + self.assertEqual( + response[-1]["ida_byte_size_cumulative"], total_size - legacy_size, response + ) # test latest -parameter # new versions will belong to 2019-03, i.e. 
the last interval second = self._create_new_dataset_version() - self._set_dataset_creation_date(second, '2019-03-17') + self._set_dataset_creation_date(second, "2019-03-17") old_ver_size = self._get_catalog_record_size(id=1) total_count = self._get_total_dataset_count() total_size = self._get_total_byte_size() - march_size = self._get_byte_size_of_month('2019-03') - march_count = self._get_dataset_count_of_month('2019-03') - - response = self.client.get(f'{self.url}?{self.dateparam_all}&latest=false').data # returns all - self.assertEqual(response[-1]['count'], march_count, response) - self.assertEqual(response[-1]['ida_byte_size'], march_size, response) - self.assertEqual(response[-1]['count_cumulative'], total_count, response) - self.assertEqual(response[-1]['ida_byte_size_cumulative'], total_size, response) - - with_param = self.client.get(f'{self.url}?{self.dateparam_all}&latest=true').data - self.assertEqual(with_param[-1]['count'], march_count - 1, with_param) - self.assertEqual(with_param[-1]['ida_byte_size'], march_size - old_ver_size, with_param) - self.assertEqual(with_param[-1]['count_cumulative'], total_count - 1, with_param) - self.assertEqual(with_param[-1]['ida_byte_size_cumulative'], total_size - old_ver_size, response) + march_size = self._get_byte_size_of_month("2019-03") + march_count = self._get_dataset_count_of_month("2019-03") + + response = self.client.get( + f"{self.url}?{self.dateparam_all}&latest=false" + ).data # returns all + self.assertEqual(response[-1]["count"], march_count, response) + self.assertEqual(response[-1]["ida_byte_size"], march_size, response) + self.assertEqual(response[-1]["count_cumulative"], total_count, response) + self.assertEqual(response[-1]["ida_byte_size_cumulative"], total_size, response) + + with_param = self.client.get(f"{self.url}?{self.dateparam_all}&latest=true").data + self.assertEqual(with_param[-1]["count"], march_count - 1, with_param) + self.assertEqual(with_param[-1]["ida_byte_size"], march_size - old_ver_size, with_param) + self.assertEqual(with_param[-1]["count_cumulative"], total_count - 1, with_param) + self.assertEqual( + with_param[-1]["ida_byte_size_cumulative"], + total_size - old_ver_size, + response, + ) # ensure that default value(true) is working as expected - without_param = self.client.get(f'{self.url}?{self.dateparam_all}').data - self.assertEqual(with_param[-1]['count'], without_param[-1]['count'], with_param) - self.assertEqual(with_param[-1]['ida_byte_size'], without_param[-1]['ida_byte_size'], with_param) - self.assertEqual(with_param[-1]['count_cumulative'], without_param[-1]['count_cumulative'], with_param) + without_param = self.client.get(f"{self.url}?{self.dateparam_all}").data + self.assertEqual(with_param[-1]["count"], without_param[-1]["count"], with_param) + self.assertEqual( + with_param[-1]["ida_byte_size"], + without_param[-1]["ida_byte_size"], + with_param, + ) + self.assertEqual( + with_param[-1]["count_cumulative"], + without_param[-1]["count_cumulative"], + with_param, + ) self.assertEqual( - with_param[-1]['ida_byte_size_cumulative'], without_param[-1]['ida_byte_size_cumulative'], with_param + with_param[-1]["ida_byte_size_cumulative"], + without_param[-1]["ida_byte_size_cumulative"], + with_param, ) def test_all_datasets_cumulative_removed_latest(self): second = self._create_new_dataset_version() - self._set_dataset_creation_date(second, '2019-03-11') + self._set_dataset_creation_date(second, "2019-03-11") self._set_removed_dataset(id=1) self._set_removed_dataset(id=second) @@ -589,61 
+634,81 @@ def test_all_datasets_cumulative_removed_latest(self):
         removed_size = self._get_catalog_record_size(id=1) + latest_size
         removed_count = 2

-        rem_lat = self.client.get(f'{self.url}?{self.dateparam_all}&removed=true&latest=true').data
+        rem_lat = self.client.get(f"{self.url}?{self.dateparam_all}&removed=true&latest=true").data

-        self.assertEqual(rem_lat[-1]['count'], 1, rem_lat) # id=second
-        self.assertEqual(rem_lat[-1]['ida_byte_size'], latest_size, rem_lat)
-        self.assertEqual(rem_lat[-1]['count_cumulative'], 1, rem_lat)
-        self.assertEqual(rem_lat[-1]['ida_byte_size_cumulative'], latest_size, rem_lat)
+        self.assertEqual(rem_lat[-1]["count"], 1, rem_lat)  # id=second
+        self.assertEqual(rem_lat[-1]["ida_byte_size"], latest_size, rem_lat)
+        self.assertEqual(rem_lat[-1]["count_cumulative"], 1, rem_lat)
+        self.assertEqual(rem_lat[-1]["ida_byte_size_cumulative"], latest_size, rem_lat)

-        rem_not_lat = self.client.get(f'{self.url}?{self.dateparam_all}&removed=true&latest=false').data
+        rem_not_lat = self.client.get(
+            f"{self.url}?{self.dateparam_all}&removed=true&latest=false"
+        ).data

-        self.assertEqual(rem_not_lat[-1]['count'], removed_count, rem_not_lat) # id=second
-        self.assertEqual(rem_not_lat[-1]['ida_byte_size'], removed_size, rem_not_lat)
-        self.assertEqual(rem_not_lat[-1]['count_cumulative'], removed_count, rem_not_lat)
-        self.assertEqual(rem_not_lat[-1]['ida_byte_size_cumulative'], removed_size, rem_not_lat)
+        self.assertEqual(rem_not_lat[-1]["count"], removed_count, rem_not_lat)  # ids 1 and second
+        self.assertEqual(rem_not_lat[-1]["ida_byte_size"], removed_size, rem_not_lat)
+        self.assertEqual(rem_not_lat[-1]["count_cumulative"], removed_count, rem_not_lat)
+        self.assertEqual(rem_not_lat[-1]["ida_byte_size_cumulative"], removed_size, rem_not_lat)

         # create new dataset with two versions, which will not be deleted
         new_cr_id = self._create_new_dataset(self.cr_test_data)
-        self._set_dataset_creation_date(new_cr_id, '2019-01-02')
+        self._set_dataset_creation_date(new_cr_id, "2019-01-02")

         new_cr_ver = self._create_new_dataset_version(new_cr_id)
-        self._set_dataset_creation_date(new_cr_ver, '2019-01-06')
+        self._set_dataset_creation_date(new_cr_ver, "2019-01-06")

         old_version_size = self._get_catalog_record_size(id=new_cr_id)

-        jan_count = self._get_dataset_count_of_month('2019-01')
-        jan_size = self._get_byte_size_of_month('2019-01')
+        jan_count = self._get_dataset_count_of_month("2019-01")
+        jan_size = self._get_byte_size_of_month("2019-01")

         total_count = self._get_total_dataset_count()
         total_size = self._get_total_byte_size()

-        not_rem_lat = self.client.get(f'{self.url}?{self.dateparam_all}&removed=false&latest=true').data
+        not_rem_lat = self.client.get(
+            f"{self.url}?{self.dateparam_all}&removed=false&latest=true"
+        ).data

         # missing the removed dataset from before and dataset id='new_cr_id'
-        self.assertEqual(not_rem_lat[-3]['count'], jan_count - 1, not_rem_lat)
-        self.assertEqual(not_rem_lat[-3]['ida_byte_size'], jan_size - old_version_size, not_rem_lat)
-        self.assertEqual(not_rem_lat[-1]['count_cumulative'], total_count - removed_count - 1, not_rem_lat)
-        self.assertEqual(not_rem_lat[-1]['ida_byte_size_cumulative'], total_size - removed_size - old_version_size,
-                         not_rem_lat)
+        self.assertEqual(not_rem_lat[-3]["count"], jan_count - 1, not_rem_lat)
+        self.assertEqual(not_rem_lat[-3]["ida_byte_size"], jan_size - old_version_size, not_rem_lat)
+        self.assertEqual(
+            not_rem_lat[-1]["count_cumulative"],
+            total_count - removed_count - 1,
+            not_rem_lat,
+        )
+        self.assertEqual(
+
not_rem_lat[-1]["ida_byte_size_cumulative"], + total_size - removed_size - old_version_size, + not_rem_lat, + ) - not_rem_not_lat = self.client.get(f'{self.url}?{self.dateparam_all}&removed=false&latest=false').data + not_rem_not_lat = self.client.get( + f"{self.url}?{self.dateparam_all}&removed=false&latest=false" + ).data - self.assertEqual(not_rem_not_lat[-3]['count'], jan_count, not_rem_not_lat) - self.assertEqual(not_rem_not_lat[-3]['ida_byte_size'], jan_size, not_rem_not_lat) - self.assertEqual(not_rem_not_lat[-1]['count_cumulative'], total_count - removed_count, - not_rem_not_lat) - self.assertEqual(not_rem_not_lat[-1]['ida_byte_size_cumulative'], total_size - removed_size, not_rem_not_lat) + self.assertEqual(not_rem_not_lat[-3]["count"], jan_count, not_rem_not_lat) + self.assertEqual(not_rem_not_lat[-3]["ida_byte_size"], jan_size, not_rem_not_lat) + self.assertEqual( + not_rem_not_lat[-1]["count_cumulative"], + total_count - removed_count, + not_rem_not_lat, + ) + self.assertEqual( + not_rem_not_lat[-1]["ida_byte_size_cumulative"], + total_size - removed_size, + not_rem_not_lat, + ) def test_all_datasets_cumulative_removed_legacy(self): leg_cr_1 = self._create_legacy_dataset() - self._set_dataset_creation_date(leg_cr_1, '2018-07-03') + self._set_dataset_creation_date(leg_cr_1, "2018-07-03") leg_cr_2 = self._create_legacy_dataset() - self._set_dataset_creation_date(leg_cr_2, '2019-02-08') + self._set_dataset_creation_date(leg_cr_2, "2019-02-08") self._set_removed_dataset(leg_cr_2) - self._set_removed_dataset(id=8) # belongs to first interval, i.e. 2018-06 + self._set_removed_dataset(id=8) # belongs to first interval, i.e. 2018-06 leg_non_rem_size = self._get_catalog_record_size(leg_cr_1) leg_removed_size = self._get_catalog_record_size(leg_cr_2) @@ -655,48 +720,70 @@ def test_all_datasets_cumulative_removed_legacy(self): total_count = self._get_total_dataset_count() total_size = self._get_total_byte_size() - june_count = self._get_dataset_count_of_month('2018-06') - june_size = self._get_byte_size_of_month('2018-06') + june_count = self._get_dataset_count_of_month("2018-06") + june_size = self._get_byte_size_of_month("2018-06") - feb_count = self._get_dataset_count_of_month('2019-02') - feb_size = self._get_byte_size_of_month('2019-02') + feb_count = self._get_dataset_count_of_month("2019-02") + feb_size = self._get_byte_size_of_month("2019-02") - rem_leg = self.client.get(f'{self.url}?{self.dateparam_all}&removed=true&legacy=true').data + rem_leg = self.client.get(f"{self.url}?{self.dateparam_all}&removed=true&legacy=true").data - self.assertEqual(rem_leg[-2]['count'], 1, rem_leg) - self.assertEqual(rem_leg[-2]['ida_byte_size'], leg_removed_size, rem_leg) - self.assertEqual(rem_leg[-1]['count_cumulative'], 1, rem_leg) - self.assertEqual(rem_leg[-1]['ida_byte_size_cumulative'], leg_removed_size, rem_leg) + self.assertEqual(rem_leg[-2]["count"], 1, rem_leg) + self.assertEqual(rem_leg[-2]["ida_byte_size"], leg_removed_size, rem_leg) + self.assertEqual(rem_leg[-1]["count_cumulative"], 1, rem_leg) + self.assertEqual(rem_leg[-1]["ida_byte_size_cumulative"], leg_removed_size, rem_leg) - rem_not_leg = self.client.get(f'{self.url}?{self.dateparam_all}&removed=true&legacy=false').data + rem_not_leg = self.client.get( + f"{self.url}?{self.dateparam_all}&removed=true&legacy=false" + ).data - self.assertEqual(rem_not_leg[0]['count'], 1, rem_not_leg) - self.assertEqual(rem_not_leg[0]['ida_byte_size'], removed_size, rem_not_leg) - self.assertEqual(rem_not_leg[-1]['count_cumulative'], 1, 
rem_not_leg) - self.assertEqual(rem_not_leg[-1]['ida_byte_size_cumulative'], removed_size, rem_not_leg) + self.assertEqual(rem_not_leg[0]["count"], 1, rem_not_leg) + self.assertEqual(rem_not_leg[0]["ida_byte_size"], removed_size, rem_not_leg) + self.assertEqual(rem_not_leg[-1]["count_cumulative"], 1, rem_not_leg) + self.assertEqual(rem_not_leg[-1]["ida_byte_size_cumulative"], removed_size, rem_not_leg) - not_rem_leg = self.client.get(f'{self.url}?{self.dateparam_all}&removed=false&legacy=true').data + not_rem_leg = self.client.get( + f"{self.url}?{self.dateparam_all}&removed=false&legacy=true" + ).data - self.assertEqual(not_rem_leg[1]['count'], 1, not_rem_leg) - self.assertEqual(not_rem_leg[1]['ida_byte_size'], leg_non_rem_size, not_rem_leg) - self.assertEqual(not_rem_leg[-1]['count_cumulative'], 1, not_rem_leg) - self.assertEqual(not_rem_leg[-1]['ida_byte_size_cumulative'], leg_non_rem_size, not_rem_leg) + self.assertEqual(not_rem_leg[1]["count"], 1, not_rem_leg) + self.assertEqual(not_rem_leg[1]["ida_byte_size"], leg_non_rem_size, not_rem_leg) + self.assertEqual(not_rem_leg[-1]["count_cumulative"], 1, not_rem_leg) + self.assertEqual(not_rem_leg[-1]["ida_byte_size_cumulative"], leg_non_rem_size, not_rem_leg) - not_rem_not_leg = self.client.get(f'{self.url}?{self.dateparam_all}&removed=false&legacy=false').data + not_rem_not_leg = self.client.get( + f"{self.url}?{self.dateparam_all}&removed=false&legacy=false" + ).data - self.assertEqual(not_rem_not_leg[0]['count'], june_count - 1, not_rem_not_leg) - self.assertEqual(not_rem_not_leg[0]['ida_byte_size'], june_size - removed_size, not_rem_not_leg) - self.assertEqual(not_rem_not_leg[-2]['count'], feb_count - 1, not_rem_not_leg) - self.assertEqual(not_rem_not_leg[-2]['ida_byte_size'], feb_size - leg_removed_size, not_rem_not_leg) - self.assertEqual(not_rem_not_leg[-1]['count_cumulative'], total_count - rem_leg_count, not_rem_not_leg) - self.assertEqual(not_rem_not_leg[-1]['ida_byte_size_cumulative'], total_size - rem_leg_size, not_rem_not_leg) + self.assertEqual(not_rem_not_leg[0]["count"], june_count - 1, not_rem_not_leg) + self.assertEqual( + not_rem_not_leg[0]["ida_byte_size"], + june_size - removed_size, + not_rem_not_leg, + ) + self.assertEqual(not_rem_not_leg[-2]["count"], feb_count - 1, not_rem_not_leg) + self.assertEqual( + not_rem_not_leg[-2]["ida_byte_size"], + feb_size - leg_removed_size, + not_rem_not_leg, + ) + self.assertEqual( + not_rem_not_leg[-1]["count_cumulative"], + total_count - rem_leg_count, + not_rem_not_leg, + ) + self.assertEqual( + not_rem_not_leg[-1]["ida_byte_size_cumulative"], + total_size - rem_leg_size, + not_rem_not_leg, + ) def test_all_datasets_cumulative_latest_legacy(self): leg_cr = self._create_legacy_dataset() - self._set_dataset_creation_date(leg_cr, '2019-03-08') + self._set_dataset_creation_date(leg_cr, "2019-03-08") second = self._create_new_dataset_version(leg_cr) - self._set_dataset_creation_date(second, '2019-03-12') + self._set_dataset_creation_date(second, "2019-03-12") leg_cr_size = self._get_catalog_record_size(id=leg_cr) second_size = self._get_catalog_record_size(id=second) @@ -707,113 +794,156 @@ def test_all_datasets_cumulative_latest_legacy(self): total_count = self._get_total_dataset_count() total_size = self._get_total_byte_size() - march_count = self._get_dataset_count_of_month('2019-03') - march_size = self._get_byte_size_of_month('2019-03') + march_count = self._get_dataset_count_of_month("2019-03") + march_size = self._get_byte_size_of_month("2019-03") - leg_lat = 
self.client.get(f'{self.url}?{self.dateparam_all}&legacy=true&latest=true').data + leg_lat = self.client.get(f"{self.url}?{self.dateparam_all}&legacy=true&latest=true").data - self.assertEqual(leg_lat[-1]['count'], 1, leg_lat) - self.assertEqual(leg_lat[-1]['ida_byte_size'], second_size, leg_lat) - self.assertEqual(leg_lat[-1]['count_cumulative'], 1, leg_lat) - self.assertEqual(leg_lat[-1]['ida_byte_size_cumulative'], second_size, leg_lat) + self.assertEqual(leg_lat[-1]["count"], 1, leg_lat) + self.assertEqual(leg_lat[-1]["ida_byte_size"], second_size, leg_lat) + self.assertEqual(leg_lat[-1]["count_cumulative"], 1, leg_lat) + self.assertEqual(leg_lat[-1]["ida_byte_size_cumulative"], second_size, leg_lat) - leg_not_lat = self.client.get(f'{self.url}?{self.dateparam_all}&legacy=true&latest=false').data + leg_not_lat = self.client.get( + f"{self.url}?{self.dateparam_all}&legacy=true&latest=false" + ).data - self.assertEqual(leg_not_lat[-1]['count'], legacy_count, leg_not_lat) - self.assertEqual(leg_not_lat[-1]['ida_byte_size'], legacy_size, leg_not_lat) - self.assertEqual(leg_not_lat[-1]['count_cumulative'], legacy_count, leg_not_lat) - self.assertEqual(leg_not_lat[-1]['ida_byte_size_cumulative'], legacy_size, leg_not_lat) + self.assertEqual(leg_not_lat[-1]["count"], legacy_count, leg_not_lat) + self.assertEqual(leg_not_lat[-1]["ida_byte_size"], legacy_size, leg_not_lat) + self.assertEqual(leg_not_lat[-1]["count_cumulative"], legacy_count, leg_not_lat) + self.assertEqual(leg_not_lat[-1]["ida_byte_size_cumulative"], legacy_size, leg_not_lat) - not_leg_not_lat = self.client.get(f'{self.url}?{self.dateparam_all}&legacy=false&latest=false').data + not_leg_not_lat = self.client.get( + f"{self.url}?{self.dateparam_all}&legacy=false&latest=false" + ).data - self.assertEqual(not_leg_not_lat[-1]['count'], march_count - legacy_count, not_leg_not_lat) - self.assertEqual(not_leg_not_lat[-1]['ida_byte_size'], march_size - legacy_size, not_leg_not_lat) - self.assertEqual(not_leg_not_lat[-1]['count_cumulative'], total_count - legacy_count, not_leg_not_lat) - self.assertEqual(not_leg_not_lat[-1]['ida_byte_size_cumulative'], total_size - legacy_size, not_leg_not_lat) + self.assertEqual(not_leg_not_lat[-1]["count"], march_count - legacy_count, not_leg_not_lat) + self.assertEqual( + not_leg_not_lat[-1]["ida_byte_size"], + march_size - legacy_size, + not_leg_not_lat, + ) + self.assertEqual( + not_leg_not_lat[-1]["count_cumulative"], + total_count - legacy_count, + not_leg_not_lat, + ) + self.assertEqual( + not_leg_not_lat[-1]["ida_byte_size_cumulative"], + total_size - legacy_size, + not_leg_not_lat, + ) class StatisticRPCforDrafts(StatisticRPCCommon, CatalogRecordApiWriteCommon): """ Tests that drafts are not taken into account when calculating statistics """ + def test_count_datasets_api_for_drafts(self): """ Tests that rpc/statistics/count_datasets returns only count of published datasets """ - response_1 = self.client.get('/rpc/v2/statistics/count_datasets').data + response_1 = self.client.get("/rpc/v2/statistics/count_datasets").data self._set_dataset_as_draft(1) - self.assertEqual(CatalogRecord.objects.get(pk=1).state, 'draft', - 'Dataset with id=1 should have changed state to draft') + self.assertEqual( + CatalogRecord.objects.get(pk=1).state, + "draft", + "Dataset with id=1 should have changed state to draft", + ) - response_2 = self.client.get('/rpc/v2/statistics/count_datasets').data - self.assertNotEqual(response_1['count'], response_2['count'], - 'Drafts should not be returned in count_datasets api') 
+ response_2 = self.client.get("/rpc/v2/statistics/count_datasets").data + self.assertNotEqual( + response_1["count"], + response_2["count"], + "Drafts should not be returned in count_datasets api", + ) def test_all_datasets_cumulative_for_drafts(self): """ Tests that /rpc/v2/statistics/all_datasets_cumulative returns only published datasets """ - url = '/rpc/v2/statistics/all_datasets_cumulative?from_date=2019-06&to_date=2019-06' + url = "/rpc/v2/statistics/all_datasets_cumulative?from_date=2019-06&to_date=2019-06" - self._set_dataset_creation_date(1, '2019-06-15') + self._set_dataset_creation_date(1, "2019-06-15") response_1 = self.client.get(url).data self._set_dataset_as_draft(1) response_2 = self.client.get(url).data # ensure the counts and byte sizes are calculated without drafts - self.assertNotEqual(response_1[0]['count'], response_2[0]['count'], - 'Count for June should reduce by one as dataset id=1 was set as draft') - self.assertNotEqual(response_1[0]['ida_byte_size'], response_2[0]['ida_byte_size'], - 'Byte size for June should reduce by one as dataset id=1 was set as draft') + self.assertNotEqual( + response_1[0]["count"], + response_2[0]["count"], + "Count for June should reduce by one as dataset id=1 was set as draft", + ) + self.assertNotEqual( + response_1[0]["ida_byte_size"], + response_2[0]["ida_byte_size"], + "Byte size for June should reduce by one as dataset id=1 was set as draft", + ) def test_catalog_datasets_cumulative_for_drafts(self): """ Tests that /rpc/v2/statistics/catalog_datasets_cumulative returns only published datasets """ - url = '/rpc/v2/statistics/catalog_datasets_cumulative?from_date=2019-06-01&to_date=2019-06-30' + url = ( + "/rpc/v2/statistics/catalog_datasets_cumulative?from_date=2019-06-01&to_date=2019-06-30" + ) catalog = "urn:nbn:fi:att:2955e904-e3dd-4d7e-99f1-3fed446f96d3" - self._set_dataset_creation_date(1, '2019-06-15') - self._set_cr_datacatalog(1, catalog) # Adds id=1 to catalog + self._set_dataset_creation_date(1, "2019-06-15") + self._set_cr_datacatalog(1, catalog) # Adds id=1 to catalog - count_1 = self.client.get(url).data[catalog]['open'][0]['count'] - total_1 = self.client.get(url).data[catalog]['total'] + count_1 = self.client.get(url).data[catalog]["open"][0]["count"] + total_1 = self.client.get(url).data[catalog]["total"] self._set_dataset_as_draft(1) - count_2 = self.client.get(url).data[catalog]['open'][0]['count'] - total_2 = self.client.get(url).data[catalog]['total'] + count_2 = self.client.get(url).data[catalog]["open"][0]["count"] + total_2 = self.client.get(url).data[catalog]["total"] # ensure the count and total are calculated without drafts - self.assertNotEqual(count_1, count_2, 'Count should reduce by one as dataset id=1 was set as draft') - self.assertNotEqual(total_1, total_2, 'Total should reduce by one as dataset id=1 was set as draft') + self.assertNotEqual( + count_1, + count_2, + "Count should reduce by one as dataset id=1 was set as draft", + ) + self.assertNotEqual( + total_1, + total_2, + "Total should reduce by one as dataset id=1 was set as draft", + ) def test_end_user_datasets_cumulative_for_drafts(self): - ''' End user api should return only published data ''' - url = '/rpc/v2/statistics/end_user_datasets_cumulative?from_date=2019-06-01&to_date=2019-06-30' + """ End user api should return only published data """ + url = "/rpc/v2/statistics/end_user_datasets_cumulative?from_date=2019-06-01&to_date=2019-06-30" - self._set_dataset_creation_date(10, '2019-06-15') - count_1 = 
self.client.get(url).data[0]['count']
+        self._set_dataset_creation_date(10, "2019-06-15")
+        count_1 = self.client.get(url).data[0]["count"]

         self._set_dataset_as_draft(10)
-        count_2 = self.client.get(url).data[0]['count']
+        count_2 = self.client.get(url).data[0]["count"]

         # ensure the count are calculated without drafts
-        self.assertNotEqual(count_1, count_2, 'Count should be reduced by one after setting id=10 as draft')
+        self.assertNotEqual(
+            count_1,
+            count_2,
+            "Count should be reduced by one after setting id=10 as draft",
+        )

     def test_organization_datasets_cumulative_for_drafts(self):
-        ''' Organization api should return only published data '''
+        """ Organization api should return only published data """
         url = "/rpc/v2/statistics/organization_datasets_cumulative?from_date=2019-06-01&to_date=2019-06-30"

-        self._set_dataset_creation_date(1, '2019-06-15')
-        self._set_cr_organization(1, 'org_2')
-        total_1 = self.client.get(url).data['org_2']['total']
+        self._set_dataset_creation_date(1, "2019-06-15")
+        self._set_cr_organization(1, "org_2")
+        total_1 = self.client.get(url).data["org_2"]["total"]

         self._set_dataset_as_draft(1)
-        total_2 = self.client.get(url).data['org_2']['total']
+        total_2 = self.client.get(url).data["org_2"]["total"]

         # ensure the totals are calculated without drafts
-        self.assertNotEqual(total_1, total_2, 'Count be reduced by one after setting id=1 as draft')
\ No newline at end of file
+        self.assertNotEqual(total_1, total_2, "Count should be reduced by one after setting id=1 as draft")
diff --git a/src/metax_api/tests/management/commands/loadinitialdata.py b/src/metax_api/tests/management/commands/loadinitialdata.py
index 2d64f295..fcc576b2 100755
--- a/src/metax_api/tests/management/commands/loadinitialdata.py
+++ b/src/metax_api/tests/management/commands/loadinitialdata.py
@@ -24,8 +24,8 @@ class LoadInitialDataTest(LiveServerTestCase):

     def setUp(self, *args, **kwargs):
         self._test_settings = {
-            'metax_url': self.live_server_url,
-            'metax_credentials': settings.API_METAX_USER,
+            "metax_url": self.live_server_url,
+            "metax_credentials": settings.API_METAX_USER,
         }
         super(LoadInitialDataTest, self).setUp(*args, **kwargs)

@@ -34,11 +34,17 @@ def test_create(self):
         storages_before = FileStorage.objects.all().count()

         out = StringIO()
-        call_command('loadinitialdata', **{ 'test_settings': self._test_settings, 'stdout': out, })
+        call_command(
+            "loadinitialdata",
+            **{
+                "test_settings": self._test_settings,
+                "stdout": out,
+            },
+        )
         cmd_output = out.getvalue()

-        self.assertIn('Created catalog', cmd_output)
-        self.assertIn('Created file storage', cmd_output)
+        self.assertIn("Created catalog", cmd_output)
+        self.assertIn("Created file storage", cmd_output)
         self.assertEqual(catalogs_before < DataCatalog.objects.all().count(), True)
         self.assertEqual(storages_before < FileStorage.objects.all().count(), True)

@@ -48,10 +54,22 @@ def test_update(self):
         """
         out = StringIO()
         # create
-        call_command('loadinitialdata', **{ 'test_settings': self._test_settings, 'stdout': out, })
+        call_command(
+            "loadinitialdata",
+            **{
+                "test_settings": self._test_settings,
+                "stdout": out,
+            },
+        )
         # update
-        call_command('loadinitialdata', **{ 'test_settings': self._test_settings, 'stdout': out, })
+        call_command(
+            "loadinitialdata",
+            **{
+                "test_settings": self._test_settings,
+                "stdout": out,
+            },
+        )
         cmd_output = out.getvalue()

-        self.assertIn('Updated catalog', cmd_output)
-        self.assertIn('Updated file storage', cmd_output)
+        self.assertIn("Updated catalog", cmd_output)
+        self.assertIn("Updated file
storage", cmd_output) diff --git a/src/metax_api/tests/middleware/test_middleware.py b/src/metax_api/tests/middleware/test_middleware.py index ce5dd2a0..45fdb24b 100755 --- a/src/metax_api/tests/middleware/test_middleware.py +++ b/src/metax_api/tests/middleware/test_middleware.py @@ -44,18 +44,18 @@ def test_authorization_not_required(self): # reset credentials self.client.credentials() - response = self.client.get('/rest/datasets') + response = self.client.get("/rest/datasets") self.assertNotEqual(response.status_code, FORBIDDEN) - response = self.client.get('/rest/datasets/1') + response = self.client.get("/rest/datasets/1") self.assertNotEqual(response.status_code, FORBIDDEN) def test_optional_authorizatiaon_during_get(self): """ If auth headers are passed during GET, the user should then be identified by them. """ - response = self.client.get('/rest/datasets') + response = self.client.get("/rest/datasets") self.assertNotEqual(response.status_code, FORBIDDEN) - response = self.client.get('/rest/datasets/1') + response = self.client.get("/rest/datasets/1") self.assertNotEqual(response.status_code, FORBIDDEN) def test_optional_authorizatiaon_during_get_fails(self): @@ -63,8 +63,8 @@ def test_optional_authorizatiaon_during_get_fails(self): If auth headers are passed during GET, the user should then be identified by them. And if credentials are wrong, then access is forbidden """ - self._use_http_authorization(username='nope', password='wrong') - response = self.client.get('/rest/datasets') + self._use_http_authorization(username="nope", password="wrong") + response = self.client.get("/rest/datasets") self.assertEqual(response.status_code, FORBIDDEN) # @@ -81,13 +81,13 @@ def test_authorization_ok(self): The following requests are invalid by their content, but none should fail to the very first step of identifying the api caller. """ - response = self.client.post('/rest/datasets') + response = self.client.post("/rest/datasets") self.assertNotEqual(response.status_code, FORBIDDEN) - response = self.client.put('/rest/datasets') + response = self.client.put("/rest/datasets") self.assertNotEqual(response.status_code, FORBIDDEN) - response = self.client.patch('/rest/datasets') + response = self.client.patch("/rest/datasets") self.assertNotEqual(response.status_code, FORBIDDEN) - response = self.client.delete('/rest/datasets') + response = self.client.delete("/rest/datasets") self.assertNotEqual(response.status_code, FORBIDDEN) def test_unknown_user(self): @@ -95,25 +95,25 @@ def test_unknown_user(self): Unknown user credentials, every request should fail to the very first step of identifying the api caller. 
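
        Concretely, with the DRF test client that amounts to something like the
        sketch below; the _use_http_authorization() helper used in these tests
        is assumed to do roughly the same under the hood:

            import base64

            token = base64.b64encode(b"username:password").decode()
            self.client.credentials(HTTP_AUTHORIZATION=f"Basic {token}")
            response = self.client.get("/rest/datasets")  # identified as 'username'
            self.client.credentials()  # clear credentials -> anonymous again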
""" - self._use_http_authorization(username='other', password='pw') - response = self.client.post('/rest/datasets') + self._use_http_authorization(username="other", password="pw") + response = self.client.post("/rest/datasets") self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.put('/rest/datasets') + response = self.client.put("/rest/datasets") self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.patch('/rest/datasets') + response = self.client.patch("/rest/datasets") self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.delete('/rest/datasets') + response = self.client.delete("/rest/datasets") self.assertEqual(response.status_code, FORBIDDEN) def test_wrong_password(self): - self._use_http_authorization(password='wrongpassword') - response = self.client.post('/rest/datasets') + self._use_http_authorization(password="wrongpassword") + response = self.client.post("/rest/datasets") self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.put('/rest/datasets') + response = self.client.put("/rest/datasets") self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.patch('/rest/datasets') + response = self.client.patch("/rest/datasets") self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.delete('/rest/datasets') + response = self.client.delete("/rest/datasets") self.assertEqual(response.status_code, FORBIDDEN) # @@ -128,74 +128,80 @@ def test_wrong_password(self): # def test_malformed_auth_header(self): - self._use_http_authorization(header_value='Basic hubbabubba') - response = self.client.post('/rest/datasets') + self._use_http_authorization(header_value="Basic hubbabubba") + response = self.client.post("/rest/datasets") self.assertEqual(response.status_code, FORBIDDEN) def test_invalid_auth_method(self): - self._use_http_authorization(header_value='NotSupported hubbabubba') - response = self.client.post('/rest/datasets') + self._use_http_authorization(header_value="NotSupported hubbabubba") + response = self.client.post("/rest/datasets") self.assertEqual(response.status_code, FORBIDDEN) class ApiModifyResponseTestV1(CatalogRecordApiWriteCommon): - def test_catalog_record_get_last_modified_header(self): - response = self.client.get('/rest/datasets/1') + response = self.client.get("/rest/datasets/1") self._validate_response(response) def test_catalog_record_post_last_modified_header(self): - response = self.client.post('/rest/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self._validate_response(response) def test_catalog_record_put_last_modified_header(self): - cr = self.client.get('/rest/datasets/1', format="json").data - cr['preservation_description'] = 'what' - response = self.client.put('/rest/datasets/1', cr, format="json") + cr = self.client.get("/rest/datasets/1", format="json").data + cr["preservation_description"] = "what" + response = self.client.put("/rest/datasets/1", cr, format="json") self._validate_response(response) def test_catalog_record_patch_last_modified_header(self): - cr = self.client.get('/rest/datasets/1', format="json").data - cr['preservation_description'] = 'what' - response = self.client.patch('/rest/datasets/1', cr, format="json") + cr = self.client.get("/rest/datasets/1", format="json").data + cr["preservation_description"] = "what" + response = self.client.patch("/rest/datasets/1", cr, format="json") self._validate_response(response) def 
test_catalog_record_delete_does_not_contain_last_modified_header(self): - response = self.client.delete('/rest/datasets/1') - self.assertFalse(response.has_header('Last-Modified')) + response = self.client.delete("/rest/datasets/1") + self.assertFalse(response.has_header("Last-Modified")) def test_catalog_record_bulk_create_get_last_modified_header(self): - response = self.client.post('/rest/datasets', [self.cr_test_data, self.cr_test_data], format="json") + response = self.client.post( + "/rest/datasets", [self.cr_test_data, self.cr_test_data], format="json" + ) self._validate_response(response) def _validate_response(self, response): - data = response.data.get('success', response.data) - obj = data[0].get('object', None) if isinstance(data, list) else data + data = response.data.get("success", response.data) + obj = data[0].get("object", None) if isinstance(data, list) else data self.assertIsNotNone(obj) - expected_modified_str = obj['date_modified'] if 'date_modified' in obj else obj.get('date_created', None) - expected_modified = timezone.localtime(parse_timestamp_string_to_tz_aware_datetime(expected_modified_str), - timezone=tz('GMT')) + expected_modified_str = ( + obj["date_modified"] if "date_modified" in obj else obj.get("date_created", None) + ) + expected_modified = timezone.localtime( + parse_timestamp_string_to_tz_aware_datetime(expected_modified_str), + timezone=tz("GMT"), + ) - self.assertTrue(response.has_header('Last-Modified')) - actual_modified = timezone.localtime(parse_timestamp_string_to_tz_aware_datetime(response.get('Last-Modified')), - timezone=tz('GMT')) + self.assertTrue(response.has_header("Last-Modified")) + actual_modified = timezone.localtime( + parse_timestamp_string_to_tz_aware_datetime(response.get("Last-Modified")), + timezone=tz("GMT"), + ) self.assertEqual(expected_modified, actual_modified) class ApiStreamHttpResponse(CatalogRecordApiWriteCommon): - def test_no_streaming_with_paging(self): - response = self.client.get('/rest/datasets?stream=true') + response = self.client.get("/rest/datasets?stream=true") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.streaming, False) def test_streaming_json(self): - response = self.client.get('/rest/datasets?pagination=false&stream=true') + response = self.client.get("/rest/datasets?pagination=false&stream=true") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.streaming, True) - response = self.client.get('/rest/files?pagination=false&stream=true') + response = self.client.get("/rest/files?pagination=false&stream=true") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.streaming, True) diff --git a/src/metax_api/tests/middleware/test_middleware_v2.py b/src/metax_api/tests/middleware/test_middleware_v2.py index fb465f84..4b3d791d 100755 --- a/src/metax_api/tests/middleware/test_middleware_v2.py +++ b/src/metax_api/tests/middleware/test_middleware_v2.py @@ -44,18 +44,18 @@ def test_authorization_not_required(self): # reset credentials self.client.credentials() - response = self.client.get('/rest/v2/datasets') + response = self.client.get("/rest/v2/datasets") self.assertNotEqual(response.status_code, FORBIDDEN) - response = self.client.get('/rest/v2/datasets/1') + response = self.client.get("/rest/v2/datasets/1") self.assertNotEqual(response.status_code, FORBIDDEN) def test_optional_authorizatiaon_during_get(self): """ If auth headers are passed during GET, the user should then be identified by them. 
""" - response = self.client.get('/rest/v2/datasets') + response = self.client.get("/rest/v2/datasets") self.assertNotEqual(response.status_code, FORBIDDEN) - response = self.client.get('/rest/v2/datasets/1') + response = self.client.get("/rest/v2/datasets/1") self.assertNotEqual(response.status_code, FORBIDDEN) def test_optional_authorizatiaon_during_get_fails(self): @@ -63,8 +63,8 @@ def test_optional_authorizatiaon_during_get_fails(self): If auth headers are passed during GET, the user should then be identified by them. And if credentials are wrong, then access is forbidden """ - self._use_http_authorization(username='nope', password='wrong') - response = self.client.get('/rest/v2/datasets') + self._use_http_authorization(username="nope", password="wrong") + response = self.client.get("/rest/v2/datasets") self.assertEqual(response.status_code, FORBIDDEN) # @@ -81,13 +81,13 @@ def test_authorization_ok(self): The following requests are invalid by their content, but none should fail to the very first step of identifying the api caller. """ - response = self.client.post('/rest/v2/datasets') + response = self.client.post("/rest/v2/datasets") self.assertNotEqual(response.status_code, FORBIDDEN) - response = self.client.put('/rest/v2/datasets') + response = self.client.put("/rest/v2/datasets") self.assertNotEqual(response.status_code, FORBIDDEN) - response = self.client.patch('/rest/v2/datasets') + response = self.client.patch("/rest/v2/datasets") self.assertNotEqual(response.status_code, FORBIDDEN) - response = self.client.delete('/rest/v2/datasets') + response = self.client.delete("/rest/v2/datasets") self.assertNotEqual(response.status_code, FORBIDDEN) def test_unknown_user(self): @@ -95,25 +95,25 @@ def test_unknown_user(self): Unknown user credentials, every request should fail to the very first step of identifying the api caller. 
""" - self._use_http_authorization(username='other', password='pw') - response = self.client.post('/rest/v2/datasets') + self._use_http_authorization(username="other", password="pw") + response = self.client.post("/rest/v2/datasets") self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.put('/rest/v2/datasets') + response = self.client.put("/rest/v2/datasets") self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.patch('/rest/v2/datasets') + response = self.client.patch("/rest/v2/datasets") self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.delete('/rest/v2/datasets') + response = self.client.delete("/rest/v2/datasets") self.assertEqual(response.status_code, FORBIDDEN) def test_wrong_password(self): - self._use_http_authorization(password='wrongpassword') - response = self.client.post('/rest/v2/datasets') + self._use_http_authorization(password="wrongpassword") + response = self.client.post("/rest/v2/datasets") self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.put('/rest/v2/datasets') + response = self.client.put("/rest/v2/datasets") self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.patch('/rest/v2/datasets') + response = self.client.patch("/rest/v2/datasets") self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.delete('/rest/v2/datasets') + response = self.client.delete("/rest/v2/datasets") self.assertEqual(response.status_code, FORBIDDEN) # @@ -128,73 +128,79 @@ def test_wrong_password(self): # def test_malformed_auth_header(self): - self._use_http_authorization(header_value='Basic hubbabubba') - response = self.client.post('/rest/v2/datasets') + self._use_http_authorization(header_value="Basic hubbabubba") + response = self.client.post("/rest/v2/datasets") self.assertEqual(response.status_code, FORBIDDEN) def test_invalid_auth_method(self): - self._use_http_authorization(header_value='NotSupported hubbabubba') - response = self.client.post('/rest/v2/datasets') + self._use_http_authorization(header_value="NotSupported hubbabubba") + response = self.client.post("/rest/v2/datasets") self.assertEqual(response.status_code, FORBIDDEN) class ApiModifyResponseTest(CatalogRecordApiWriteCommon): - def test_catalog_record_get_last_modified_header(self): - response = self.client.get('/rest/v2/datasets/1') + response = self.client.get("/rest/v2/datasets/1") self._validate_response(response) def test_catalog_record_post_last_modified_header(self): - response = self.client.post('/rest/v2/datasets', self.cr_test_data, format="json") + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self._validate_response(response) def test_catalog_record_put_last_modified_header(self): - cr = self.client.get('/rest/v2/datasets/1', format="json").data - cr['preservation_description'] = 'what' - response = self.client.put('/rest/v2/datasets/1', cr, format="json") + cr = self.client.get("/rest/v2/datasets/1", format="json").data + cr["preservation_description"] = "what" + response = self.client.put("/rest/v2/datasets/1", cr, format="json") self._validate_response(response) def test_catalog_record_patch_last_modified_header(self): - cr = self.client.get('/rest/v2/datasets/1', format="json").data - cr['preservation_description'] = 'what' - response = self.client.patch('/rest/v2/datasets/1', cr, format="json") + cr = self.client.get("/rest/v2/datasets/1", format="json").data + cr["preservation_description"] = "what" + response = 
self.client.patch("/rest/v2/datasets/1", cr, format="json") self._validate_response(response) def test_catalog_record_delete_does_not_contain_last_modified_header(self): - response = self.client.delete('/rest/v2/datasets/1') - self.assertFalse(response.has_header('Last-Modified')) + response = self.client.delete("/rest/v2/datasets/1") + self.assertFalse(response.has_header("Last-Modified")) def test_catalog_record_bulk_create_get_last_modified_header(self): - response = self.client.post('/rest/v2/datasets', [self.cr_test_data, self.cr_test_data], format="json") + response = self.client.post( + "/rest/v2/datasets", [self.cr_test_data, self.cr_test_data], format="json" + ) self._validate_response(response) def _validate_response(self, response): - data = response.data.get('success', response.data) - obj = data[0].get('object', None) if isinstance(data, list) else data + data = response.data.get("success", response.data) + obj = data[0].get("object", None) if isinstance(data, list) else data self.assertIsNotNone(obj) - expected_modified_str = obj['date_modified'] if 'date_modified' in obj else obj.get('date_created', None) - expected_modified = timezone.localtime(parse_timestamp_string_to_tz_aware_datetime(expected_modified_str), - timezone=tz('GMT')) + expected_modified_str = ( + obj["date_modified"] if "date_modified" in obj else obj.get("date_created", None) + ) + expected_modified = timezone.localtime( + parse_timestamp_string_to_tz_aware_datetime(expected_modified_str), + timezone=tz("GMT"), + ) - self.assertTrue(response.has_header('Last-Modified')) - actual_modified = timezone.localtime(parse_timestamp_string_to_tz_aware_datetime(response.get('Last-Modified')), - timezone=tz('GMT')) + self.assertTrue(response.has_header("Last-Modified")) + actual_modified = timezone.localtime( + parse_timestamp_string_to_tz_aware_datetime(response.get("Last-Modified")), + timezone=tz("GMT"), + ) self.assertEqual(expected_modified, actual_modified) class ApiStreamHttpResponse(CatalogRecordApiWriteCommon): - def test_no_streaming_with_paging(self): - response = self.client.get('/rest/v2/datasets?stream=true') + response = self.client.get("/rest/v2/datasets?stream=true") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.streaming, False) def test_streaming_json(self): - response = self.client.get('/rest/v2/datasets?no_pagination=true&stream=true') + response = self.client.get("/rest/v2/datasets?no_pagination=true&stream=true") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.streaming, True) - response = self.client.get('/rest/v2/files?no_pagination=true&stream=true') + response = self.client.get("/rest/v2/files?no_pagination=true&stream=true") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.streaming, True) diff --git a/src/metax_api/tests/models/catalog_record.py b/src/metax_api/tests/models/catalog_record.py index da77689c..5a3bbe61 100755 --- a/src/metax_api/tests/models/catalog_record.py +++ b/src/metax_api/tests/models/catalog_record.py @@ -23,22 +23,28 @@ def setUpClass(cls): """ Loaded only once for test cases inside this class. 
""" - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(CatalogRecordModelBasicTest, cls).setUpClass() def setUp(self): - dataset_from_test_data = self._get_object_from_test_data('catalogrecord') - self.metadata_version_identifier = dataset_from_test_data['research_dataset']['metadata_version_identifier'] - self.identifier = dataset_from_test_data['identifier'] + dataset_from_test_data = self._get_object_from_test_data("catalogrecord") + self.metadata_version_identifier = dataset_from_test_data["research_dataset"][ + "metadata_version_identifier" + ] + self.identifier = dataset_from_test_data["identifier"] def test_get_by_identifiers(self): catalog_record = CatalogRecord.objects.get(identifier=self.identifier) self.assertEqual(catalog_record.identifier, self.identifier) catalog_record = CatalogRecord.objects.get( - research_dataset__contains={'metadata_version_identifier': self.metadata_version_identifier} + research_dataset__contains={ + "metadata_version_identifier": self.metadata_version_identifier + } + ) + self.assertEqual( + catalog_record.metadata_version_identifier, self.metadata_version_identifier ) - self.assertEqual(catalog_record.metadata_version_identifier, self.metadata_version_identifier) class CatalogRecordModelTests(TestCase, TestClassUtils): @@ -47,7 +53,7 @@ def setUpClass(cls): """ Loaded only once for test cases inside this class. """ - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(CatalogRecordModelTests, cls).setUpClass() def setUp(self): @@ -55,53 +61,58 @@ def setUp(self): def test_disallow_total_files_byte_size_manual_update(self): cr = self.cr - old = cr.research_dataset['total_files_byte_size'] - cr.research_dataset['total_files_byte_size'] = 999 + old = cr.research_dataset["total_files_byte_size"] + cr.research_dataset["total_files_byte_size"] = 999 cr.save() - self.assertEqual(old, cr.research_dataset['total_files_byte_size']) + self.assertEqual(old, cr.research_dataset["total_files_byte_size"]) def test_disallow_metadata_version_identifier_manual_update(self): cr = self.cr - old = cr.research_dataset['metadata_version_identifier'] - cr.research_dataset['metadata_version_identifier'] = 'changed' + old = cr.research_dataset["metadata_version_identifier"] + cr.research_dataset["metadata_version_identifier"] = "changed" cr.save() - self.assertEqual(old, cr.research_dataset['metadata_version_identifier']) + self.assertEqual(old, cr.research_dataset["metadata_version_identifier"]) def test_total_files_byte_size_auto_update_on_files_changed(self): """ Changing files of a dataset creates a new version, so make sure that the file size of the old version does NOT change, and the file size of the new version DOES change. 
""" - already_included_files = CatalogRecord.objects.get(pk=1).files.all().values_list('id', flat=True) + already_included_files = ( + CatalogRecord.objects.get(pk=1).files.all().values_list("id", flat=True) + ) new_file_id = File.objects.all().exclude(id__in=already_included_files).first().id - file_from_testdata = self._get_object_from_test_data('file', requested_index=new_file_id) + file_from_testdata = self._get_object_from_test_data("file", requested_index=new_file_id) cr = CatalogRecord.objects.get(pk=1) - old = cr.research_dataset['total_files_byte_size'] - cr.research_dataset['files'] = [file_from_testdata] + old = cr.research_dataset["total_files_byte_size"] + cr.research_dataset["files"] = [file_from_testdata] cr.save() new_version = cr.next_dataset_version - self.assertEqual(old, cr.research_dataset['total_files_byte_size']) - self.assertNotEqual(old, new_version.research_dataset['total_files_byte_size']) + self.assertEqual(old, cr.research_dataset["total_files_byte_size"]) + self.assertNotEqual(old, new_version.research_dataset["total_files_byte_size"]) def test_preservation_state_modified_auto_update(self): cr = self.cr old = cr.preservation_state_modified cr.preservation_state = 1 cr.save() - self.assertNotEqual(old, cr.preservation_state_modified, - 'preservation_state_modified should be automatically updated if changed') + self.assertNotEqual( + old, + cr.preservation_state_modified, + "preservation_state_modified should be automatically updated if changed", + ) class CatalogRecordManagerTests(TestCase, TestClassUtils): @classmethod def setUpClass(cls): - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(CatalogRecordManagerTests, cls).setUpClass() def test_get_using_dict_with_id(self): - row = {'id': 1, 'other_stuff': 'doesnt matter'} + row = {"id": 1, "other_stuff": "doesnt matter"} try: obj = CatalogRecord.objects.get(using_dict=row) except CatalogRecord.DoesNotExist: @@ -109,15 +120,15 @@ def test_get_using_dict_with_id(self): else: found = True - self.assertEqual(found, True, 'get with using_dict should have returned a result') + self.assertEqual(found, True, "get with using_dict should have returned a result") self.assertEqual(obj.id, 1) def test_get_using_dict_with_metadata_version_identifier(self): row = { - 'research_dataset': { - 'metadata_version_identifier': CatalogRecord.objects.first().metadata_version_identifier + "research_dataset": { + "metadata_version_identifier": CatalogRecord.objects.first().metadata_version_identifier }, - 'other_stuff': 'doesnt matter' + "other_stuff": "doesnt matter", } try: obj = CatalogRecord.objects.get(using_dict=row) @@ -126,11 +137,11 @@ def test_get_using_dict_with_metadata_version_identifier(self): else: found = True - self.assertEqual(found, True, 'get with using_dict should have returned a result') + self.assertEqual(found, True, "get with using_dict should have returned a result") self.assertEqual(obj.id, 1) def test_get_using_dict_error_not_found_1(self): - row = {'id': 101010, 'other_stuff': 'doesnt matter'} + row = {"id": 101010, "other_stuff": "doesnt matter"} try: CatalogRecord.objects.get(using_dict=row) except CatalogRecord.DoesNotExist: @@ -138,11 +149,15 @@ def test_get_using_dict_error_not_found_1(self): else: found = True - self.assertEqual(found, False, 'get with using_dict should have not returned a result') + self.assertEqual(found, False, "get with using_dict should have not returned a result") def 
test_get_using_dict_error_preferred_identifier_not_allowed(self): - row = {'research_dataset': {'preferred_identifier': CatalogRecord.objects.first().preferred_identifier}, - 'other_stuff': 'doesnt matter'} + row = { + "research_dataset": { + "preferred_identifier": CatalogRecord.objects.first().preferred_identifier + }, + "other_stuff": "doesnt matter", + } try: CatalogRecord.objects.get(using_dict=row) except ValidationError: @@ -150,11 +165,14 @@ def test_get_using_dict_error_preferred_identifier_not_allowed(self): else: found = True - self.assertEqual(found, False, - 'get with using_dict should have not returned a result, because preferred_identifier was used') + self.assertEqual( + found, + False, + "get with using_dict should have not returned a result, because preferred_identifier was used", + ) def test_get_using_dict_error_identifier_field_missing(self): - row = {'somefield': 111, 'other_stuff': 'doesnt matter'} + row = {"somefield": 111, "other_stuff": "doesnt matter"} try: CatalogRecord.objects.get(using_dict=row) except ValidationError: @@ -162,5 +180,8 @@ def test_get_using_dict_error_identifier_field_missing(self): else: found = True - self.assertEqual(found, False, - 'get with using_dict should have not returned a result because an identifier field is missing') + self.assertEqual( + found, + False, + "get with using_dict should have not returned a result because an identifier field is missing", + ) diff --git a/src/metax_api/tests/models/common.py b/src/metax_api/tests/models/common.py index f9a73ffe..3d008cd4 100755 --- a/src/metax_api/tests/models/common.py +++ b/src/metax_api/tests/models/common.py @@ -14,9 +14,8 @@ class CommonModelTests(TestCase, TestClassUtils): - def setUp(self): - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) def get(self): return CatalogRecord.objects.get(pk=1) @@ -37,7 +36,7 @@ def test_field_changed_ok(self): cr = self.get() cr.preservation_state = 2 - self.assertEqual(cr.field_changed('preservation_state'), True) + self.assertEqual(cr.field_changed("preservation_state"), True) def test_field_not_changed_ok(self): cr = self.get() @@ -46,12 +45,15 @@ def test_field_not_changed_ok(self): cr = self.get() cr.preservation_state = 1 - self.assertEqual(cr.field_changed('preservation_state'), False) + self.assertEqual(cr.field_changed("preservation_state"), False) def test_field_is_not_tracked(self): cr = self.get() - with self.assertRaises(FieldError, msg='field is not tracked, so checking changes should be an error'): - cr.field_changed('user_modified') + with self.assertRaises( + FieldError, + msg="field is not tracked, so checking changes should be an error", + ): + cr.field_changed("user_modified") def test_field_is_tracked_but_not_loaded(self): """ @@ -62,20 +64,23 @@ def test_field_is_tracked_but_not_loaded(self): However, if the field does end up being modified, then the caller should make sure the field is included when the object is being created. 
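        A sketch of the intended contract (field_changed is this project's own
        tracked-field helper; pk 1 comes from the loaded test data):

            cr = CatalogRecord.objects.only("id").get(pk=1)
            cr.preservation_state = 2
            cr.field_changed("preservation_state")  # raises FieldError: field not loaded

            cr = CatalogRecord.objects.only("id", "preservation_state").get(pk=1)
            cr.preservation_state = 2
            cr.field_changed("preservation_state")  # returns True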
""" - queryset = CatalogRecord.objects.filter(pk=1).only('id') + queryset = CatalogRecord.objects.filter(pk=1).only("id") cr = queryset[0] cr.preservation_state = 2 - with self.assertRaises(FieldError, msg='field is not loaded in init, so checking changes should be an error'): - cr.field_changed('preservation_state') + with self.assertRaises( + FieldError, + msg="field is not loaded in init, so checking changes should be an error", + ): + cr.field_changed("preservation_state") def test_field_is_tracked_and_explicitly_loaded(self): """ Same as above, but the caller makes sure to include the field in the ORM query. """ - queryset = CatalogRecord.objects.filter(pk=1).only('preservation_state') + queryset = CatalogRecord.objects.filter(pk=1).only("preservation_state") cr = queryset[0] cr.preservation_state = 2 - self.assertEqual(cr.field_changed('preservation_state'), True) + self.assertEqual(cr.field_changed("preservation_state"), True) class CommonModelTrackJsonFieldsTests(CommonModelTests): @@ -87,17 +92,20 @@ class CommonModelTrackJsonFieldsTests(CommonModelTests): def test_json_field_changed_ok(self): cr = self.get() - cr.research_dataset['preferred_identifier'] = 'new' - self.assertEqual(cr.field_changed('research_dataset.preferred_identifier'), True) + cr.research_dataset["preferred_identifier"] = "new" + self.assertEqual(cr.field_changed("research_dataset.preferred_identifier"), True) def test_json_field_not_changed_ok(self): cr = self.get() - cr.research_dataset['preferred_identifier'] = cr.research_dataset['preferred_identifier'] - self.assertEqual(cr.field_changed('research_dataset.preferred_identifier'), False) + cr.research_dataset["preferred_identifier"] = cr.research_dataset["preferred_identifier"] + self.assertEqual(cr.field_changed("research_dataset.preferred_identifier"), False) def test_json_field_is_tracked_but_not_loaded(self): - queryset = CatalogRecord.objects.filter(pk=1).only('id') + queryset = CatalogRecord.objects.filter(pk=1).only("id") cr = queryset[0] - cr.research_dataset['preferred_identifier'] = cr.research_dataset['preferred_identifier'] - with self.assertRaises(FieldError, msg='field is not loaded in init, so checking changes should be an error'): - cr.field_changed('research_dataset.preferred_identifier') + cr.research_dataset["preferred_identifier"] = cr.research_dataset["preferred_identifier"] + with self.assertRaises( + FieldError, + msg="field is not loaded in init, so checking changes should be an error", + ): + cr.field_changed("research_dataset.preferred_identifier") diff --git a/src/metax_api/tests/models/data_catalog.py b/src/metax_api/tests/models/data_catalog.py index a0844470..8f3fc5a9 100755 --- a/src/metax_api/tests/models/data_catalog.py +++ b/src/metax_api/tests/models/data_catalog.py @@ -18,7 +18,7 @@ def setUpClass(cls): """ Loaded only once for test cases inside this class. 
""" - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(DataCatalogModelTests, cls).setUpClass() def setUp(self): @@ -26,7 +26,7 @@ def setUp(self): def test_disallow_identifier_manual_update(self): dc = self.dc - old = dc.catalog_json['identifier'] - dc.catalog_json['identifier'] = 'changed value' + old = dc.catalog_json["identifier"] + dc.catalog_json["identifier"] = "changed value" dc.save() - self.assertEqual(old, dc.catalog_json['identifier']) + self.assertEqual(old, dc.catalog_json["identifier"]) diff --git a/src/metax_api/tests/models/directory.py b/src/metax_api/tests/models/directory.py index d1948a9c..18917438 100755 --- a/src/metax_api/tests/models/directory.py +++ b/src/metax_api/tests/models/directory.py @@ -14,13 +14,12 @@ class DirectoryModelTests(APITestCase, TestClassUtils): - @classmethod def setUpClass(cls): """ Loaded only once for test cases inside this class. """ - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(DirectoryModelTests, cls).setUpClass() def setUp(self): @@ -34,13 +33,16 @@ def test_calculate_byte_size_and_file_count(self): for root_dir in Directory.objects.filter(parent_directory_id=None): root_dir.calculate_byte_size_and_file_count() - byte_size = File.objects.filter(project_identifier=root_dir.project_identifier) \ - .aggregate(Sum('byte_size'))['byte_size__sum'] + byte_size = File.objects.filter( + project_identifier=root_dir.project_identifier + ).aggregate(Sum("byte_size"))["byte_size__sum"] file_count = File.objects.filter(project_identifier=root_dir.project_identifier).count() - response = self.client.get('/rest/directories/root?project=%s' % root_dir.project_identifier) - self.assertEqual(response.data['byte_size'], byte_size) - self.assertEqual(response.data['file_count'], file_count) + response = self.client.get( + "/rest/directories/root?project=%s" % root_dir.project_identifier + ) + self.assertEqual(response.data["byte_size"], byte_size) + self.assertEqual(response.data["file_count"], file_count) def test_disallow_calculate_byte_size_and_file_count_for_non_root(self): with self.assertRaises(Exception): diff --git a/src/metax_api/tests/models/file.py b/src/metax_api/tests/models/file.py index 5f04040e..ce6b4f0f 100755 --- a/src/metax_api/tests/models/file.py +++ b/src/metax_api/tests/models/file.py @@ -25,13 +25,13 @@ def setUpClass(cls): """ Loaded only once for test cases inside this class. 
""" - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(FileModelBasicTest, cls).setUpClass() def setUp(self): - file_from_test_data = self._get_object_from_test_data('file') - self.identifier = file_from_test_data['identifier'] - self.result_file_name = file_from_test_data['file_name'] + file_from_test_data = self._get_object_from_test_data("file") + self.identifier = file_from_test_data["identifier"] + self.result_file_name = file_from_test_data["file_name"] def test_get_by_identifier(self): file = File.objects.get(identifier=self.identifier) @@ -41,16 +41,16 @@ def test_get_by_identifier(self): class FileManagerTests(TestCase, TestClassUtils): @classmethod def setUpClass(cls): - call_command('loaddata', test_data_file_path, verbosity=0) + call_command("loaddata", test_data_file_path, verbosity=0) super(FileManagerTests, cls).setUpClass() def setUp(self): - file_from_test_data = self._get_object_from_test_data('file', requested_index=0) - self.identifier = file_from_test_data['identifier'] - self.pk = file_from_test_data['id'] + file_from_test_data = self._get_object_from_test_data("file", requested_index=0) + self.identifier = file_from_test_data["identifier"] + self.pk = file_from_test_data["id"] def test_get_using_dict_with_id(self): - row = {'id': 1, 'other_stuff': 'doesnt matter'} + row = {"id": 1, "other_stuff": "doesnt matter"} try: obj = File.objects.get(using_dict=row) except File.DoesNotExist: @@ -58,11 +58,11 @@ def test_get_using_dict_with_id(self): else: found = True - self.assertEqual(found, True, 'get with using_dict should have returned a result') + self.assertEqual(found, True, "get with using_dict should have returned a result") self.assertEqual(obj.id, 1) def test_get_using_dict_with_identifier(self): - row = {'identifier': self.identifier, 'other_stuff': 'doesnt matter'} + row = {"identifier": self.identifier, "other_stuff": "doesnt matter"} try: obj = File.objects.get(using_dict=row) except File.DoesNotExist: @@ -70,11 +70,11 @@ def test_get_using_dict_with_identifier(self): else: found = True - self.assertEqual(found, True, 'get with using_dict should have returned a result') + self.assertEqual(found, True, "get with using_dict should have returned a result") self.assertEqual(obj.id, 1) def test_get_using_dict_error_not_found_1(self): - row = {'id': 101010, 'other_stuff': 'doesnt matter'} + row = {"id": 101010, "other_stuff": "doesnt matter"} try: File.objects.get(using_dict=row) except File.DoesNotExist: @@ -82,11 +82,15 @@ def test_get_using_dict_error_not_found_1(self): else: found = True - self.assertEqual(found, False, 'get with using_dict should have not returned a result') + self.assertEqual(found, False, "get with using_dict should have not returned a result") def test_get_using_dict_error_preferred_identifier_not_allowed(self): - row = {'research_dataset': {'preferred_identifier': 'urn:nbn:fi:att:e1a2a565-f4e5-4b55-bc92-790482e69845'}, - 'other_stuff': 'doesnt matter'} + row = { + "research_dataset": { + "preferred_identifier": "urn:nbn:fi:att:e1a2a565-f4e5-4b55-bc92-790482e69845" + }, + "other_stuff": "doesnt matter", + } try: File.objects.get(using_dict=row) except ValidationError: @@ -94,11 +98,14 @@ def test_get_using_dict_error_preferred_identifier_not_allowed(self): else: found = True - self.assertEqual(found, False, - 'get with using_dict should have not returned a result, because preferred_identifier was used') + self.assertEqual( + found, + False, + "get with using_dict 
should have not returned a result, because preferred_identifier was used", + ) def test_get_using_dict_error_identifier_field_missing(self): - row = {'somefield': 111, 'other_stuff': 'doesnt matter'} + row = {"somefield": 111, "other_stuff": "doesnt matter"} try: File.objects.get(using_dict=row) except ValidationError: @@ -106,5 +113,8 @@ def test_get_using_dict_error_identifier_field_missing(self): else: found = True - self.assertEqual(found, False, - 'get with using_dict should have not returned a result because an identifier field is missing') + self.assertEqual( + found, + False, + "get with using_dict should have not returned a result because an identifier field is missing", + ) diff --git a/src/metax_api/tests/rabbitmq/consume.py b/src/metax_api/tests/rabbitmq/consume.py index 5837637b..c8eff3f5 100755 --- a/src/metax_api/tests/rabbitmq/consume.py +++ b/src/metax_api/tests/rabbitmq/consume.py @@ -14,26 +14,21 @@ script to listen for messages sent when someone accesses /rest/datasets/pid/rabbitmq """ -test_user = { - 'name': 'testaaja', - 'password': 'testaaja', - 'vhost': 'metax' -} +test_user = {"name": "testaaja", "password": "testaaja", "vhost": "metax"} -credentials = pika.PlainCredentials(test_user['name'], test_user['password']) +credentials = pika.PlainCredentials(test_user["name"], test_user["password"]) connection = pika.BlockingConnection( pika.ConnectionParameters( - settings['HOSTS'][0], - settings['PORT'], - test_user['vhost'], - credentials)) + settings["HOSTS"][0], settings["PORT"], test_user["vhost"], credentials + ) +) channel = connection.channel() -exchange = 'datasets' -queue_1 = 'testaaja-create' -queue_2 = 'testaaja-update' -queue_3 = 'testaaja-delete' +exchange = "datasets" +queue_1 = "testaaja-create" +queue_2 = "testaaja-update" +queue_3 = "testaaja-delete" # note: requires write permission to exchanges # channel.exchange_declare(exchange=exchange, type='fanout') @@ -41,9 +36,9 @@ channel.queue_declare(queue_2, durable=True) channel.queue_declare(queue_3, durable=True) -channel.queue_bind(exchange=exchange, queue=queue_1, routing_key='create') -channel.queue_bind(exchange=exchange, queue=queue_2, routing_key='update') -channel.queue_bind(exchange=exchange, queue=queue_3, routing_key='delete') +channel.queue_bind(exchange=exchange, queue=queue_1, routing_key="create") +channel.queue_bind(exchange=exchange, queue=queue_2, routing_key="update") +channel.queue_bind(exchange=exchange, queue=queue_3, routing_key="delete") def callback_1(ch, method, properties, body): @@ -62,5 +57,5 @@ def callback_3(ch, method, properties, body): channel.basic_consume(queue_2, callback_2, auto_ack=True) channel.basic_consume(queue_3, callback_3, auto_ack=True) -print('[*] Waiting for logs. To exit press CTRL+C') +print("[*] Waiting for logs. 
To exit press CTRL+C") channel.start_consuming() diff --git a/src/metax_api/tests/services/reference_data_mixin.py b/src/metax_api/tests/services/reference_data_mixin.py index 4d1463f3..9e522a3c 100755 --- a/src/metax_api/tests/services/reference_data_mixin.py +++ b/src/metax_api/tests/services/reference_data_mixin.py @@ -33,7 +33,6 @@ def get(self, *args, **kwargs): class ReferenceDataMixinTests(TestCase, TestClassUtils): - @classmethod def setUpClass(cls): """ @@ -47,7 +46,7 @@ def setUpClass(cls): cls.cache = RedisClient() def setUp(self): - self.cache.delete('reference_data') + self.cache.delete("reference_data") RDM.process_cached_reference_data = None def tearDown(self): @@ -63,14 +62,14 @@ def test_reference_data_reload_ok(self): RDM.get_reference_data(self.cache) self._assert_reference_data_ok() - @patch('metax_api.utils.ReferenceDataLoader.populate_cache_reference_data') + @patch("metax_api.utils.ReferenceDataLoader.populate_cache_reference_data") def test_reference_data_reload_in_progress(self, mock_populate_cache_reference_data): """ Ensure the reference data fetch survives when another request has already started reloading the reference data. The method should retry for a few seconds, and finally succeed """ return_data_after_retries = 1 - mock_populate_cache_reference_data.return_value = 'reload_started_by_other' + mock_populate_cache_reference_data.return_value = "reload_started_by_other" self._populate_cache_reference_data() mock_cache = MockRedisCacheService(return_data_after_retries=return_data_after_retries) @@ -78,10 +77,10 @@ def test_reference_data_reload_in_progress(self, mock_populate_cache_reference_d RDM.get_reference_data(mock_cache) self._assert_reference_data_ok() - #self.assertEqual(mock_cache.call_count, return_data_after_retries, + # self.assertEqual(mock_cache.call_count, return_data_after_retries, # 'ref data fetching should have retried a few times before succeeding') - @patch('metax_api.utils.ReferenceDataLoader.populate_cache_reference_data') + @patch("metax_api.utils.ReferenceDataLoader.populate_cache_reference_data") def test_reference_data_reload_in_progress_times_out(self, mock_populate_cache_reference_data): """ Ensure the reference data fetch finally gives up when another request has already started @@ -91,7 +90,7 @@ def test_reference_data_reload_in_progress_times_out(self, mock_populate_cache_r # since get_reference_data() retries MAX_RETRIES times, it should give up return_data_after_retries = 100 - mock_populate_cache_reference_data.return_value = 'reload_started_by_other' + mock_populate_cache_reference_data.return_value = "reload_started_by_other" self._populate_cache_reference_data() mock_cache = MockRedisCacheService(return_data_after_retries=return_data_after_retries) @@ -99,11 +98,15 @@ def test_reference_data_reload_in_progress_times_out(self, mock_populate_cache_r # the method being tested RDM.get_reference_data(mock_cache) except Exception as e: - self.assertEqual(e.__class__.__name__, 'Http503', 'ref data reload should raise Http503 when it gives up') + self.assertEqual( + e.__class__.__name__, + "Http503", + "ref data reload should raise Http503 when it gives up", + ) self._assert_reference_data_ok() - @patch('metax_api.utils.ReferenceDataLoader.populate_cache_reference_data') + @patch("metax_api.utils.ReferenceDataLoader.populate_cache_reference_data") def test_reference_data_reload_failed(self, mock_populate_cache_reference_data): """ Ensure 503 is raised when reload by the request failed @@ -123,13 +126,17 @@ def 
test_reference_data_reload_failed(self, mock_populate_cache_reference_data): # the method being tested RDM.get_reference_data(mock_cache) except Exception as e: - self.assertEqual(e.__class__.__name__, 'Http503', 'ref data reload should raise Http503 when it gives up') + self.assertEqual( + e.__class__.__name__, + "Http503", + "ref data reload should raise Http503 when it gives up", + ) self._assert_reference_data_ok() def _assert_reference_data_ok(self): - self.assertEqual('reference_data' in self.cache.get('reference_data'), True) - self.assertEqual('organization_data' in self.cache.get('reference_data'), True) + self.assertEqual("reference_data" in self.cache.get("reference_data"), True) + self.assertEqual("organization_data" in self.cache.get("reference_data"), True) def _populate_cache_reference_data(self): """ @@ -137,7 +144,10 @@ def _populate_cache_reference_data(self): in the tests. Instead, this method is executed to load something in the cache, which get_reference_data() will then try to return """ - self.cache.set('reference_data', { - 'reference_data': {'language': ['stuff']}, - 'organization_data': {'organization': ['stuff']}, - }) + self.cache.set( + "reference_data", + { + "reference_data": {"language": ["stuff"]}, + "organization_data": {"organization": ["stuff"]}, + }, + ) diff --git a/src/metax_api/tests/testdata/fetch_and_update_datasets.py b/src/metax_api/tests/testdata/fetch_and_update_datasets.py index 084dac6f..d8597b91 100755 --- a/src/metax_api/tests/testdata/fetch_and_update_datasets.py +++ b/src/metax_api/tests/testdata/fetch_and_update_datasets.py @@ -33,26 +33,32 @@ def get_auth_header(): for u in settings.API_USERS: - if u['username'] == 'metax': + if u["username"] == "metax": return { - 'Authorization': 'Basic %s' - % b64encode(bytes('%s:%s' % (u['username'], u['password']), 'utf-8')).decode('utf-8') + "Authorization": "Basic %s" + % b64encode(bytes("%s:%s" % (u["username"], u["password"]), "utf-8")).decode( + "utf-8" + ) } + def get_test_datasets(): - print('retrieving test datasets...') - response = requests.get('https://localhost/rest/datasets?metadata_owner_org=abc-org-123&pagination=false', - headers=headers, verify=False) + print("retrieving test datasets...") + response = requests.get( + "https://localhost/rest/datasets?metadata_owner_org=abc-org-123&pagination=false", + headers=headers, + verify=False, + ) if response.status_code != 200: raise Exception(response.content) records = response.json() if not records: - print('Received no records. Quiting...') + print("Received no records. 
Quiting...") return - print('Filtering out possible non-test datasets...') + print("Filtering out possible non-test datasets...") test_records = [] for record in records: if dataset_is_testdata(record): @@ -60,60 +66,81 @@ def get_test_datasets(): return test_records + def dataset_is_testdata(record): # Test datasets have fixed attributes so these should verify that it is a test dataset # Other tests could be added but these should be sufficient - end = f'd{record["id"]}' if record['id'] < 10 else f'{record["id"]}' + end = f'd{record["id"]}' if record["id"] < 10 else f'{record["id"]}' - if record['identifier'] != f'cr955e904-e3dd-4d7e-99f1-3fed446f96{end}' or \ - record['research_dataset']['creator'][0]['name'] != 'Teppo Testaaja': + if ( + record["identifier"] != f"cr955e904-e3dd-4d7e-99f1-3fed446f96{end}" + or record["research_dataset"]["creator"][0]["name"] != "Teppo Testaaja" + ): return False return True + def retrieve_and_update_all_datasets_in_db(headers): - print('-- begin retrieving and updating test datasets --') + print("-- begin retrieving and updating test datasets --") test_records = get_test_datasets() # dont want to create new versions from datasets for this operation, # so use parameter preserve_version - print('updating the test datasets...') - response = requests.put('https://localhost/rest/datasets?preserve_version', - headers=headers, data=dumps(test_records), verify=False) + print("updating the test datasets...") + response = requests.put( + "https://localhost/rest/datasets?preserve_version", + headers=headers, + data=dumps(test_records), + verify=False, + ) if response.status_code not in (200, 201, 204): print(response.status_code) raise Exception(response.text) - elif response.text and len(response.json().get('failed', [])) > 0: - for fail in response.json().get('failed'): + elif response.text and len(response.json().get("failed", [])) > 0: + for fail in response.json().get("failed"): raise Exception(fail) - print('-- done --') + print("-- done --") def retrieve_and_update_all_data_catalogs_in_db(headers): - print('-- begin retrieving and updating all data catalogs in the db --') + print("-- begin retrieving and updating all data catalogs in the db --") - print('retrieving all data catalog IDs...') - response = requests.get('https://localhost/rest/datacatalogs?limit=100', headers=headers, verify=False) + print("retrieving all data catalog IDs...") + response = requests.get( + "https://localhost/rest/datacatalogs?limit=100", headers=headers, verify=False + ) if response.status_code != 200: raise Exception(response.content) data_catalog_ids = [] - for dc in response.json().get('results', []): - if dc['catalog_json']['identifier'] == 'urn:nbn:fi:att:data-catalog-dft': + for dc in response.json().get("results", []): + if dc["catalog_json"]["identifier"] == "urn:nbn:fi:att:data-catalog-dft": pass else: - data_catalog_ids.append(dc.get('id')) + data_catalog_ids.append(dc.get("id")) - print('retrieving details of data catalogs and updating %d data catalogs...' % len(data_catalog_ids)) + print( + "retrieving details of data catalogs and updating %d data catalogs..." 
+ % len(data_catalog_ids) + ) for dc_id in data_catalog_ids: - response = requests.get('https://localhost/rest/datacatalogs/%s' % dc_id, headers=headers, verify=False) + response = requests.get( + "https://localhost/rest/datacatalogs/%s" % dc_id, + headers=headers, + verify=False, + ) if response.status_code == 200: - update_response = requests.put('https://localhost/rest/datacatalogs/%s' % dc_id, - headers=headers, json=response.json(), verify=False) + update_response = requests.put( + "https://localhost/rest/datacatalogs/%s" % dc_id, + headers=headers, + json=response.json(), + verify=False, + ) if update_response.status_code not in (200, 201, 204): print(response.status_code) raise Exception(response.text) @@ -121,45 +148,54 @@ def retrieve_and_update_all_data_catalogs_in_db(headers): print(response.status_code) raise Exception(response.content) - print('-- done --') + print("-- done --") def update_directory_byte_sizes_and_file_counts(headers): - print('-- begin updating byte sizes and file counts in all dirs in all projects --') - response = requests.get('https://localhost/rest/directories/update_byte_sizes_and_file_counts', - headers=headers, verify=False) + print("-- begin updating byte sizes and file counts in all dirs in all projects --") + response = requests.get( + "https://localhost/rest/directories/update_byte_sizes_and_file_counts", + headers=headers, + verify=False, + ) if response.status_code not in (200, 201, 204): raise Exception(response.text) - print('-- done --') + print("-- done --") def update_ida_datasets_total_files_byte_size(headers): - print('-- begin updating IDA CR total ida byte sizes --') - response = requests.get('https://localhost/rest/datasets/update_cr_total_files_byte_sizes', - headers=headers, verify=False) + print("-- begin updating IDA CR total ida byte sizes --") + response = requests.get( + "https://localhost/rest/datasets/update_cr_total_files_byte_sizes", + headers=headers, + verify=False, + ) if response.status_code not in (200, 201, 204): raise Exception(response.text) - print('-- done --') + print("-- done --") def update_cr_directory_browsing_data(headers): - print('-- begin updating test CR directory byte sizes and file counts --') + print("-- begin updating test CR directory byte sizes and file counts --") test_records = get_test_datasets() for r in test_records: - response = requests.get(f'https://localhost/rest/datasets/update_cr_directory_browsing_data?id={r["id"]}', - headers=headers, verify=False) + response = requests.get( + f'https://localhost/rest/datasets/update_cr_directory_browsing_data?id={r["id"]}', + headers=headers, + verify=False, + ) if response.status_code not in (200, 201, 204): raise Exception(response.text) - print('-- done --') + print("-- done --") -if __name__ == '__main__': - headers = {'Content-type': 'application/json'} +if __name__ == "__main__": + headers = {"Content-type": "application/json"} headers.update(get_auth_header()) for i in range(1, 10): - response = requests.get('https://localhost/rest/datasets/1', headers=headers, verify=False) + response = requests.get("https://localhost/rest/datasets/1", headers=headers, verify=False) if response.status_code == 200: break sleep(1) @@ -167,6 +203,7 @@ def update_cr_directory_browsing_data(headers): print("Unable to GET dataset with pk 1, aborting... 
reason:") print(response.content) import sys + sys.exit(1) retrieve_and_update_all_datasets_in_db(headers) diff --git a/src/metax_api/tests/testdata/generate_test_data.py b/src/metax_api/tests/testdata/generate_test_data.py index dca29026..b841483d 100755 --- a/src/metax_api/tests/testdata/generate_test_data.py +++ b/src/metax_api/tests/testdata/generate_test_data.py @@ -42,18 +42,18 @@ # spread these evenly among the cr's catalog_records_owner_ids = [ - '053bffbcc41edad4853bea91fc42ea18', - '053d18ecb29e752cb7a35cd77b34f5fd', - '05593961536b76fa825281ccaedd4d4f', - '055ea4dade5ab2145954f56d4b51cef0', - '055ea531a6cac569425bed94459266ee', + "053bffbcc41edad4853bea91fc42ea18", + "053d18ecb29e752cb7a35cd77b34f5fd", + "05593961536b76fa825281ccaedd4d4f", + "055ea4dade5ab2145954f56d4b51cef0", + "055ea531a6cac569425bed94459266ee", ] # very slow with a large number of rows. we'll always validate the first loop tho validate_json = False # Location of schema files -schema_path = os.path.dirname(__file__) + '../api.rest.base/schemas' +schema_path = os.path.dirname(__file__) + "../api.rest.base/schemas" # identifier model type cr_type = 1 # catalog record @@ -61,28 +61,28 @@ def generate_file_storages(file_storage_max_rows): - print('generating file storages...') + print("generating file storages...") test_file_storage_list = [] - with open('file_storage_test_data_template.json') as json_file: + with open("file_storage_test_data_template.json") as json_file: row_template = json_load(json_file) - title = row_template['file_storage_json']['title'] - identifier = "pid:urn:storage" + row_template['file_storage_json']['identifier'] + title = row_template["file_storage_json"]["title"] + identifier = "pid:urn:storage" + row_template["file_storage_json"]["identifier"] for i in range(1, file_storage_max_rows + 1): new = { - 'fields': { - 'date_modified': '2017-06-23T10:07:22Z', - 'date_created': '2017-05-23T10:07:22Z', - 'file_storage_json': { - 'title': title % str(i), - 'identifier': identifier % str(i), - 'url': 'https://metax.fd-test.csc.fi/rest/filestorages/%d' % i, - } + "fields": { + "date_modified": "2017-06-23T10:07:22Z", + "date_created": "2017-05-23T10:07:22Z", + "file_storage_json": { + "title": title % str(i), + "identifier": identifier % str(i), + "url": "https://metax.fd-test.csc.fi/rest/filestorages/%d" % i, + }, }, - 'model': "metax_api.filestorage", - 'pk': i + "model": "metax_api.filestorage", + "pk": i, } test_file_storage_list.append(new) @@ -90,139 +90,169 @@ def generate_file_storages(file_storage_max_rows): def generate_files(test_file_storage_list, validate_json): - print('generating files...') + print("generating files...") - with open('file_test_data_template.json') as json_file: + with open("file_test_data_template.json") as json_file: row_template = json_load(json_file) - json_template = row_template['file_characteristics'].copy() - file_name = row_template['file_name'] - json_title = json_template['title'] - json_description = json_template['description'] + json_template = row_template["file_characteristics"].copy() + file_name = row_template["file_name"] + json_title = json_template["title"] + json_description = json_template["description"] directories = [] file_test_data_list = [] directory_test_data_list = [] - json_schema = get_json_schema('file') - file_storage = test_file_storage_list[0]['pk'] + json_schema = get_json_schema("file") + file_storage = test_file_storage_list[0]["pk"] for i in range(1, file_max_rows + 1): if i <= 20: - project_identifier = 'project_x' - 
project_root_folder = 'project_x_FROZEN' + project_identifier = "project_x" + project_root_folder = "project_x_FROZEN" else: - project_identifier = 'research_project_112' - project_root_folder = 'prj_112_root' + project_identifier = "research_project_112" + project_root_folder = "prj_112_root" loop = str(i) new = { - 'fields': row_template.copy(), - 'model': 'metax_api.file', + "fields": row_template.copy(), + "model": "metax_api.file", } - file_path = row_template['file_path'] + file_path = row_template["file_path"] # assing files to different directories to have something to browse if 1 <= i < 6: - file_path = file_path.replace('/some/path/', '/{0}/Experiment_X/'. - format(project_root_folder)) + file_path = file_path.replace( + "/some/path/", "/{0}/Experiment_X/".format(project_root_folder) + ) elif 6 <= i < 11: - file_path = file_path.replace('/some/path/', '/{0}/Experiment_X/Phase_1/'. - format(project_root_folder)) + file_path = file_path.replace( + "/some/path/", "/{0}/Experiment_X/Phase_1/".format(project_root_folder) + ) elif 11 <= i <= 20: - file_path = file_path.replace('/some/path/', '/{0}/Experiment_X/Phase_1/2017/01/'. - format(project_root_folder)) + file_path = file_path.replace( + "/some/path/", + "/{0}/Experiment_X/Phase_1/2017/01/".format(project_root_folder), + ) if i == 21: - file_path = file_path.replace('/some/path/', '/{0}/science_data_A/'. - format(project_root_folder)) + file_path = file_path.replace( + "/some/path/", "/{0}/science_data_A/".format(project_root_folder) + ) if 22 <= i < 25: - file_path = file_path.replace('/some/path/', '/{0}/science_data_A/phase_1/2018/01/'. - format(project_root_folder)) + file_path = file_path.replace( + "/some/path/", + "/{0}/science_data_A/phase_1/2018/01/".format(project_root_folder), + ) if i == 25: - file_path = file_path.replace('/some/path/', '/{0}/science_data_B/'. - format(project_root_folder)) + file_path = file_path.replace( + "/some/path/", "/{0}/science_data_B/".format(project_root_folder) + ) if i == 26: - file_path = file_path.replace('/some/path/', '/{0}/other/items/'. - format(project_root_folder)) + file_path = file_path.replace( + "/some/path/", "/{0}/other/items/".format(project_root_folder) + ) if 27 <= i < 30: - file_path = file_path.replace('/some/path/', '/{0}/random_folder/'. - format(project_root_folder)) + file_path = file_path.replace( + "/some/path/", "/{0}/random_folder/".format(project_root_folder) + ) if 30 <= i < 35: - file_path = file_path.replace('/some/path/', '/{0}/science_data_C/'. - format(project_root_folder)) + file_path = file_path.replace( + "/some/path/", "/{0}/science_data_C/".format(project_root_folder) + ) elif 35 <= i < 40: - file_path = file_path.replace('/some/path/', '/{0}/science_data_C/phase_1/'. - format(project_root_folder)) + file_path = file_path.replace( + "/some/path/", + "/{0}/science_data_C/phase_1/".format(project_root_folder), + ) elif 40 <= i < 50: - file_path = file_path.replace('/some/path/', '/{0}/science_data_C/phase_1/2017/01/'. - format(project_root_folder)) + file_path = file_path.replace( + "/some/path/", + "/{0}/science_data_C/phase_1/2017/01/".format(project_root_folder), + ) elif 50 <= i < 70: - file_path = file_path.replace('/some/path/', '/{0}/science_data_C/phase_1/2017/02/'. - format(project_root_folder)) + file_path = file_path.replace( + "/some/path/", + "/{0}/science_data_C/phase_1/2017/02/".format(project_root_folder), + ) elif 70 <= i <= file_max_rows: - file_path = file_path.replace('/some/path/', '/{0}/science_data_C/phase_2/2017/10/'. 
- format(project_root_folder)) - - directory_id = get_parent_directory_for_path(directories, file_path, directory_test_data_list, - project_identifier) - - new['fields']['parent_directory'] = directory_id - new['fields']['project_identifier'] = project_identifier - new['fields']['file_name'] = file_name % loop - new['fields']['file_path'] = file_path % loop - new['fields']['identifier'] = "pid:urn:" + loop - new['fields']['file_characteristics']['title'] = json_title % loop - new['fields']['file_characteristics']['description'] = json_description % loop - new['fields']['file_storage'] = file_storage - new['fields']['byte_size'] = i * 100 - new['pk'] = i + file_path = file_path.replace( + "/some/path/", + "/{0}/science_data_C/phase_2/2017/10/".format(project_root_folder), + ) + + directory_id = get_parent_directory_for_path( + directories, file_path, directory_test_data_list, project_identifier + ) + + new["fields"]["parent_directory"] = directory_id + new["fields"]["project_identifier"] = project_identifier + new["fields"]["file_name"] = file_name % loop + new["fields"]["file_path"] = file_path % loop + new["fields"]["identifier"] = "pid:urn:" + loop + new["fields"]["file_characteristics"]["title"] = json_title % loop + new["fields"]["file_characteristics"]["description"] = json_description % loop + new["fields"]["file_storage"] = file_storage + new["fields"]["byte_size"] = i * 100 + new["pk"] = i if validate_json or i == 1: - json_validate(new['fields']['file_characteristics'], json_schema) + json_validate(new["fields"]["file_characteristics"], json_schema) file_test_data_list.append(new) return file_test_data_list, directory_test_data_list -def get_parent_directory_for_path(directories, file_path, directory_test_data_list, project_identifier): +def get_parent_directory_for_path( + directories, file_path, directory_test_data_list, project_identifier +): dir_name = os.path.dirname(file_path) for d in directories: - if d['fields']['directory_path'] == dir_name and d['fields']['project_identifier'] == project_identifier: - return d['pk'] - return create_parent_directory_for_path(directories, dir_name, directory_test_data_list, project_identifier) - - -def create_parent_directory_for_path(directories, file_path, directory_test_data_list, project_identifier): + if ( + d["fields"]["directory_path"] == dir_name + and d["fields"]["project_identifier"] == project_identifier + ): + return d["pk"] + return create_parent_directory_for_path( + directories, dir_name, directory_test_data_list, project_identifier + ) + + +def create_parent_directory_for_path( + directories, file_path, directory_test_data_list, project_identifier +): """ Recursively creates the requested directories for file_path """ - with open('directory_test_data_template.json') as json_file: + with open("directory_test_data_template.json") as json_file: row_template = json_load(json_file) - if file_path == '/': + if file_path == "/": directory_id = None else: # the directory where a file or dir belongs to, must be retrieved or created first - directory_id = get_parent_directory_for_path(directories, file_path, directory_test_data_list, - project_identifier) + directory_id = get_parent_directory_for_path( + directories, file_path, directory_test_data_list, project_identifier + ) # all parent dirs have been created - now create the dir that was originally asked for new_id = len(directories) + 1 new = { - 'fields': row_template.copy(), - 'model': 'metax_api.directory', - 'pk': new_id, + "fields": row_template.copy(), + "model": 
"metax_api.directory", + "pk": new_id, } # note: it is possible that parent_directory is null (top-level directories) - new['fields']['parent_directory'] = directory_id - new['fields']['directory_name'] = os.path.basename(file_path) - new['fields']['directory_path'] = file_path - new['fields']['identifier'] = new['fields']['identifier'] % new_id - new['fields']['project_identifier'] = project_identifier + new["fields"]["parent_directory"] = directory_id + new["fields"]["directory_name"] = os.path.basename(file_path) + new["fields"]["directory_path"] = file_path + new["fields"]["identifier"] = new["fields"]["identifier"] % new_id + new["fields"]["project_identifier"] = project_identifier directory_test_data_list.append(new) directories.append(new) @@ -230,234 +260,269 @@ def create_parent_directory_for_path(directories, file_path, directory_test_data return new_id -def save_test_data(file_storage_list, file_list, directory_list, data_catalogs_list, contract_list, - catalog_record_list, dataset_version_sets): - with open('test_data.json', 'w') as f: - print('dumping test data as json to metax_api/tests/test_data.json...') - json_dump(file_storage_list + directory_list + file_list + data_catalogs_list + contract_list + - dataset_version_sets + catalog_record_list, f, indent=4, sort_keys=True) +def save_test_data( + file_storage_list, + file_list, + directory_list, + data_catalogs_list, + contract_list, + catalog_record_list, + dataset_version_sets, +): + with open("test_data.json", "w") as f: + print("dumping test data as json to metax_api/tests/test_data.json...") + json_dump( + file_storage_list + + directory_list + + file_list + + data_catalogs_list + + contract_list + + dataset_version_sets + + catalog_record_list, + f, + indent=4, + sort_keys=True, + ) def generate_data_catalogs(start_idx, data_catalog_max_rows, validate_json, type): - print('generating %s data catalogs...' % type) + print("generating %s data catalogs..." 
% type) test_data_catalog_list = [] - json_schema = get_json_schema('datacatalog') + json_schema = get_json_schema("datacatalog") - with open('data_catalog_test_data_template.json') as json_file: + with open("data_catalog_test_data_template.json") as json_file: row_template = json_load(json_file) for i in range(start_idx, start_idx + data_catalog_max_rows): new = { - 'fields': deepcopy(row_template), - 'model': "metax_api.datacatalog", - 'pk': i, + "fields": deepcopy(row_template), + "model": "metax_api.datacatalog", + "pk": i, } - new['fields']['date_modified'] = '2017-06-15T10:07:22Z' - new['fields']['date_created'] = '2017-05-15T10:07:22Z' - new['fields']['catalog_json']['identifier'] = generate_test_identifier(dc_type, i) + new["fields"]["date_modified"] = "2017-06-15T10:07:22Z" + new["fields"]["date_created"] = "2017-05-15T10:07:22Z" + new["fields"]["catalog_json"]["identifier"] = generate_test_identifier(dc_type, i) - if type == 'ida': - new['fields']['catalog_json']['research_dataset_schema'] = 'ida' - elif type == 'att': - new['fields']['catalog_json']['research_dataset_schema'] = 'att' + if type == "ida": + new["fields"]["catalog_json"]["research_dataset_schema"] = "ida" + elif type == "att": + new["fields"]["catalog_json"]["research_dataset_schema"] = "att" if i in (start_idx, start_idx + 1): # lets pretend that the first two data catalogs will support versioning, # they are "fairdata catalogs" dataset_versioning = True - new['fields']['catalog_json']['harvested'] = False + new["fields"]["catalog_json"]["harvested"] = False else: dataset_versioning = False # rest of the catalogs are harvested - new['fields']['catalog_json']['harvested'] = True + new["fields"]["catalog_json"]["harvested"] = True - new['fields']['catalog_json']['dataset_versioning'] = dataset_versioning + new["fields"]["catalog_json"]["dataset_versioning"] = dataset_versioning test_data_catalog_list.append(new) if validate_json or i == start_idx: - json_validate(new['fields']['catalog_json'], json_schema) + json_validate(new["fields"]["catalog_json"], json_schema) return test_data_catalog_list def generate_contracts(contract_max_rows, validate_json): - print('generating contracts...') + print("generating contracts...") test_contract_list = [] - json_schema = get_json_schema('contract') + json_schema = get_json_schema("contract") - with open('contract_test_data_template.json') as json_file: + with open("contract_test_data_template.json") as json_file: row_template = json_load(json_file) # sample contract provided by PAS new = { - 'fields': deepcopy(row_template[0]), - 'model': "metax_api.contract", - 'pk': 1, + "fields": deepcopy(row_template[0]), + "model": "metax_api.contract", + "pk": 1, } - json_validate(new['fields']['contract_json'], json_schema) + json_validate(new["fields"]["contract_json"], json_schema) test_contract_list.append(new) for i in range(2, contract_max_rows + 1): new = { - 'fields': deepcopy(row_template[1]), - 'model': "metax_api.contract", - 'pk': i, + "fields": deepcopy(row_template[1]), + "model": "metax_api.contract", + "pk": i, } - new['fields']['contract_json']['identifier'] = "optional:contract:identifier%d" % i - new['fields']['contract_json']['title'] = "Title of Contract %d" % i - new['fields']['contract_json']['organization']['organization_identifier'] = "1234567-%d" % i - new['fields']['date_modified'] = '2017-06-15T10:07:22Z' - new['fields']['date_created'] = '2017-05-15T10:07:22Z' + new["fields"]["contract_json"]["identifier"] = "optional:contract:identifier%d" % i + 
new["fields"]["contract_json"]["title"] = "Title of Contract %d" % i + new["fields"]["contract_json"]["organization"]["organization_identifier"] = "1234567-%d" % i + new["fields"]["date_modified"] = "2017-06-15T10:07:22Z" + new["fields"]["date_created"] = "2017-05-15T10:07:22Z" test_contract_list.append(new) if validate_json or i == 1: - json_validate(new['fields']['contract_json'], json_schema) + json_validate(new["fields"]["contract_json"], json_schema) return test_contract_list -def generate_catalog_records(basic_catalog_record_max_rows, data_catalogs_list, contract_list, file_list, - validate_json, type, test_data_list=[], dataset_version_sets=[]): - print('generating %s catalog records...' % type) +def generate_catalog_records( + basic_catalog_record_max_rows, + data_catalogs_list, + contract_list, + file_list, + validate_json, + type, + test_data_list=[], + dataset_version_sets=[], +): + print("generating %s catalog records..." % type) - with open('catalog_record_test_data_template.json') as json_file: + with open("catalog_record_test_data_template.json") as json_file: row_template = json_load(json_file) files_start_idx = 1 - data_catalog_id = data_catalogs_list[0]['pk'] + data_catalog_id = data_catalogs_list[0]["pk"] owner_idx = 0 start_idx = len(test_data_list) + 1 for i in range(start_idx, start_idx + basic_catalog_record_max_rows): json_schema = None - if type == 'ida': - json_schema = get_json_schema('ida_dataset') - elif type == 'att': - json_schema = get_json_schema('att_dataset') + if type == "ida": + json_schema = get_json_schema("ida_dataset") + elif type == "att": + json_schema = get_json_schema("att_dataset") new = { - 'fields': row_template.copy(), - 'model': 'metax_api.catalogrecord', - 'pk': i, + "fields": row_template.copy(), + "model": "metax_api.catalogrecord", + "pk": i, } - if data_catalog_id in (1, 2): # versioned catalogs only + if data_catalog_id in (1, 2): # versioned catalogs only dataset_version_set = { - 'fields': {}, - 'model': 'metax_api.datasetversionset', - 'pk': i, + "fields": {}, + "model": "metax_api.datasetversionset", + "pk": i, } - new['fields']['dataset_version_set'] = dataset_version_set['pk'] + new["fields"]["dataset_version_set"] = dataset_version_set["pk"] dataset_version_sets.append(dataset_version_set) # comment this line. i dare you. 
# for real tho, required to prevent some strange behaving references to old data - new['fields']['research_dataset'] = row_template['research_dataset'].copy() - new['fields']['data_catalog'] = data_catalog_id - new['fields']['research_dataset']['metadata_version_identifier'] = generate_test_identifier(cr_type, i, - urn=False) - new['fields']['research_dataset']['preferred_identifier'] = generate_test_identifier(cr_type, i) - new['fields']['identifier'] = generate_test_identifier('cr', i, urn=False) - new['fields']['date_modified'] = '2017-06-23T10:07:22Z' - new['fields']['date_created'] = '2017-05-23T10:07:22Z' - new['fields']['files'] = [] + new["fields"]["research_dataset"] = row_template["research_dataset"].copy() + new["fields"]["data_catalog"] = data_catalog_id + new["fields"]["research_dataset"]["metadata_version_identifier"] = generate_test_identifier( + cr_type, i, urn=False + ) + new["fields"]["research_dataset"]["preferred_identifier"] = generate_test_identifier( + cr_type, i + ) + new["fields"]["identifier"] = generate_test_identifier("cr", i, urn=False) + new["fields"]["date_modified"] = "2017-06-23T10:07:22Z" + new["fields"]["date_created"] = "2017-05-23T10:07:22Z" + new["fields"]["files"] = [] # add files - if type == 'ida': - new['fields']['files'] = [] + if type == "ida": + new["fields"]["files"] = [] dataset_files = [] total_files_byte_size = 0 file_divider = 4 for j in range(files_start_idx, files_start_idx + files_per_dataset): - total_files_byte_size += file_list[j - 1]['fields']['byte_size'] + total_files_byte_size += file_list[j - 1]["fields"]["byte_size"] # note - this field will go in the m2m table in the db when importing generated testdata... - new['fields']['files'].append(file_list[j - 1]['pk']) + new["fields"]["files"].append(file_list[j - 1]["pk"]) # ... 
while every API operation will look at research_dataset.files.identifier # to lookup the file - be careful the identifier below matches with the m2m id set above - dataset_files.append({ - 'identifier': file_list[j - 1]['fields']['identifier'], - 'title': 'File metadata title %d' % j - }) + dataset_files.append( + { + "identifier": file_list[j - 1]["fields"]["identifier"], + "title": "File metadata title %d" % j, + } + ) if j < file_divider: # first fifth of files - dataset_files[-1]['file_type'] = { + dataset_files[-1]["file_type"] = { "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/text", } - dataset_files[-1]['use_category'] = { - 'identifier': 'http://uri.suomi.fi/codelist/fairdata/use_category/code/source' + dataset_files[-1]["use_category"] = { + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/source" } elif file_divider <= j < (file_divider * 2): # second fifth of files - dataset_files[-1]['file_type'] = { + dataset_files[-1]["file_type"] = { "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/video" } - dataset_files[-1]['use_category'] = { - 'identifier': 'http://uri.suomi.fi/codelist/fairdata/use_category/code/outcome' + dataset_files[-1]["use_category"] = { + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/outcome" } elif (file_divider * 2) <= j < (file_divider * 3): # third fifth of files - dataset_files[-1]['file_type'] = { + dataset_files[-1]["file_type"] = { "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/image" } - dataset_files[-1]['use_category'] = { - 'identifier': 'http://uri.suomi.fi/codelist/fairdata/use_category/code/publication' + dataset_files[-1]["use_category"] = { + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/publication" } elif (file_divider * 3) <= j < (file_divider * 4): # fourth fifth of files - dataset_files[-1]['file_type'] = { + dataset_files[-1]["file_type"] = { "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/source_code" } - dataset_files[-1]['use_category'] = { - 'identifier': 'http://uri.suomi.fi/codelist/fairdata/use_category/code/documentation' + dataset_files[-1]["use_category"] = { + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/documentation" } else: # the rest of files - dataset_files[-1]['use_category'] = { - 'identifier': 'configuration' - } + dataset_files[-1]["use_category"] = {"identifier": "configuration"} - new['fields']['research_dataset']['files'] = dataset_files - new['fields']['research_dataset']['total_files_byte_size'] = total_files_byte_size + new["fields"]["research_dataset"]["files"] = dataset_files + new["fields"]["research_dataset"]["total_files_byte_size"] = total_files_byte_size files_start_idx += files_per_dataset - elif type == 'att': - new['fields']['research_dataset']['remote_resources'] = [ + elif type == "att": + new["fields"]["research_dataset"]["remote_resources"] = [ { "title": "Remote resource {0}".format(str(i)), "modified": "2014-01-12T17:11:54Z", "use_category": {"identifier": "outcome"}, - "checksum": {"algorithm": "SHA-256", "checksum_value": "u5y6f4y68765ngf6ry8n"}, - "byte_size": i * 512 + "checksum": { + "algorithm": "SHA-256", + "checksum_value": "u5y6f4y68765ngf6ry8n", + }, + "byte_size": i * 512, }, { "title": "Other remote resource {0}".format(str(i)), "modified": "2013-01-12T11:11:54Z", "use_category": {"identifier": "source"}, - "checksum": {"algorithm": "SHA-512", "checksum_value": "u3k4kn7n1g56l6rq5a5s"}, - "byte_size": i * 1024 
- } + "checksum": { + "algorithm": "SHA-512", + "checksum_value": "u3k4kn7n1g56l6rq5a5s", + }, + "byte_size": i * 1024, + }, ] total_remote_resources_byte_size = 0 - for rr in new['fields']['research_dataset']['remote_resources']: - total_remote_resources_byte_size += rr.get('byte_size', 0) - new['fields']['research_dataset'][ - 'total_remote_resources_byte_size'] = total_remote_resources_byte_size + for rr in new["fields"]["research_dataset"]["remote_resources"]: + total_remote_resources_byte_size += rr.get("byte_size", 0) + new["fields"]["research_dataset"][ + "total_remote_resources_byte_size" + ] = total_remote_resources_byte_size if validate_json or i == start_idx: - json_validate(new['fields']['research_dataset'], json_schema) + json_validate(new["fields"]["research_dataset"], json_schema) test_data_list.append(new) @@ -465,99 +530,103 @@ def generate_catalog_records(basic_catalog_record_max_rows, data_catalogs_list, # see CatalogRecord model PRESERVATION_STATE_... for value definitions for i, pres_state_value in enumerate((0, 10, 10, 40, 50, 70, 70, 90, 130, 140)): - test_data_list[i]['fields']['preservation_state'] = pres_state_value + test_data_list[i]["fields"]["preservation_state"] = pres_state_value if i > 0: - test_data_list[i]['fields']['contract'] = 1 + test_data_list[i]["fields"]["contract"] = 1 if i in (90, 140): # packaging, dissemination - test_data_list[i]['fields']['mets_object_identifier'] = ["a", "b", "c"] - - test_data_list[i]['fields']['research_dataset']['curator'] = [{ - "@type": "Person", - "name": "Rahikainen", - "identifier": "id:of:curator:rahikainen", - "member_of": { - "@type": "Organization", - "name": { - "fi": "MysteeriOrganisaatio" - } + test_data_list[i]["fields"]["mets_object_identifier"] = ["a", "b", "c"] + + test_data_list[i]["fields"]["research_dataset"]["curator"] = [ + { + "@type": "Person", + "name": "Rahikainen", + "identifier": "id:of:curator:rahikainen", + "member_of": { + "@type": "Organization", + "name": {"fi": "MysteeriOrganisaatio"}, + }, } - }] + ] # set different owner for i in range(start_idx + 5, len(test_data_list)): - test_data_list[i]['fields']['research_dataset']['curator'] = [{ - "@type": "Person", - "name": "Jarski", - "identifier": "id:of:curator:jarski", - "member_of": { - "@type": "Organization", - "name": { - "fi": "MysteeriOrganisaatio" - } + test_data_list[i]["fields"]["research_dataset"]["curator"] = [ + { + "@type": "Person", + "name": "Jarski", + "identifier": "id:of:curator:jarski", + "member_of": { + "@type": "Organization", + "name": {"fi": "MysteeriOrganisaatio"}, + }, } - }] + ] # if preservation_state is other than 0, means it has been modified at some point, # so set timestamp for i in range(start_idx - 1, len(test_data_list)): row = test_data_list[i] - if row['fields']['preservation_state'] != 0: - row['fields']['preservation_state_modified'] = '2017-05-23T10:07:22.559656Z' + if row["fields"]["preservation_state"] != 0: + row["fields"]["preservation_state_modified"] = "2017-05-23T10:07:22.559656Z" # add a couple of catalog records with fuller research_dataset fields belonging to both ida and att data catalog total_files_byte_size = 0 - if type == 'ida': - template = 'catalog_record_test_data_template_full_ida.json' - elif type == 'att': - template = 'catalog_record_test_data_template_full_att.json' + if type == "ida": + template = "catalog_record_test_data_template_full_ida.json" + elif type == "att": + template = "catalog_record_test_data_template_full_att.json" with open(template) as json_file: 
row_template_full = json_load(json_file) for j in [0, 1, 2]: new = { - 'fields': deepcopy(row_template_full), - 'model': 'metax_api.catalogrecord', - 'pk': len(test_data_list) + 1, + "fields": deepcopy(row_template_full), + "model": "metax_api.catalogrecord", + "pk": len(test_data_list) + 1, } - if data_catalog_id in (1, 2): # versioned catalogs only + if data_catalog_id in (1, 2): # versioned catalogs only dataset_version_set = { - 'fields': {}, - 'model': 'metax_api.datasetversionset', - 'pk': len(test_data_list) + 1, + "fields": {}, + "model": "metax_api.datasetversionset", + "pk": len(test_data_list) + 1, } - new['fields']['dataset_version_set'] = dataset_version_set['pk'] + new["fields"]["dataset_version_set"] = dataset_version_set["pk"] dataset_version_sets.append(dataset_version_set) # for the relation in the db. includes dir id 3, which includes all 20 files - new['fields']['data_catalog'] = data_catalog_id - new['fields']['date_modified'] = '2017-09-23T10:07:22Z' - new['fields']['date_created'] = '2017-05-23T10:07:22Z' - new['fields']['editor'] = { - 'owner_id': catalog_records_owner_ids[j], - 'creator_id': catalog_records_owner_ids[owner_idx], + new["fields"]["data_catalog"] = data_catalog_id + new["fields"]["date_modified"] = "2017-09-23T10:07:22Z" + new["fields"]["date_created"] = "2017-05-23T10:07:22Z" + new["fields"]["editor"] = { + "owner_id": catalog_records_owner_ids[j], + "creator_id": catalog_records_owner_ids[owner_idx], } - new['fields']['research_dataset']['metadata_version_identifier'] = \ - generate_test_identifier(cr_type, len(test_data_list) + 1, urn=False) - new['fields']['research_dataset']['preferred_identifier'] = \ - generate_test_identifier(cr_type, len(test_data_list) + 1) - new['fields']['identifier'] = generate_test_identifier('cr', len(test_data_list) + 1, urn=False) - - if type == 'ida': + new["fields"]["research_dataset"]["metadata_version_identifier"] = generate_test_identifier( + cr_type, len(test_data_list) + 1, urn=False + ) + new["fields"]["research_dataset"]["preferred_identifier"] = generate_test_identifier( + cr_type, len(test_data_list) + 1 + ) + new["fields"]["identifier"] = generate_test_identifier( + "cr", len(test_data_list) + 1, urn=False + ) + + if type == "ida": if j in [0, 1]: - new['fields']['files'] = [i for i in range(1, 21)] - file_identifier_0 = file_list[0]['fields']['identifier'] - file_identifier_1 = file_list[1]['fields']['identifier'] - total_files_byte_size = sum(f['fields']['byte_size'] for f in file_list[0:19]) - new['fields']['research_dataset']['total_files_byte_size'] = total_files_byte_size - new['fields']['research_dataset']['files'][0]['identifier'] = file_identifier_0 - new['fields']['research_dataset']['files'][1]['identifier'] = file_identifier_1 + new["fields"]["files"] = [i for i in range(1, 21)] + file_identifier_0 = file_list[0]["fields"]["identifier"] + file_identifier_1 = file_list[1]["fields"]["identifier"] + total_files_byte_size = sum(f["fields"]["byte_size"] for f in file_list[0:19]) + new["fields"]["research_dataset"]["total_files_byte_size"] = total_files_byte_size + new["fields"]["research_dataset"]["files"][0]["identifier"] = file_identifier_0 + new["fields"]["research_dataset"]["files"][1]["identifier"] = file_identifier_1 elif j == 2: db_files = [] directories = [] @@ -576,11 +645,9 @@ def generate_catalog_records(basic_catalog_record_max_rows, data_catalogs_list, "definition": { "en": "A statement or formal explanation of the meaning of a concept." 
}, - "in_scheme": "http://uri.of.filetype.concept/scheme" + "in_scheme": "http://uri.of.filetype.concept/scheme", }, - "use_category": { - "identifier": "configuration" - } + "use_category": {"identifier": "configuration"}, }, { "identifier": "pid:urn:28", @@ -591,12 +658,10 @@ def generate_catalog_records(basic_catalog_record_max_rows, data_catalogs_list, "definition": { "en": "A statement or formal explanation of the meaning of a concept." }, - "in_scheme": "http://uri.of.filetype.concept/scheme" + "in_scheme": "http://uri.of.filetype.concept/scheme", }, - "use_category": { - "identifier": "publication" - } - } + "use_category": {"identifier": "publication"}, + }, ] directories = [ @@ -604,58 +669,52 @@ def generate_catalog_records(basic_catalog_record_max_rows, data_catalogs_list, "identifier": "pid:urn:dir:18", "title": "Phase 1 of science data C", "description": "Description of the directory", - "use_category": { - "identifier": "outcome" - } + "use_category": {"identifier": "outcome"}, }, { "identifier": "pid:urn:dir:22", "title": "Phase 2 of science data C", "description": "Description of the directory", - "use_category": { - "identifier": "outcome" - } + "use_category": {"identifier": "outcome"}, }, { "identifier": "pid:urn:dir:12", "title": "Phase 1 01/2018 of Science data A", "description": "Description of the directory", - "use_category": { - "identifier": "outcome" - } + "use_category": {"identifier": "outcome"}, }, { "identifier": "pid:urn:dir:13", "title": "Science data B", "description": "Description of the directory", - "use_category": { - "identifier": "source" - } + "use_category": {"identifier": "source"}, }, { "identifier": "pid:urn:dir:14", "title": "Other stuff", "description": "Description of the directory", - "use_category": { - "identifier": "method" - } - } + "use_category": {"identifier": "method"}, + }, ] - total_files_byte_size += sum(file_list[file_pk - 1]['fields']['byte_size'] for file_pk in db_files) + total_files_byte_size += sum( + file_list[file_pk - 1]["fields"]["byte_size"] for file_pk in db_files + ) - new['fields']['files'] = db_files - new['fields']['research_dataset']['files'] = files - new['fields']['research_dataset']['directories'] = directories - new['fields']['research_dataset']['total_files_byte_size'] = total_files_byte_size - elif type == 'att': + new["fields"]["files"] = db_files + new["fields"]["research_dataset"]["files"] = files + new["fields"]["research_dataset"]["directories"] = directories + new["fields"]["research_dataset"]["total_files_byte_size"] = total_files_byte_size + elif type == "att": total_remote_resources_byte_size = 0 - if 'remote_resources' in new['fields']['research_dataset']: - for rr in new['fields']['research_dataset']['remote_resources']: - total_remote_resources_byte_size += rr.get('byte_size', 0) - new['fields']['research_dataset']['total_remote_resources_byte_size'] = total_remote_resources_byte_size - - json_validate(new['fields']['research_dataset'], json_schema) + if "remote_resources" in new["fields"]["research_dataset"]: + for rr in new["fields"]["research_dataset"]["remote_resources"]: + total_remote_resources_byte_size += rr.get("byte_size", 0) + new["fields"]["research_dataset"][ + "total_remote_resources_byte_size" + ] = total_remote_resources_byte_size + + json_validate(new["fields"]["research_dataset"], json_schema) test_data_list.append(new) return test_data_list, dataset_version_sets @@ -668,38 +727,44 @@ def generate_alt_catalog_records(test_data_list): # note, these alt records wont have an 
editor-field set, since they presumably # originated to metax from somewhere else than qvain (were harvested). # - print('generating alternate catalog records...') + print("generating alternate catalog records...") alternate_record_set = { - 'fields': {}, - 'model': 'metax_api.alternaterecordset', - 'pk': 1, + "fields": {}, + "model": "metax_api.alternaterecordset", + "pk": 1, } # first record belongs to alt record set - test_data_list[9]['fields']['alternate_record_set'] = 1 + test_data_list[9]["fields"]["alternate_record_set"] = 1 # create one other record alt_rec = deepcopy(test_data_list[9]) - alt_rec['pk'] = test_data_list[-1]['pk'] + 1 - alt_rec['fields']['research_dataset']['preferred_identifier'] = test_data_list[9]['fields']['research_dataset'][ - 'preferred_identifier'] - alt_rec['fields']['research_dataset']['metadata_version_identifier'] += '-alt-1' - alt_rec['fields']['identifier'] = generate_test_identifier('cr', len(test_data_list) + 1, urn=False) - alt_rec['fields']['data_catalog'] = 2 - alt_rec['fields']['alternate_record_set'] = 1 - alt_rec['fields'].pop('dataset_version_set', None) + alt_rec["pk"] = test_data_list[-1]["pk"] + 1 + alt_rec["fields"]["research_dataset"]["preferred_identifier"] = test_data_list[9]["fields"][ + "research_dataset" + ]["preferred_identifier"] + alt_rec["fields"]["research_dataset"]["metadata_version_identifier"] += "-alt-1" + alt_rec["fields"]["identifier"] = generate_test_identifier( + "cr", len(test_data_list) + 1, urn=False + ) + alt_rec["fields"]["data_catalog"] = 2 + alt_rec["fields"]["alternate_record_set"] = 1 + alt_rec["fields"].pop("dataset_version_set", None) test_data_list.append(alt_rec) # create second other record alt_rec = deepcopy(test_data_list[9]) - alt_rec['pk'] = test_data_list[-1]['pk'] + 1 - alt_rec['fields']['research_dataset']['preferred_identifier'] = test_data_list[9]['fields']['research_dataset'][ - 'preferred_identifier'] - alt_rec['fields']['research_dataset']['metadata_version_identifier'] += '-alt-2' - alt_rec['fields']['identifier'] = generate_test_identifier('cr', len(test_data_list) + 1, urn=False) - alt_rec['fields']['data_catalog'] = 3 - alt_rec['fields']['alternate_record_set'] = 1 - alt_rec['fields'].pop('dataset_version_set', None) + alt_rec["pk"] = test_data_list[-1]["pk"] + 1 + alt_rec["fields"]["research_dataset"]["preferred_identifier"] = test_data_list[9]["fields"][ + "research_dataset" + ]["preferred_identifier"] + alt_rec["fields"]["research_dataset"]["metadata_version_identifier"] += "-alt-2" + alt_rec["fields"]["identifier"] = generate_test_identifier( + "cr", len(test_data_list) + 1, urn=False + ) + alt_rec["fields"]["data_catalog"] = 3 + alt_rec["fields"]["alternate_record_set"] = 1 + alt_rec["fields"].pop("dataset_version_set", None) test_data_list.append(alt_rec) # alternate record set must exist before importing catalog records, so prepend it @@ -708,50 +773,75 @@ def generate_alt_catalog_records(test_data_list): def set_qvain_info_to_records(catalog_record_list): - ''' + """ For data catalog ids 1,2 set qvain info, since they are supposedly fairdata catalogs. 
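    Editor/owner info is assigned round-robin from catalog_records_owner_ids, so the
    records are spread evenly across all known test users.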
- ''' + """ owner_idx = 0 total_qvain_users = len(catalog_records_owner_ids) for cr in catalog_record_list: - if cr['model'] != 'metax_api.catalogrecord': + if cr["model"] != "metax_api.catalogrecord": # there are other type of objects in the list (at least datasetversionset) continue - if cr['fields']['data_catalog'] not in (1, 2): + if cr["fields"]["data_catalog"] not in (1, 2): continue - cr['fields']['editor'] = { - 'owner_id': catalog_records_owner_ids[owner_idx], - 'creator_id': catalog_records_owner_ids[owner_idx], - 'identifier': 'qvain', - 'record_id': '955e904-e3dd-4d7e-99f1-3fed446f9%03d' % cr['pk'], # 3 leading zeroes to preserve length + cr["fields"]["editor"] = { + "owner_id": catalog_records_owner_ids[owner_idx], + "creator_id": catalog_records_owner_ids[owner_idx], + "identifier": "qvain", + "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9%03d" + % cr["pk"], # 3 leading zeroes to preserve length } owner_idx += 1 if owner_idx >= total_qvain_users: owner_idx = 0 -if __name__ == '__main__': - print('begin generating test data...') +if __name__ == "__main__": + print("begin generating test data...") contract_list = generate_contracts(contract_max_rows, validate_json) file_storage_list = generate_file_storages(file_storage_max_rows) file_list, directory_list = generate_files(file_storage_list, validate_json) - ida_data_catalogs_list = generate_data_catalogs(1, ida_data_catalog_max_rows, validate_json, 'ida') - att_data_catalogs_list = generate_data_catalogs(ida_data_catalog_max_rows + 1, att_data_catalog_max_rows, - validate_json, 'att') - - catalog_record_list, dataset_version_sets = generate_catalog_records(ida_catalog_record_max_rows, - ida_data_catalogs_list, contract_list, file_list, validate_json, 'ida') - - catalog_record_list, dataset_version_sets = generate_catalog_records(att_catalog_record_max_rows, - att_data_catalogs_list, contract_list, [], validate_json, 'att', catalog_record_list, dataset_version_sets) + ida_data_catalogs_list = generate_data_catalogs( + 1, ida_data_catalog_max_rows, validate_json, "ida" + ) + att_data_catalogs_list = generate_data_catalogs( + ida_data_catalog_max_rows + 1, att_data_catalog_max_rows, validate_json, "att" + ) + + catalog_record_list, dataset_version_sets = generate_catalog_records( + ida_catalog_record_max_rows, + ida_data_catalogs_list, + contract_list, + file_list, + validate_json, + "ida", + ) + + catalog_record_list, dataset_version_sets = generate_catalog_records( + att_catalog_record_max_rows, + att_data_catalogs_list, + contract_list, + [], + validate_json, + "att", + catalog_record_list, + dataset_version_sets, + ) catalog_record_list = generate_alt_catalog_records(catalog_record_list) set_qvain_info_to_records(catalog_record_list) - save_test_data(file_storage_list, directory_list, file_list, ida_data_catalogs_list + att_data_catalogs_list, - contract_list, catalog_record_list, dataset_version_sets) - - print('done') + save_test_data( + file_storage_list, + directory_list, + file_list, + ida_data_catalogs_list + att_data_catalogs_list, + contract_list, + catalog_record_list, + dataset_version_sets, + ) + + print("done") diff --git a/src/metax_api/tests/testdata/request_test.py b/src/metax_api/tests/testdata/request_test.py index 56085e2b..20a06868 100755 --- a/src/metax_api/tests/testdata/request_test.py +++ b/src/metax_api/tests/testdata/request_test.py @@ -12,7 +12,7 @@ d = print d("STARTING MEASUREMENTS") -url = 'https://metax.csc.local/rest/files/urn:nbn:fi:csc-ida201401200000100000' +url = 
"https://metax.csc.local/rest/files/urn:nbn:fi:csc-ida201401200000100000" # url = 'https://metax.csc.local/rest/files/d1a53535-2adb-5184-a10b-8dd7338d0a41' times = [] run_times = 20 diff --git a/src/metax_api/tests/utils.py b/src/metax_api/tests/utils.py index 9e167160..1b5b7992 100755 --- a/src/metax_api/tests/utils.py +++ b/src/metax_api/tests/utils.py @@ -28,10 +28,7 @@ def assert_catalog_record_is_open_access(cr): from metax_api.models.catalog_record import ACCESS_TYPES access_type = ( - cr["research_dataset"] - .get("access_rights", {}) - .get("access_type", {}) - .get("identifier", "") + cr["research_dataset"].get("access_rights", {}).get("access_type", {}).get("identifier", "") ) assert access_type == ACCESS_TYPES["open"] @@ -40,10 +37,7 @@ def assert_catalog_record_not_open_access(cr): from metax_api.models.catalog_record import ACCESS_TYPES access_type = ( - cr["research_dataset"] - .get("access_rights", {}) - .get("access_type", {}) - .get("identifier", "") + cr["research_dataset"].get("access_rights", {}).get("access_type", {}).get("identifier", "") ) assert access_type != ACCESS_TYPES["open"] @@ -135,7 +129,7 @@ def create_end_user_data_catalogs(self): date_created=get_tz_aware_now_without_micros(), catalog_record_services_create="testuser,api_auth_user,metax", catalog_record_services_edit="testuser,api_auth_user,metax", - catalog_record_services_read='testuser,api_auth_user,metax' + catalog_record_services_read="testuser,api_auth_user,metax", ) def create_legacy_data_catalogs(self): @@ -153,7 +147,7 @@ def create_legacy_data_catalogs(self): date_created=get_tz_aware_now_without_micros(), catalog_record_services_create="testuser,api_auth_user,metax", catalog_record_services_edit="testuser,api_auth_user,metax", - catalog_record_services_read='testuser,api_auth_user,metax' + catalog_record_services_read="testuser,api_auth_user,metax", ) def _set_http_authorization(self, credentials_type): @@ -162,9 +156,7 @@ def _set_http_authorization(self, credentials_type): self.client.credentials() elif credentials_type == "service": - self._use_http_authorization( - username="metax" - ) + self._use_http_authorization(username="metax") elif credentials_type == "owner": self._use_http_authorization(method="bearer", token=self.token) self._mock_token_validation_succeeds() @@ -207,18 +199,14 @@ def _use_http_authorization( password = user["password"] else: if not password: - raise Exception( - "Missing parameter 'password' for HTTP Authorization header" - ) + raise Exception("Missing parameter 'password' for HTTP Authorization header") header_value = "Basic %s" % b64encode( bytes("%s:%s" % (username, password), "utf-8") ).decode("utf-8") elif method == "bearer": - assert ( - token is not None - ), "token (dictionary) is required when using auth method bearer" + assert token is not None, "token (dictionary) is required when using auth method bearer" header_value = "Bearer %s" % generate_test_token(token) self.client.credentials(HTTP_AUTHORIZATION=header_value) @@ -275,16 +263,12 @@ def _create_cr_for_owner(self, pk_for_template_cr, data): data["metadata_provider_user"] = self.token["CSCUserName"] data["metadata_provider_org"] = self.token["schacHomeOrganization"] data["metadata_owner_org"] = self.token["schacHomeOrganization"] - data["data_catalog"][ - "identifier" - ] = django_settings.END_USER_ALLOWED_DATA_CATALOGS[0] + data["data_catalog"]["identifier"] = django_settings.END_USER_ALLOWED_DATA_CATALOGS[0] data.pop("identifier", None) data["research_dataset"].pop("preferred_identifier", None) - 
response = self.client.post( - f"/rest/{self.api_version}/datasets", data, format="json" - ) + response = self.client.post(f"/rest/{self.api_version}/datasets", data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) return response.data["id"] @@ -295,9 +279,7 @@ def get_open_cr_with_files_and_dirs_from_api_with_file_details( from metax_api.models.catalog_record import ACCESS_TYPES # Use http auth to get complete details of the catalog record - self._use_http_authorization( - username="metax" - ) + self._use_http_authorization(username="metax") pk = 13 if set_owner: @@ -306,9 +288,7 @@ def get_open_cr_with_files_and_dirs_from_api_with_file_details( ) pk = self._create_cr_for_owner(pk, response.data) - CatalogRecord.objects.get( - pk=pk - ).calculate_directory_byte_sizes_and_file_counts() + CatalogRecord.objects.get(pk=pk).calculate_directory_byte_sizes_and_file_counts() if use_login_access_type: response_data = self.client.get( @@ -344,28 +324,22 @@ def get_open_cr_with_files_and_dirs_from_api_with_file_details( return response.data - def get_restricted_cr_with_files_and_dirs_from_api_with_file_details( - self, set_owner=False - ): + def get_restricted_cr_with_files_and_dirs_from_api_with_file_details(self, set_owner=False): from metax_api.models import CatalogRecord from metax_api.models.catalog_record import ACCESS_TYPES # Use http auth to get complete details of the catalog record - self._use_http_authorization( - "metax" - ) + self._use_http_authorization("metax") pk = 13 - response = self.client.get( - f"/rest/{self.api_version}/datasets/{pk}?include_user_metadata" - ) + response = self.client.get(f"/rest/{self.api_version}/datasets/{pk}?include_user_metadata") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) data = response.data # Set access_type to restricted - data["research_dataset"]["access_rights"]["access_type"][ - "identifier" - ] = ACCESS_TYPES["restricted"] + data["research_dataset"]["access_rights"]["access_type"]["identifier"] = ACCESS_TYPES[ + "restricted" + ] if set_owner: pk = self._create_cr_for_owner(pk, data) @@ -377,9 +351,7 @@ def get_restricted_cr_with_files_and_dirs_from_api_with_file_details( ) self.assertEqual(response.status_code, status.HTTP_200_OK) - CatalogRecord.objects.get( - pk=pk - ).calculate_directory_byte_sizes_and_file_counts() + CatalogRecord.objects.get(pk=pk).calculate_directory_byte_sizes_and_file_counts() response = self.client.get( f"/rest/{self.api_version}/datasets/{pk}?include_user_metadata&file_details" ) @@ -397,42 +369,32 @@ def get_restricted_cr_with_files_and_dirs_from_api_with_file_details( return response.data - def get_embargoed_cr_with_files_and_dirs_from_api_with_file_details( - self, is_available - ): + def get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(self, is_available): from metax_api.models import CatalogRecord from metax_api.models.catalog_record import ACCESS_TYPES # Use http auth to get complete details of the catalog record - self._use_http_authorization( - "metax" - ) + self._use_http_authorization("metax") pk = 13 - response = self.client.get( - f"/rest/{self.api_version}/datasets/{pk}?include_user_metadata" - ) + response = self.client.get(f"/rest/{self.api_version}/datasets/{pk}?include_user_metadata") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) data = response.data # Set access_type to embargo - data["research_dataset"]["access_rights"]["access_type"][ - "identifier" - ] = ACCESS_TYPES["embargo"] + 
data["research_dataset"]["access_rights"]["access_type"]["identifier"] = ACCESS_TYPES[ + "embargo" + ] if is_available: data["research_dataset"]["access_rights"]["available"] = "2000-01-01" else: data["research_dataset"]["access_rights"]["available"] = "3000-01-01" - response = self.client.put( - f"/rest/{self.api_version}/datasets/13", data, format="json" - ) + response = self.client.put(f"/rest/{self.api_version}/datasets/13", data, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) - CatalogRecord.objects.get( - pk=pk - ).calculate_directory_byte_sizes_and_file_counts() + CatalogRecord.objects.get(pk=pk).calculate_directory_byte_sizes_and_file_counts() response = self.client.get( f"/rest/{self.api_version}/datasets/{pk}?include_user_metadata&file_details" ) @@ -475,9 +437,7 @@ def _get_new_file_data( if not project: project = "research_project_112" if not directory_path: - directory_path = ( - "/prj_112_root/science_data_C/phase_2/2017/10/dir_" + file_n - ) + directory_path = "/prj_112_root/science_data_C/phase_2/2017/10/dir_" + file_n if not file_path: file_path = directory_path + "/file_" + file_n diff --git a/src/metax_api/urls.py b/src/metax_api/urls.py index 8f624a44..0259e75a 100755 --- a/src/metax_api/urls.py +++ b/src/metax_api/urls.py @@ -33,31 +33,31 @@ from metax_api.views.router import view_urlpatterns v1_urls = [ - url('', include(view_urlpatterns)), - url(r'^oai/', oaipmh, name='oai'), - url(r'^rest/', include(rest_api_v1)), - url(r'^rest/v1/', include(rest_api_v1)), - url(r'^rpc/', include(rpc_api_v1)), - url(r'^rpc/v1/', include(rpc_api_v1)), + url("", include(view_urlpatterns)), + url(r"^oai/", oaipmh, name="oai"), + url(r"^rest/", include(rest_api_v1)), + url(r"^rest/v1/", include(rest_api_v1)), + url(r"^rpc/", include(rpc_api_v1)), + url(r"^rpc/v1/", include(rpc_api_v1)), ] v2_urls = [ - url(r'^rest/v2/', include(rest_api_v2)), - url(r'^rpc/v2/', include(rpc_api_v2)), + url(r"^rest/v2/", include(rest_api_v2)), + url(r"^rpc/v2/", include(rpc_api_v2)), ] urlpatterns = [] -if 'v1' in django_settings.API_VERSIONS_ENABLED: +if "v1" in django_settings.API_VERSIONS_ENABLED: urlpatterns += v1_urls -if 'v2' in django_settings.API_VERSIONS_ENABLED: +if "v2" in django_settings.API_VERSIONS_ENABLED: urlpatterns += v2_urls if django_settings.WATCHMAN_CONFIGURED: - urlpatterns += [re_path(r'^watchman/', include('watchman.urls'))] + urlpatterns += [re_path(r"^watchman/", include("watchman.urls"))] if django_settings.DEBUG: urlpatterns += [ - path('__debug__/', include(debug_toolbar.urls)), + path("__debug__/", include(debug_toolbar.urls)), ] diff --git a/src/metax_api/utils/reference_data_loader.py b/src/metax_api/utils/reference_data_loader.py index 392d98b7..26df7679 100755 --- a/src/metax_api/utils/reference_data_loader.py +++ b/src/metax_api/utils/reference_data_loader.py @@ -14,7 +14,7 @@ _logger = logging.getLogger(__name__) -class ReferenceDataLoader(): +class ReferenceDataLoader: REF_DATA_LOAD_NUM = 0 @@ -31,43 +31,54 @@ def populate_cache_reference_data(cls, cache, settings=django_settings): cache: cache object to use for saving settings: override elasticsearch settings in settings.py """ - if not cache.get_or_set('reference_data_load_executing', True, ex=120): - return 'reload_started_by_other' + if not cache.get_or_set("reference_data_load_executing", True, ex=120): + return "reload_started_by_other" - _logger.info('ReferenceDataLoader - populating cache...') + _logger.info("ReferenceDataLoader - populating cache...") if executing_test_case(): - 
_logger.info('(Note: populating test suite cache)') + _logger.info("(Note: populating test suite cache)") try: reference_data = cls._fetch_reference_data(settings) except: - _logger.exception('Reference data fetch failed') + _logger.exception("Reference data fetch failed") raise - cache.set('reference_data', reference_data) + cache.set("reference_data", reference_data) errors = None - reference_data_check = cache.get('reference_data', master=True) - if 'reference_data' not in reference_data_check.keys(): - _logger.warning('Key reference_data missing from reference data - ' - 'something went wrong during cache population?') + reference_data_check = cache.get("reference_data", master=True) + if "reference_data" not in reference_data_check.keys(): + _logger.warning( + "Key reference_data missing from reference data - " + "something went wrong during cache population?" + ) errors = True raise Exception("reference data loading failed") - if 'organization_data' not in reference_data_check.keys(): - _logger.warning('Key organization_data missing from reference data - ' - 'something went wrong during cache population?') + if "organization_data" not in reference_data_check.keys(): + _logger.warning( + "Key organization_data missing from reference data - " + "something went wrong during cache population?" + ) errors = True if not errors: if not isinstance(settings, dict): settings = settings.ELASTICSEARCH - cache.set('ref_data_up_to_date', True, ex=django_settings.REFERENCE_DATA_RELOAD_INTERVAL) + cache.set( + "ref_data_up_to_date", + True, + ex=django_settings.REFERENCE_DATA_RELOAD_INTERVAL, + ) - _logger.info('ReferenceDataLoader - %s' % ('failed to populate cache' if errors else 'cache populated')) - cache.delete('reference_data_load_executing') + _logger.info( + "ReferenceDataLoader - %s" + % ("failed to populate cache" if errors else "cache populated") + ) + cache.delete("reference_data_load_executing") @classmethod def _fetch_reference_data(cls, settings): @@ -77,7 +88,7 @@ def _fetch_reference_data(cls, settings): settings = settings.ELASTICSEARCH connection_params = cls.get_connection_parameters(settings) - esclient, scan = cls.get_es_imports(settings['HOSTS'], connection_params) + esclient, scan = cls.get_es_imports(settings["HOSTS"], connection_params) reference_data = {} for index_name in esclient.indices.get_mapping().keys(): @@ -87,21 +98,21 @@ def _fetch_reference_data(cls, settings): # if local elasticsearch is not used. aggr_types = esclient.search( index=index_name, - body={"aggs": { "types": {"terms": {"field": "type", "size": 30}}}}, - filter_path='aggregations', - _source='type', - scroll='1m' + body={"aggs": {"types": {"terms": {"field": "type", "size": 30}}}}, + filter_path="aggregations", + _source="type", + scroll="1m", ) - for type_name in [ b['key'] for b in aggr_types['aggregations']['types']['buckets'] ]: + for type_name in [b["key"] for b in aggr_types["aggregations"]["types"]["buckets"]]: reference_data[index_name][type_name] = [] # must use wildcard query here because organization_data does not have their 'type' # field indexed in the old 5.8 version.. This is fixed in the new ES cluster so this could # be changed after all envs is using the new version. 
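                # (A wildcard on the document id, value f"{type_name}*", selects every
                # document of one type without relying on the 'type' field mapping;
                # scan - presumably the elasticsearch.helpers scroll helper - then
                # yields all matching hits instead of one size-capped result page.)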
all_rows = scan( esclient, - query={'query': {'wildcard': {'id': {'value': f'{type_name}*'}}}}, - index=index_name + query={"query": {"wildcard": {"id": {"value": f"{type_name}*"}}}}, + index=index_name, ) for row in all_rows: @@ -111,44 +122,58 @@ def _fetch_reference_data(cls, settings): try: # should always be present - entry = { 'uri': row['_source']['uri'] } + entry = {"uri": row["_source"]["uri"]} except KeyError: - _logger.warning('Elasticsearch document missing uri in index {0} type {1}: {2}'.format( - index_name, type_name, row)) + _logger.warning( + "Elasticsearch document missing uri in index {0} type {1}: {2}".format( + index_name, type_name, row + ) + ) continue try: # should always be present - entry['code'] = row['_source']['code'] + entry["code"] = row["_source"]["code"] except KeyError: # error, but not blocking. place key 'code' anyway so that the existence # of the key does not have to be checked elsewhere - entry['code'] = None - _logger.warning('Elasticsearch document missing code in index {0} type {1}: {2}'.format( - index_name, type_name, row)) + entry["code"] = None + _logger.warning( + "Elasticsearch document missing code in index {0} type {1}: {2}".format( + index_name, type_name, row + ) + ) - label = row['_source'].get('label', None) - scheme = row['_source'].get('scheme', None) + label = row["_source"].get("label", None) + scheme = row["_source"].get("scheme", None) # dont want empty dicts loitering in the cache if label: - entry['label'] = label + entry["label"] = label if scheme: - entry['scheme'] = scheme + entry["scheme"] = scheme - if type_name == 'location': - entry['wkt'] = row['_source'].get('wkt', None) + if type_name == "location": + entry["wkt"] = row["_source"].get("wkt", None) - if type_name == 'license' and 'same_as' in row['_source'] and len(row['_source']['same_as']) > 0: - entry['same_as'] = row['_source']['same_as'][0] + if ( + type_name == "license" + and "same_as" in row["_source"] + and len(row["_source"]["same_as"]) > 0 + ): + entry["same_as"] = row["_source"]["same_as"][0] - if type_name == 'file_format_version': - entry['input_file_format'] = row['_source'].get('input_file_format', None) - entry['output_format_version'] = row['_source'].get('output_format_version', None) + if type_name == "file_format_version": + entry["input_file_format"] = row["_source"].get("input_file_format", None) + entry["output_format_version"] = row["_source"].get( + "output_format_version", None + ) - if type_name == 'organization' and row['_source'].get('parent_id', False): - entry['parent_org_code'] = row['_source']['parent_id'][len('organization_'):] + if type_name == "organization" and row["_source"].get("parent_id", False): + entry["parent_org_code"] = row["_source"]["parent_id"][ + len("organization_") : + ] reference_data[index_name][type_name].append(entry) @@ -159,12 +184,18 @@ def get_connection_parameters(settings): """ https://docs.objectrocket.com/elastic_python_examples.html """ - if settings['HOSTS'][0] != 'localhost': - conf = { 'send_get_body_as': 'GET' } - if settings.get('USE_SSL', False): - conf.update({ 'port': 443, 'use_ssl': True, 'verify_certs': True, }) - if settings.get('PORT', False): - conf.update({ 'port': settings['PORT'] }) + if settings["HOSTS"][0] != "localhost": + conf = {"send_get_body_as": "GET"} + if settings.get("USE_SSL", False): + conf.update( + { + "port": 443, + "use_ssl": True, + "verify_certs": True, + } + ) + if settings.get("PORT", False): + conf.update({"port": settings["PORT"]}) return conf 
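        # for a localhost elasticsearch the client defaults suffice, so no
        # explicit connection parameters are needed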
_logger.warning("returning empty connection parameters") return {} diff --git a/src/metax_api/utils/utils.py b/src/metax_api/utils/utils.py index 759427ca..4871d077 100755 --- a/src/metax_api/utils/utils.py +++ b/src/metax_api/utils/utils.py @@ -17,11 +17,11 @@ class IdentifierType(Enum): - URN = 'urn' - DOI = 'doi' + URN = "urn" + DOI = "doi" -class DelayedLog(): +class DelayedLog: """ A callable that can be passed to CallableService as a post_request_callable, @@ -40,16 +40,16 @@ def executing_test_case(): """ Returns True whenever code is being executed by automatic test cases """ - return 'test' in sys.argv + return "test" in sys.argv def datetime_to_str(date_obj): if isinstance(date_obj, datetime): - return date_obj.strftime('%Y-%m-%dT%H:%M:%SZ') + return date_obj.strftime("%Y-%m-%dT%H:%M:%SZ") elif datetime is None: return None else: - assert isinstance(date_obj, datetime), 'date_obj must be datetime object or None' + assert isinstance(date_obj, datetime), "date_obj must be datetime object or None" def parse_timestamp_string_to_tz_aware_datetime(timestamp_str): @@ -82,7 +82,7 @@ def get_tz_aware_now_without_micros(): def generate_uuid_identifier(urn_prefix=False): if urn_prefix: - return 'urn:nbn:fi:att:%s' % str(uuid4()) + return "urn:nbn:fi:att:%s" % str(uuid4()) return str(uuid4()) @@ -93,10 +93,15 @@ def is_metax_generated_doi_identifier(identifier): :param identifier: :return: boolean """ - if not identifier or not hasattr(settings, 'DATACITE') or not settings.DATACITE.get('PREFIX', False): + if ( + not identifier + or not hasattr(settings, "DATACITE") + or not settings.DATACITE.get("PREFIX", False) + ): return False - return identifier.startswith('doi:{0}/'.format(settings.DATACITE.get('PREFIX'))) + return identifier.startswith("doi:{0}/".format(settings.DATACITE.get("PREFIX"))) + def is_remote_doi_identifier(identifier): """ @@ -105,12 +110,15 @@ def is_remote_doi_identifier(identifier): :param identifier: :return: boolean """ - if not identifier or not settings.DATACITE.get('PREFIX', False): + if not identifier or not settings.DATACITE.get("PREFIX", False): return False - if identifier.startswith('doi:') and not identifier.startswith('doi:{0}/'.format(settings.DATACITE.get('PREFIX'))): + if identifier.startswith("doi:") and not identifier.startswith( + "doi:{0}/".format(settings.DATACITE.get("PREFIX")) + ): return True + def is_metax_generated_urn_identifier(identifier): """ Check whether given identifier is a metax generated urn identifier @@ -121,7 +129,7 @@ def is_metax_generated_urn_identifier(identifier): if not identifier: return False - return identifier.startswith('urn:nbn:fi:att:') or identifier.startswith('urn:nbn:fi:csc') + return identifier.startswith("urn:nbn:fi:att:") or identifier.startswith("urn:nbn:fi:csc") def generate_doi_identifier(doi_suffix=None): @@ -135,13 +143,13 @@ def generate_doi_identifier(doi_suffix=None): doi_suffix = generate_uuid_identifier() doi_prefix = None - if hasattr(settings, 'DATACITE'): - doi_prefix = settings.DATACITE.get('PREFIX', None) + if hasattr(settings, "DATACITE"): + doi_prefix = settings.DATACITE.get("PREFIX", None) if not doi_prefix: raise Exception("PREFIX must be defined in settings DATACITE dictionary") if not doi_suffix: raise ValueError("DOI suffix must be provided in order to create a DOI identifier") - return 'doi:{0}/{1}'.format(doi_prefix, doi_suffix) + return "doi:{0}/{1}".format(doi_prefix, doi_suffix) def extract_doi_from_doi_identifier(doi_identifier): @@ -152,16 +160,16 @@ def 
extract_doi_from_doi_identifier(doi_identifier): :param doi_identifier: Must start with doi:10. for this method to work properly :return: If the doi_identifier does not start with doi:10., return None. Otherwise return doi starting from 10. """ - if doi_identifier and doi_identifier.startswith('doi:10.'): - return doi_identifier[doi_identifier.index('10.'):] + if doi_identifier and doi_identifier.startswith("doi:10."): + return doi_identifier[doi_identifier.index("10.") :] return None def get_identifier_type(identifier): if identifier: - if identifier.startswith('doi:'): + if identifier.startswith("doi:"): return IdentifierType.DOI - elif identifier.startswith('urn:'): + elif identifier.startswith("urn:"): return IdentifierType.URN return None @@ -176,11 +184,16 @@ def remove_keys_recursively(obj, fields_to_remove): """ if isinstance(obj, dict): obj = { - key: remove_keys_recursively(value, fields_to_remove) for key, value in obj.items() + key: remove_keys_recursively(value, fields_to_remove) + for key, value in obj.items() if key not in fields_to_remove } elif isinstance(obj, list): - obj = [remove_keys_recursively(item, fields_to_remove) for item in obj if item not in fields_to_remove] + obj = [ + remove_keys_recursively(item, fields_to_remove) + for item in obj + if item not in fields_to_remove + ] return obj @@ -200,8 +213,8 @@ def leave_keys_in_dict(dict_obj, fields_to_leave): if executing_test_case(): - class TestJsonLogger(): + class TestJsonLogger: def info(self, *args, **kwargs): pass @@ -216,4 +229,4 @@ def debug(self, *args, **kwargs): json_logger = TestJsonLogger() else: - json_logger = structlog.get_logger('structlog') + json_logger = structlog.get_logger("structlog") diff --git a/src/metax_api/views/router.py b/src/metax_api/views/router.py index 02a44b82..9f820219 100755 --- a/src/metax_api/views/router.py +++ b/src/metax_api/views/router.py @@ -10,6 +10,6 @@ from metax_api.views.secure import secure_view view_urlpatterns = [ - url(r'^logout?', secure_view.SecureLogoutView.as_view()), - url(r'^secure/login?', secure_view.SecureLoginView.as_view()), + url(r"^logout?", secure_view.SecureLogoutView.as_view()), + url(r"^secure/login?", secure_view.SecureLoginView.as_view()), ] diff --git a/src/metax_api/views/secure/secure_view.py b/src/metax_api/views/secure/secure_view.py index ed9fd4ad..2c4cbba5 100755 --- a/src/metax_api/views/secure/secure_view.py +++ b/src/metax_api/views/secure/secure_view.py @@ -19,47 +19,49 @@ class SecureLoginView(TemplateView): - def get(self, request, **kwargs): """ Reached through a redirect after a successful OIDC authentication. Parse the received id_token, and show selected contents of it to the user on a web page. 
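        The payload arrives in the OIDC_ID_TOKEN_PAYLOAD request header, presumably
        set by an OIDC proxy in front of the service.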
""" - _logger.debug('extracting information from token') + _logger.debug("extracting information from token") - token_payload = json.loads(request.META['HTTP_OIDC_ID_TOKEN_PAYLOAD']) + token_payload = json.loads(request.META["HTTP_OIDC_ID_TOKEN_PAYLOAD"]) _logger.debug(token_payload) try: json_logger.info( - event='user_login_visit', - user_id=token_payload.get('CSCUserName', token_payload['eppn']), - org_id=token_payload.get('schacHomeOrganization', 'org_missing'), + event="user_login_visit", + user_id=token_payload.get("CSCUserName", token_payload["eppn"]), + org_id=token_payload.get("schacHomeOrganization", "org_missing"), ) except KeyError: - _logger.error('token_payload has no CSCUserName or eppn') + _logger.error("token_payload has no CSCUserName or eppn") - idm_account_exists = len(token_payload.get('CSCUserName', '')) > 0 + idm_account_exists = len(token_payload.get("CSCUserName", "")) > 0 - home_org_exists = len(token_payload.get('schacHomeOrganization', '')) > 0 + home_org_exists = len(token_payload.get("schacHomeOrganization", "")) > 0 context = { - 'email': token_payload['email'], - 'idm_account_exists': idm_account_exists, - 'home_org_exists': home_org_exists, - 'token_string': request.META['HTTP_OIDC_ID_TOKEN'] if idm_account_exists and home_org_exists else '', - 'token_valid_until': datetime.fromtimestamp(token_payload['exp']).strftime('%Y-%m-%d %H:%M:%S'), - 'haka_exists': 'eppn' in token_payload, - 'logout_redirect_domain': django_settings.SERVER_DOMAIN_NAME, + "email": token_payload["email"], + "idm_account_exists": idm_account_exists, + "home_org_exists": home_org_exists, + "token_string": request.META["HTTP_OIDC_ID_TOKEN"] + if idm_account_exists and home_org_exists + else "", + "token_valid_until": datetime.fromtimestamp(token_payload["exp"]).strftime( + "%Y-%m-%d %H:%M:%S" + ), + "haka_exists": "eppn" in token_payload, + "logout_redirect_domain": django_settings.SERVER_DOMAIN_NAME, } # note: django automatically searches templates from root directory templates/ - return render(request, 'secure/auth_success.html', context=context) + return render(request, "secure/auth_success.html", context=context) class SecureLogoutView(TemplateView): - def get(self, request, **kwargs): """ After local oidc logout, redirect to OP for OP's logout procedures. 
From d3527b498e8d346a670a46a15785eeaba9d535a5 Mon Sep 17 00:00:00 2001 From: Toni Date: Fri, 28 May 2021 10:01:07 +0300 Subject: [PATCH 002/160] run black & isort 2 --- pyproject.toml | 4 ++-- .../management/commands/update_orgs.py | 4 +++- .../add_last_modified_header_to_response.py | 4 +--- src/metax_api/middleware/identifyapicaller.py | 11 +++-------- src/metax_api/middleware/request_logging.py | 16 ++++------------ src/metax_api/settings/components/externals.py | 5 ++++- .../settings/environments/unittests.py | 18 +++++++++++++++--- .../domain/organization_data.py | 4 +--- .../refdata_indexer/domain/reference_data.py | 4 +--- .../refdata_indexer/organization_csv_parser.py | 4 +--- .../service/elasticsearch_service.py | 18 ++++-------------- .../service/infra_data_service.py | 4 +++- .../service/local_data_service.py | 4 +++- .../service/mime_data_service.py | 12 +++--------- src/metax_api/tests/models/__init__.py | 6 +++++- 15 files changed, 53 insertions(+), 65 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c5a5cb2c..d18f17f2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,11 +54,11 @@ known_first_party = ["metax_api", "src/metax_api"] skip_glob = "*/models/__init__.py,__init__.py" include_trailing_comma = true combine_as_imports = true -line_length = 120 +line_length = 100 [tool.black] target-version = ['py38'] -line-length = 120 +line-length = 100 exclude = "/migrations/" [build-system] diff --git a/src/metax_api/management/commands/update_orgs.py b/src/metax_api/management/commands/update_orgs.py index 46515fb6..e47c39f0 100644 --- a/src/metax_api/management/commands/update_orgs.py +++ b/src/metax_api/management/commands/update_orgs.py @@ -69,7 +69,9 @@ def __str__(self): def get_orgs_from_api() -> List[Organization]: - res = requests.get("https://researchfi-api-production-researchfi.rahtiapp.fi/portalapi/organization/_search") + res = requests.get( + "https://researchfi-api-production-researchfi.rahtiapp.fi/portalapi/organization/_search" + ) data = res.json() orgs_json = data["hits"]["hits"] diff --git a/src/metax_api/middleware/add_last_modified_header_to_response.py b/src/metax_api/middleware/add_last_modified_header_to_response.py index 883cf498..d2461fa0 100755 --- a/src/metax_api/middleware/add_last_modified_header_to_response.py +++ b/src/metax_api/middleware/add_last_modified_header_to_response.py @@ -63,9 +63,7 @@ def _add_last_modified_header_to_response(response): if modified: modified_dt = parse_timestamp_string_to_tz_aware_datetime(modified) if modified_dt: - date_modified_in_gmt = timezone.localtime( - modified_dt, timezone=tz("GMT") - ) + date_modified_in_gmt = timezone.localtime(modified_dt, timezone=tz("GMT")) response["Last-Modified"] = date_modified_in_gmt.strftime( "%a, %d %b %Y %H:%M:%S GMT" ) diff --git a/src/metax_api/middleware/identifyapicaller.py b/src/metax_api/middleware/identifyapicaller.py index 3859f233..85b04e36 100755 --- a/src/metax_api/middleware/identifyapicaller.py +++ b/src/metax_api/middleware/identifyapicaller.py @@ -117,8 +117,7 @@ def _identify_api_caller(self, request): { "detail": [ "Invalid HTTP authorization method. 
Ensure you included on of the following " - "methods inside the auth header: %s" - % ", ".join(self.ALLOWED_AUTH_METHODS) + "methods inside the auth header: %s" % ", ".join(self.ALLOWED_AUTH_METHODS) ] } ) @@ -139,9 +138,7 @@ def _identify_api_caller(self, request): elif auth_method.lower() == "bearer": self._auth_bearer(request, auth_b64) else: - raise Exception( - "The allowed auth method %s is missing handling" % auth_method - ) + raise Exception("The allowed auth method %s is missing handling" % auth_method) return request @@ -194,9 +191,7 @@ def _auth_bearer(self, request, auth_b64): if len(token.get("CSCUserName", "")) > 0: request.user.username = token["CSCUserName"] else: - _logger.warning( - "id_token does not contain valid user id: fairdataid or cscusername" - ) + _logger.warning("id_token does not contain valid user id: fairdataid or cscusername") raise Http403 request.user.is_service = False diff --git a/src/metax_api/middleware/request_logging.py b/src/metax_api/middleware/request_logging.py index 827b7057..0f90ed52 100755 --- a/src/metax_api/middleware/request_logging.py +++ b/src/metax_api/middleware/request_logging.py @@ -49,29 +49,21 @@ def get_username(self, request): if "Basic" in auth_header: try: user_type = "service" - user = ( - b64decode(auth_header.split(" ")[1]).decode("utf-8").split(":")[0] - ) + user = b64decode(auth_header.split(" ")[1]).decode("utf-8").split(":")[0] except: _logger.exception("Could not extract username from http auth header") elif "Bearer" in auth_header or "bearer" in auth_header: try: user_type = "end_user" user = json_loads( - b64decode(auth_header.split(" ")[1].split(".")[1] + "===").decode( - "utf-8" - ) + b64decode(auth_header.split(" ")[1].split(".")[1] + "===").decode("utf-8") )["CSCUserName"] except: # dont log as an error or crash, since we dont want to get bothered by # errors about malformed tokens. auth middleware is going to reject this # token later too. 
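                # (the "===" suffix over-pads the base64 payload segment: b64decode
                # needs the input length to be a multiple of four and tolerates
                # excess "=" characters, so padding by three always makes a valid
                # JWT payload fragment decodable)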
- _logger.info( - "Faulty token: Could not extract username from bearer token" - ) + _logger.info("Faulty token: Could not extract username from bearer token") else: - _logger.info( - "HTTP Auth method not basic or bearer - unable to get username" - ) + _logger.info("HTTP Auth method not basic or bearer - unable to get username") return user, user_type diff --git a/src/metax_api/settings/components/externals.py b/src/metax_api/settings/components/externals.py index 93590ac4..b27fb9db 100755 --- a/src/metax_api/settings/components/externals.py +++ b/src/metax_api/settings/components/externals.py @@ -1,5 +1,8 @@ from metax_api.settings import env -from metax_api.settings.components.common import ATT_DATA_CATALOG_IDENTIFIER, IDA_DATA_CATALOG_IDENTIFIER +from metax_api.settings.components.common import ( + ATT_DATA_CATALOG_IDENTIFIER, + IDA_DATA_CATALOG_IDENTIFIER, +) OAI = { "BASE_URL": env("OAI_BASE_URL"), diff --git a/src/metax_api/settings/environments/unittests.py b/src/metax_api/settings/environments/unittests.py index 3a279479..631d9182 100755 --- a/src/metax_api/settings/environments/unittests.py +++ b/src/metax_api/settings/environments/unittests.py @@ -27,9 +27,21 @@ api_permissions.rest.datacatalogs["update"] += [Role.TEST_USER] api_permissions.rest.datacatalogs.delete += [Role.TEST_USER] -api_permissions.rest.datasets.create += [Role.API_AUTH_USER, Role.EXTERNAL, Role.TEST_USER] -api_permissions.rest.datasets["update"] += [Role.API_AUTH_USER, Role.EXTERNAL, Role.TEST_USER] -api_permissions.rest.datasets.delete += [Role.API_AUTH_USER, Role.EXTERNAL, Role.TEST_USER] +api_permissions.rest.datasets.create += [ + Role.API_AUTH_USER, + Role.EXTERNAL, + Role.TEST_USER, +] +api_permissions.rest.datasets["update"] += [ + Role.API_AUTH_USER, + Role.EXTERNAL, + Role.TEST_USER, +] +api_permissions.rest.datasets.delete += [ + Role.API_AUTH_USER, + Role.EXTERNAL, + Role.TEST_USER, +] api_permissions.rest.directories.read += [Role.TEST_USER] diff --git a/src/metax_api/tasks/refdata/refdata_indexer/domain/organization_data.py b/src/metax_api/tasks/refdata/refdata_indexer/domain/organization_data.py index d2268f78..082da01a 100755 --- a/src/metax_api/tasks/refdata/refdata_indexer/domain/organization_data.py +++ b/src/metax_api/tasks/refdata/refdata_indexer/domain/organization_data.py @@ -12,9 +12,7 @@ class OrganizationData(IndexableData): """ ORG_PURL_BASE_URL = ( - "http://uri.suomi.fi/codelist/fairdata/" - + IndexableData.DATA_TYPE_ORGANIZATION - + "/code/" + "http://uri.suomi.fi/codelist/fairdata/" + IndexableData.DATA_TYPE_ORGANIZATION + "/code/" ) def __init__(self, org_id, label, parent_id="", same_as=[], org_csc="", scheme=""): diff --git a/src/metax_api/tasks/refdata/refdata_indexer/domain/reference_data.py b/src/metax_api/tasks/refdata/refdata_indexer/domain/reference_data.py index 132904fc..7de9c5d6 100755 --- a/src/metax_api/tasks/refdata/refdata_indexer/domain/reference_data.py +++ b/src/metax_api/tasks/refdata/refdata_indexer/domain/reference_data.py @@ -77,9 +77,7 @@ def __init__( internal_code="", ): - super(ReferenceData, self).__init__( - data_id, data_type, label, uri, same_as, scheme - ) + super(ReferenceData, self).__init__(data_id, data_type, label, uri, same_as, scheme) self.wkt = wkt self.input_file_format = input_file_format diff --git a/src/metax_api/tasks/refdata/refdata_indexer/organization_csv_parser.py b/src/metax_api/tasks/refdata/refdata_indexer/organization_csv_parser.py index 5a174e55..7aa6d56c 100755 --- 
a/src/metax_api/tasks/refdata/refdata_indexer/organization_csv_parser.py +++ b/src/metax_api/tasks/refdata/refdata_indexer/organization_csv_parser.py @@ -62,9 +62,7 @@ def parse_csv(): # otherwise create an org and append it to existing root's hierarchy if unit_sub_code and unit_name: - organization_code = "-".join( - [org_code, unit_sub_code] - ) # Unique + organization_code = "-".join([org_code, unit_sub_code]) # Unique parent_id = root_orgs.get(org_code, None) output_orgs.append( create_organization( diff --git a/src/metax_api/tasks/refdata/refdata_indexer/service/elasticsearch_service.py b/src/metax_api/tasks/refdata/refdata_indexer/service/elasticsearch_service.py index d769e3e0..d3c03bef 100755 --- a/src/metax_api/tasks/refdata/refdata_indexer/service/elasticsearch_service.py +++ b/src/metax_api/tasks/refdata/refdata_indexer/service/elasticsearch_service.py @@ -33,9 +33,7 @@ def index_exists(self, index): def create_index(self, index, filename): _logger.info("Trying to create index " + index) return self._operation_ok( - self.es.indices.create( - index=index, body=self._get_json_file_as_str(filename) - ) + self.es.indices.create(index=index, body=self._get_json_file_as_str(filename)) ) def delete_index(self, index): @@ -54,24 +52,16 @@ def delete_and_update_indexable_data(self, index, doc_type, indexable_data_list) ) self._delete_all_documents_from_index_with_type(index, doc_type) _logger.info( - "Trying to bulk update reference data with type " - + doc_type - + " to index " - + index + "Trying to bulk update reference data with type " + doc_type + " to index " + index ) - return self._operation_ok( - self.es.bulk(body=bulk_update_str, request_timeout=30) - ) + return self._operation_ok(self.es.bulk(body=bulk_update_str, request_timeout=30)) return None def _delete_all_documents_from_index_with_type(self, index, doc_type): _logger.info( - "Trying to delete all documents from index " - + index - + " having type " - + doc_type + "Trying to delete all documents from index " + index + " having type " + doc_type ) return self._operation_ok( self.es.delete_by_query( diff --git a/src/metax_api/tasks/refdata/refdata_indexer/service/infra_data_service.py b/src/metax_api/tasks/refdata/refdata_indexer/service/infra_data_service.py index db118904..0a958fd9 100755 --- a/src/metax_api/tasks/refdata/refdata_indexer/service/infra_data_service.py +++ b/src/metax_api/tasks/refdata/refdata_indexer/service/infra_data_service.py @@ -20,7 +20,9 @@ class InfraDataService: so it is first fetched and parsed. """ - INFRA_REF_DATA_SOURCE_URL = "https://avaa.tdata.fi/api/jsonws/tupa-portlet.Infrastructures/get-all-infrastructures" + INFRA_REF_DATA_SOURCE_URL = ( + "https://avaa.tdata.fi/api/jsonws/tupa-portlet.Infrastructures/get-all-infrastructures" + ) TEMP_FILENAME = "/tmp/data.json" diff --git a/src/metax_api/tasks/refdata/refdata_indexer/service/local_data_service.py b/src/metax_api/tasks/refdata/refdata_indexer/service/local_data_service.py index 24d99597..040ec24d 100755 --- a/src/metax_api/tasks/refdata/refdata_indexer/service/local_data_service.py +++ b/src/metax_api/tasks/refdata/refdata_indexer/service/local_data_service.py @@ -19,7 +19,9 @@ class LocalDataService: # move infra here beacuse the fetch API for it is broken and there is no estimate when it could be fixed. # to keep the reference data unchanged, use the old scheme until some fix for it has been invented and # validated. 
- INFRA_SCHEME = "https://avaa.tdata.fi/api/jsonws/tupa-portlet.Infrastructures/get-all-infrastructures" + INFRA_SCHEME = ( + "https://avaa.tdata.fi/api/jsonws/tupa-portlet.Infrastructures/get-all-infrastructures" + ) def get_data(self, data_type): return self._parse_local_reference_data(data_type) diff --git a/src/metax_api/tasks/refdata/refdata_indexer/service/mime_data_service.py b/src/metax_api/tasks/refdata/refdata_indexer/service/mime_data_service.py index 9484292e..09eb70e8 100755 --- a/src/metax_api/tasks/refdata/refdata_indexer/service/mime_data_service.py +++ b/src/metax_api/tasks/refdata/refdata_indexer/service/mime_data_service.py @@ -21,9 +21,7 @@ class MimeDataService: """ IANA_NS = "{http://www.iana.org/assignments}" - MIME_TYPE_REF_DATA_SOURCE_URL = ( - "https://www.iana.org/assignments/media-types/media-types.xml" - ) + MIME_TYPE_REF_DATA_SOURCE_URL = "https://www.iana.org/assignments/media-types/media-types.xml" MIME_TYPE_REGISTRY_IDS = [ "application", "audio", @@ -57,9 +55,7 @@ def _parse_mime_data(self): is_parsing_model_elem = False found_valid_file_elem = False found_valid_name_elem = False - for event, elem in ET.iterparse( - self.TEMP_XML_FILENAME, events=("start", "end") - ): + for event, elem in ET.iterparse(self.TEMP_XML_FILENAME, events=("start", "end")): if event == "start": if ( elem.tag == (self.IANA_NS + "registry") @@ -84,9 +80,7 @@ def _parse_mime_data(self): ): if elem.text: found_valid_file_elem = True - uri = ( - "https://www.iana.org/assignments/media-types/" + elem.text - ) + uri = "https://www.iana.org/assignments/media-types/" + elem.text data_id = elem.text elif event == "end": if elem.tag == self.IANA_NS + "registry": diff --git a/src/metax_api/tests/models/__init__.py b/src/metax_api/tests/models/__init__.py index 2c2ac851..44f3e738 100755 --- a/src/metax_api/tests/models/__init__.py +++ b/src/metax_api/tests/models/__init__.py @@ -5,7 +5,11 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT -from .catalog_record import CatalogRecordModelBasicTest, CatalogRecordModelTests, CatalogRecordManagerTests +from .catalog_record import ( + CatalogRecordModelBasicTest, + CatalogRecordModelTests, + CatalogRecordManagerTests, +) from .common import * from .data_catalog import DataCatalogModelTests from .directory import DirectoryModelTests From 7fa054e391955de8267db8a0e5a83f5349ce4d54 Mon Sep 17 00:00:00 2001 From: Toni Date: Mon, 24 May 2021 15:48:21 +0300 Subject: [PATCH 003/160] change dependency install method in Dockerfile to pip --- Dockerfile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 54133ab5..ba56839b 100755 --- a/Dockerfile +++ b/Dockerfile @@ -8,9 +8,11 @@ RUN mkdir -p /var/log/metax-api/errors && touch /var/log/metax-api/metax-api.jso RUN apt-get update && apt install xqilla libxerces-c-dev build-essential libssl-dev libffi-dev python-dev libxqilla-dev -y RUN pip install --upgrade pip wheel poetry -COPY pyproject.toml poetry.lock /code/ +COPY pyproject.toml poetry.lock requirements.txt /code/ WORKDIR /code -RUN poetry config virtualenvs.create false && poetry install --no-interaction --no-ansi --extras "docs simplexquery" + +# RUN poetry config virtualenvs.create false && poetry install --no-interaction --no-ansi --extras "docs simplexquery" +RUN pip install -r requirements.txt EXPOSE 8008 EXPOSE 8006 From 89f70c567e06f00bef021355d24da9badefa8fa4 Mon Sep 17 00:00:00 2001 From: Toni Date: Tue, 25 May 2021 09:20:00 +0300 Subject: [PATCH 004/160] cleanup --- 
Dockerfile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index ba56839b..99663f2b 100755 --- a/Dockerfile +++ b/Dockerfile @@ -7,11 +7,10 @@ RUN mkdir -p /var/log/metax-api/errors && touch /var/log/metax-api/metax-api.jso RUN apt-get update && apt install xqilla libxerces-c-dev build-essential libssl-dev libffi-dev python-dev libxqilla-dev -y -RUN pip install --upgrade pip wheel poetry -COPY pyproject.toml poetry.lock requirements.txt /code/ +RUN pip install --upgrade pip wheel +COPY requirements.txt /code/ WORKDIR /code -# RUN poetry config virtualenvs.create false && poetry install --no-interaction --no-ansi --extras "docs simplexquery" RUN pip install -r requirements.txt EXPOSE 8008 From ee7554b6a786ea3a2e9c10d5b71f7b15d34229ea Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Mon, 31 May 2021 12:45:04 +0300 Subject: [PATCH 005/160] delete v2 apierrors test to fix the issue - apierrors API is the same for v1 and v2 so deleting either one of the test cases is ok. Delete v2 tests if this api is changed in the future --- .../api/rest/v2/views/apierrors/__init__.py | 8 - .../tests/api/rest/v2/views/apierrors/read.py | 163 ------------------ 2 files changed, 171 deletions(-) delete mode 100755 src/metax_api/tests/api/rest/v2/views/apierrors/__init__.py delete mode 100755 src/metax_api/tests/api/rest/v2/views/apierrors/read.py diff --git a/src/metax_api/tests/api/rest/v2/views/apierrors/__init__.py b/src/metax_api/tests/api/rest/v2/views/apierrors/__init__.py deleted file mode 100755 index 0ffba528..00000000 --- a/src/metax_api/tests/api/rest/v2/views/apierrors/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -from .read import * diff --git a/src/metax_api/tests/api/rest/v2/views/apierrors/read.py b/src/metax_api/tests/api/rest/v2/views/apierrors/read.py deleted file mode 100755 index ace87cd8..00000000 --- a/src/metax_api/tests/api/rest/v2/views/apierrors/read.py +++ /dev/null @@ -1,163 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -from os import makedirs -from shutil import rmtree - -from django.conf import settings -from django.core.management import call_command -from rest_framework import status -from rest_framework.test import APITestCase - -from metax_api.tests.utils import TestClassUtils, test_data_file_path - - -class ApiErrorReadBasicTests(APITestCase, TestClassUtils): - - """ - Basic read operations - """ - - @classmethod - def setUpClass(cls): - """ - Loaded only once for test cases inside this class. - """ - call_command("loaddata", test_data_file_path, verbosity=0) - super(ApiErrorReadBasicTests, cls).setUpClass() - - def setUp(self): - super(ApiErrorReadBasicTests, self).setUp() - rmtree(settings.ERROR_FILES_PATH, ignore_errors=True) - makedirs(settings.ERROR_FILES_PATH) - self._use_http_authorization(username="metax") - - def _assert_fields_presence(self, response): - """ - Check presence and absence of some key information. 
- """ - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual("data" in response.data, True, response.data) - self.assertEqual("response" in response.data, True, response.data) - self.assertEqual("traceback" in response.data, True, response.data) - self.assertEqual("url" in response.data, True, response.data) - self.assertEqual( - "HTTP_AUTHORIZATION" in response.data["headers"], - False, - response.data["headers"], - ) - - def test_list_errors(self): - """ - Each requesting resulting in an error should leave behind one API error entry. - """ - cr_1 = self.client.get("/rest/v2/datasets/1").data - cr_1.pop("id") - cr_1.pop("identifier") - cr_1.pop("data_catalog") # causes an error - - response = self.client.post("/rest/v2/datasets", cr_1, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - response = self.client.post("/rest/v2/datasets", cr_1, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - response = self.client.get("/rest/v2/apierrors") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - def test_get_error_details(self): - cr_1 = self.client.get("/rest/v2/datasets/1").data - cr_1.pop("id") - cr_1.pop("identifier") - cr_1.pop("data_catalog") # causes an error - cr_1["research_dataset"]["title"] = {"en": "Abc"} - - response = self.client.post("/rest/v2/datasets", cr_1, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - # list errors in order to get error identifier - response = self.client.get("/rest/v2/apierrors") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual("identifier" in response.data[0], True, response.data) - - response = self.client.get("/rest/v2/apierrors/%s" % response.data[0]["identifier"]) - self._assert_fields_presence(response) - self.assertEqual( - "data_catalog" in response.data["response"], True, response.data["response"] - ) - self.assertEqual( - response.data["data"]["research_dataset"]["title"]["en"], - "Abc", - response.data["data"]["research_dataset"]["title"], - ) - - def test_delete_error_details(self): - cr_1 = self.client.get("/rest/v2/datasets/1").data - cr_1.pop("id") - cr_1.pop("identifier") - cr_1.pop("data_catalog") # causes an error - - response = self.client.post("/rest/v2/datasets", cr_1, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - response = self.client.get("/rest/v2/apierrors") - response = self.client.delete("/rest/v2/apierrors/%s" % response.data[0]["identifier"]) - self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) - - response = self.client.get("/rest/v2/apierrors") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - def test_delete_all_error_details(self): - cr_1 = self.client.get("/rest/v2/datasets/1").data - cr_1.pop("id") - cr_1.pop("identifier") - cr_1.pop("data_catalog") # causes an error - - response = self.client.post("/rest/v2/datasets", cr_1, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - response = self.client.post("/rest/v2/datasets", cr_1, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - - # ensure something was produced... 
- response = self.client.get("/rest/v2/apierrors") - - response = self.client.post("/rest/v2/apierrors/flush") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - response = self.client.get("/rest/v2/apierrors") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - def test_bulk_operation_produces_error_entry(self): - """ - Ensure also bulk operations produce error entries. - """ - cr_1 = self.client.get("/rest/v2/datasets/1").data - cr_1.pop("id") - cr_1.pop("identifier") - cr_1.pop("data_catalog") # causes an error - response = self.client.post("/rest/v2/datasets", [cr_1, cr_1], format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - response = self.client.get("/rest/v2/apierrors") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - response = self.client.get("/rest/v2/apierrors/%s" % response.data[0]["identifier"]) - self._assert_fields_presence(response) - self.assertEqual("other" in response.data, True, response.data) - self.assertEqual("bulk_request" in response.data["other"], True, response.data) - self.assertEqual("data_row_count" in response.data["other"], True, response.data) - - def test_api_permitted_only_to_metax_user(self): - # uses testuser by default - self._use_http_authorization() - response = self.client.get("/rest/v2/apierrors") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - response = self.client.get("/rest/v2/apierrors/123") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - response = self.client.delete("/rest/v2/apierrors/123") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - response = self.client.post("/rest/v2/apierrors/flush_errors") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) From 8cbd248de83b8030df0feee9b6722182b2d90cc0 Mon Sep 17 00:00:00 2001 From: Toni Date: Mon, 24 May 2021 15:43:28 +0300 Subject: [PATCH 006/160] add log messages --- src/metax_api/management/commands/update_orgs.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/metax_api/management/commands/update_orgs.py b/src/metax_api/management/commands/update_orgs.py index e47c39f0..5e9485fa 100644 --- a/src/metax_api/management/commands/update_orgs.py +++ b/src/metax_api/management/commands/update_orgs.py @@ -129,7 +129,7 @@ def handle(self, *args, **options): for a in api_orgs: match, changes = i.compare_and_update(a) if match and changes > 0: - logger.info(f"updated org {i.org_code} with {changes} changes") + logger.info(f"updated org {i.org_name_fi} with {changes} changes in unit: {a.unit_name}") union.append(i) # add missing orgs to local ones added = 0 @@ -138,6 +138,7 @@ def handle(self, *args, **options): for a in loc_orgs: match = i.compare(a) if not match: + logger.info(f"adding missing org {i.org_name_fi}, {i.unit_name}") union.append(i) added += 1 logger.info(f"Added {added} organisations from research.fi to local org list") From 4c5e845440b1bb506bfdb73535053f2d823237f8 Mon Sep 17 00:00:00 2001 From: Toni Date: Mon, 24 May 2021 15:54:33 +0300 Subject: [PATCH 007/160] change dependency install method in Dockerfile to pip --- .../resources/organizations/organizations.csv | 6074 +++++++++++------ 1 file changed, 3990 insertions(+), 2084 deletions(-) diff --git a/src/metax_api/tasks/refdata/refdata_indexer/resources/organizations/organizations.csv b/src/metax_api/tasks/refdata/refdata_indexer/resources/organizations/organizations.csv index 933af772..bd748565 100755 --- 
a/src/metax_api/tasks/refdata/refdata_indexer/resources/organizations/organizations.csv +++ b/src/metax_api/tasks/refdata/refdata_indexer/resources/organizations/organizations.csv @@ -1,90 +1,111 @@ org_name_fi,org_name_en,org_name_sv,org_code,unit_main_code,unit_sub_code,unit_name,org_isni,org_csc -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,,,http://isni.org/isni/0000000108389418,2 -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,A800,"School services, ARTS",, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,A801,"Department of Film, Television and Scenography",, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,A802,Department of Media,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,A803,Department of Design,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,A805,Department of Art,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,A850,Aalto Future Media Center,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,A899,School Common ARTS,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,E700,"School Services, BIZ",, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,E701,Department of Accounting,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,E702,Department of Marketing,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,E703,Department of Economics,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,E704,Department of Information and Service Economy,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,E706,Department of Management Studies,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,E707,Department of Finance,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,E710,Center for Markets in Transition (CEMAT),, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,E720,Center for Knowledge and Innovation Research (CKIR),, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,E790,"School Common, BIZ",, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T100,"School services, CHEM",, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T101,Department of Biotechnology and Chemical Technology,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T102,Department of Chemistry,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T103,Department of Materials Science and Engineering,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T104,Department of Forest Products Technology,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T105,Department of Chemistry and Materials Science,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T106,Department of Chemical and Metallurgical Engineering,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T107,Department of Bioproducts and Biosystems,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T199,"Shool common, CHEM",, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T200,"School cervices, ENG",, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T201,Department of Architecture,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T210,Design Factory,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T211,"School common, ENG",, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T212,Department of Mechanical Engineering,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T213,Department of 
Built Environment,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T214,Department of Civil Engineering,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T300,School services SCI,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T302,Department of Mathematics and Systems Analysis,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T304,Department of Applied Physics,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T307,Department of Industrial Engineering and Management,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T310,EIT Digital Helsinki,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T313,Department of Computer Science,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T314,Department of Neuroscience and Biomedical Engineering,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T320,"School common, SCI",, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T400,School services ELEC,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T403,Deptartment of Micro & Nanoscience,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T404,Department of Radio Science & Engineering,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T405,Department of Signal Processing & Acoustics,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T407,Department of Communications & Networking,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T408,MetsƤhovi Radio Observatory,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T409,Aalto Nanofab,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T410,Department of Electrical Engineering & Automation,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T411,Department of Electronics & Nanoengineering,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,T499,"School common, ELEC",, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,U100,University level,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,U900,"Leadership and LS Support Serv, Joint Units",, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,U901,Research and Innovation Serv,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,U902,Learning Services,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,U905,Finance Services,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,U906,HR Services,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,U908,Communications Services,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,U909,IT Services,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,U912,Campus Services,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,U915,Secretarial Services,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,U917,Fundraising,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,U918,Endowment,, -Aalto yliopisto,Aalto University,Aalto universitetet,10076,,U920,Aalto Common Items,, -Centria-ammattikorkeakoulu,Centria University of Applied Sciences,,02536,,,,http://isni.org/isni/0000000110165683,3556 -Centria-ammattikorkeakoulu,Centria University of Applied Sciences,,02536,,Liiktal,Liiketalouden koulutus,, -Centria-ammattikorkeakoulu,Centria University of Applied Sciences,,02536,,Kemtek,Kemiantekniikan koulutus,, -Centria-ammattikorkeakoulu,Centria University of Applied Sciences,,02536,,Tiettek,Tietotekniikan koulutus,, 
+Aalto-yliopisto,Aalto University,,10076,,,,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,A855,ARTS Infra,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U920,Aalto Common Items,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T409,Aalto Nanofab,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,A850,Aalto Studios,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U917,Advancement and Corporate Engagament Services,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U912,Campus Services,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,E720,Center for Knowledge and Innovation Research (CKIR,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,E710,Center for Markets in Transition (CEMAT),, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U908,Communications Services,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T407,Department of Communications & Networking,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,E701,Department of Accounting,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T304,Department of Applied Physics,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T201,Department of Architecture,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,A805,Department of Art,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T107,Department of Bioproducts and Biosystems,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T101,Department of Biotechnology and Chemical Technolog,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T213,Department of Built Environment,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T106,Department of Chemical and Metallurgical Engineeri,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T102,Department of Chemistry,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T105,Department of Chemistry and Materials Science,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T214,Department of Civil Engineering,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T313,Department of Computer Science,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,A803,Department of Design,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,E703,Department of Economics,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T410,Department of Electrical Engineering & Automation,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T411,Department of Electronics & Nanoengineering,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,A801,"Department of Film, Television and Scenography",, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,E707,Department of Finance,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T104,Department of Forest Products Technology,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T307,Department of Industrial Engineering and Managemen,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,E704,Department of Information and Service Management,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,E706,Department of Management Studies,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,E702,Department of Marketing,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T103,Department of Materials Science and Engineering,, +Aalto-yliopisto,Aalto 
University,Aalto-universitetet,10076,,T302,Department of Mathematics and Systems Analysis,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T212,Department of Mechanical Engineering,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,A802,Department of Media,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T314,Department of Neuroscience and Biomedical Engineer,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T404,Department of Radio Science & Engineering,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T405,Department of Signal Processing & Acoustics,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T403,Deptartment of Micro & Nanoscience,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T210,Design Factory,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T310,EIT Digital Helsinki,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U918,Endowment,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U300,Endowment,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U905,Finance Services,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U906,HR Services,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U909,IT Services,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U924,Innovation Ecosystem Services,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U410,Joint Learning,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U420,Joint entrepreneurship act.,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U926,Language Center,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U900,"Leadership and Leadership Support Services, Joint",, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U902,Learning Services,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U925,Legal Services,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T408,MetsƤhovi Radio Observatory,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U400,OtaNano,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U901,Research Services,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,A899,School Common ARTS,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,E790,"School Common, BIZ",, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,E700,"School Services, BIZ",, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T200,"School cervices, ENG",, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T499,"School common, ELEC",, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T211,"School common, ENG",, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T320,"School common, SCI",, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,ARTS,"School of Arts, Design and Architecture",, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,BIZ,School of Business,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,CHEM,School of Chemical Engineering,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,ELEC,School of Electrical Engineering,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,ENG,School of Engineering,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,SCI,School of Science,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T400,School services ELEC,, +Aalto-yliopisto,Aalto 
University,Aalto-universitetet,10076,,T300,School services SCI,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,A800,"School services, ARTS",, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T100,"School services, CHEM",, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U915,Secretarial Services,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U919,Security and Lobby Services,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T199,"Shool common, CHEM",, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U200,U JSI,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U100,University level,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U,University level and joint,, +Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U,"University level, Joint units & Endowment",, +AlkoholitutkimussƤƤtiƶ,The Finnish Foundation for Alcohol Studies,,02009295,,,,, +CSC - Tieteen tietotekniikan keskus Oy,CSC ā€“ IT Center for Science,,09206320,,,,, +Centria-ammattikorkeakoulu,Centria University of Applied Sciences,,02536,,,,, +Centria-ammattikorkeakoulu,Centria University of Applied Sciences,Centria-ammattikorkeakoulu,02536,,Huma,Humanistisen ja kasvatusalan koulutus,, +Centria-ammattikorkeakoulu,Centria University of Applied Sciences,Centria-ammattikorkeakoulu,02536,,Kemtek,Kemiantekniikan koulutus,, +Centria-ammattikorkeakoulu,Centria University of Applied Sciences,Centria-ammattikorkeakoulu,02536,,Korkpal,Korkeakoulupalvelut,, +Centria-ammattikorkeakoulu,Centria University of Applied Sciences,Centria-ammattikorkeakoulu,02536,,Liiktal,Liiketalouden koulutus,, +Centria-ammattikorkeakoulu,Centria University of Applied Sciences,Centria-ammattikorkeakoulu,02536,,Musa,Musiikin koulutus,, +Centria-ammattikorkeakoulu,Centria University of Applied Sciences,Centria-ammattikorkeakoulu,02536,,Sosia,Sosiaalialan koulutus,, Centria-ammattikorkeakoulu,Centria University of Applied Sciences,,02536,,Sahtek,SƤhkƶ- ja automaatiotekniikan koulutus,, -Centria-ammattikorkeakoulu,Centria University of Applied Sciences,,02536,,Terva,Terveysalan koulutus,, -Centria-ammattikorkeakoulu,Centria University of Applied Sciences,,02536,,Sosia,Sosiaalialan koulutus,, -Centria-ammattikorkeakoulu,Centria University of Applied Sciences,,02536,,Musa,Musiikin koulutus,, -Centria-ammattikorkeakoulu,Centria University of Applied Sciences,,02536,,Huma,Humanistisen ja kasvatusalan koulutus,, -Centria-ammattikorkeakoulu,Centria University of Applied Sciences,,02536,,YAMK,YlemmƤt AMK-tutkinnot,, -Centria-ammattikorkeakoulu,Centria University of Applied Sciences,,02536,,Korkpal,Korkeakoulupalvelut,, -Centria-ammattikorkeakoulu,Centria University of Applied Sciences,,02536,,TKIpal,TKI-palvelut,, -Centria-ammattikorkeakoulu,Centria University of Applied Sciences,,02536,,Tuotal,Tuotantotalouden koulutus,, -Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,,,http://isni.org/isni/0000000088808274,367 +Centria-ammattikorkeakoulu,Centria University of Applied Sciences,Centria-ammattikorkeakoulu,02536,,SƃĀ¤htek,SƤhkƶ- ja automaatiotekniikan koulutus,, +Centria-ammattikorkeakoulu,Centria University of Applied Sciences,Centria-ammattikorkeakoulu,02536,,SƤhtek,SƤhkƶ- ja automaatiotekniikan koulutus,, +Centria-ammattikorkeakoulu,Centria University of Applied Sciences,Centria-ammattikorkeakoulu,02536,,TKIpal,TKI-palvelut,, +Centria-ammattikorkeakoulu,Centria University of Applied 
Sciences,Centria-ammattikorkeakoulu,02536,,Terva,Terveysalan koulutus,, +Centria-ammattikorkeakoulu,Centria University of Applied Sciences,Centria-ammattikorkeakoulu,02536,,Tiettek,Tietotekniikan koulutus,, +Centria-ammattikorkeakoulu,Centria University of Applied Sciences,Centria-ammattikorkeakoulu,02536,,Tuotal,Tuotantotalouden koulutus,, +Centria-ammattikorkeakoulu,Centria University of Applied Sciences,Centria-ammattikorkeakoulu,02536,,YAMK,YlemmƤt AMK-tutkinnot,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,,,, Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,11,11 Yleinen ryhmƤ,, Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,12,"12 Communication, Communities and Languages",, Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,13,13 Opiskelijoiden ohjaus ja kasvu,, Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,14,14 Pedagogical and community development,, Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,21,21 Yleinen ryhmƤ,, Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,22,22 Aikuis- ja tyƶikƤisen vƤestƶn kanssa tehtƤvƤ tyƶ,, -Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,23,Alusta DIGI-USER,, Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,24,24 Hoitotyƶn ja terveyden edistƤminen,, Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,26,"26 Lapsi-, nuoriso- ja perhetyƶ",, Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,31,31 Yleinen ryhmƤ,, @@ -94,2347 +115,4232 @@ Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,41,4 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,42,42 TKI-opinnot ja -toiminta,, Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,43,"43 YlemmƤt tutkinnot, tyƶkƤytƤnnƶt ja johtamistoiminta",, Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,51,51 Yleinen ryhmƤ,, -Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,61,61 Rehtoraatti,, -Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,62,"62 ViestintƤ, tiedotus ja markkinointi",, -Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,63,63 Henkilƶstƶpalvelut,, -Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,64,64 Talouspalvelut,, -Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,65,65 Tietohallintopalvelut,, -Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,66,66 Kirjasto ja tietopalvelut,, -Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,68,68 Tilapalvelut,, -Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,74,"74 Opintotoimistopalvelut, hakutoimisto",, Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,75,75 KansainvƤlisyyspalvelut,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,,,http://isni.org/isni/0000000406476405,570 -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,AOKK,Ammatillinen opettajankoulutus,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,JOURA,Journalismin ko,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,ASSI,Johdon 
assistenttityƶn ja kielten ko.,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,FINA,Finanssi- ja talousasiantuntijan koulutusohjelma,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,GLOBBA,DP in International Business,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,HELI,Liiketalouden ko,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,LIIPO,Liiketalouden ko,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,23,Alusta DIGI-USER,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,65,Digipalvelut,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,65,Digipalvelut;;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,87,Hanketoiminta;;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,63,Henkilostopalvelut,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,63,Henkilostopalvelut;;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,68,Infrapalvelut,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,68,Infrapalvelut;;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,84,Innovaatiotoiminta;;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,67,Kampuspalvelut,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,67,Kampuspalvelut;;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,88,Kansainvalinen toiminta,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,88,Kansainvalisyys;;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,66,Kirjasto ja tietopalvelut,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,66,Kirjasto ja tietopalvelut;;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,96,Kirkolliset opinnot,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,96,Kirkolliset opinnot;;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,91,Koulutus,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,91,Koulutus;;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,85,Liiketoiminta ja innovaatiot;;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,74,Opiskelijapalvelut,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,74,Opiskelijapalvelut;;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,86,Pedagogiikka ja kehittaminen;;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,86,Pedagogiikka ja 
tutkimus,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,61,Rehtoraatti,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,61,Rehtoraatti;;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,92,Sairaanhoitaja- ja terveydenhoitajatutkinnot (temk,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,93,Sairaanhoitaja- ja terveydenhoitajatutkinnot (temk,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,94,Sosionomitutkinnot (temkok 1-3),, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,94,Sosionomitutkinnot (temkok 1-3);;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,95,"Sosionomitutkinnot (temkok 4-7, DSS)",, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,95,"Sosionomitutkinnot (temkok 4-7, DSS);;",, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,87,TKI-palvelut,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,84,TKI-toiminta,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,64,Talouspalvelut,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,64,Talouspalvelut;;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,97,Tulkkaustutkinnot ja kielet,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,97,Tulkkaustutkinnot ja kielet;;,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,85,Tyoelamapalvelut,, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,62,"Viestinta, tiedotus ja markkinointi",, +Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,62,"Viestinta, tiedotus ja markkinointi;;",, +Geologian tutkimuskeskus,Geological Survey of Finland,,5040011,,,,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,504030014,Alueellinen geotieto ALG,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,504030016,Digitaaliset tuotteet ja palvelut DIP,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040200350,Digitaalisuus ja tietovarannot,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040300311,Energia ja rakentamisen ratkaisut,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030001,Geoenergia GNR,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040300361,Geofysiikan ratkaisut,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,504030012,Geofysiikan sovellukset GSO,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,504030015,Geotietovarannon hallinta GEH,, +Geologian tutkimuskeskus,Geological Survey of 
Finland,Geologiska forskningscentralen,5040011,,5040200510,"Henkilƶstƶ, osaaminen ja lakiasiat",, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,50402003,5040200310,"Henkilƶstƶ, osaaminen ja tyƶympƤristƶt",, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,504030003,Kalliorakentaminen ja sijoituspaikat KAS,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040300355,"Kiertotalouden ratkaisut, Espoon tutkimuslaboratorio",, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040300356,"Kiertotalouden ratkaisut, KaivosympƤristƶt ja sivuvirrat",, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040300352,"Kiertotalouden ratkaisut, Oku koetehdas",, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040300353,"Kiertotalouden ratkaisut, Oku rikastuslaboratorio",, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040300354,"Kiertotalouden ratkaisut, Oku rikastusmineralogia",, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040300351,"Kiertotalouden ratkaisut, Yhteiset",, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040300171,Malmit ja teollisuusmineraalit MTM,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,504030005,Merigeologia MRG,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040300341,Mineraalitalouden ratkaisut,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,504030006,Mineraalitalous ja malmigeologia MIM,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,504030009,Mineraalitekniikka ja materiaalit MMA,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,504030007,Mineraalivarannot MIV,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,504030011,Pohjavesi PVI,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040200330,Strategia ja suunnittelu,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040200710,Strategia ja toiminnan tuki,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040200,50402003,Strateginen johto,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,504030008,Teollisuusmineraalit TMI,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040200340,Tieteellinen tutkimus,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040200610,Tieteellinen tutkimus,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040300301,Tietoratkaisut,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040200,5040200,Toiminnan johtaminen,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,5040300,Tulosyksikƶt,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska 
forskningscentralen,5040011,,504030013,TuotantoympƤristƶt ja kierrƤtys TUK,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,504030010,Turvevarannot TUR,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040300321,Vesiratkaisut,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040200320,ViestintƤ ja markkinointi,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040200420,ViestintƤ ja markkinointi,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040300018,Yhdyskunnat ja geoenergia YGE,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,504030002,Yhdyskunnat ja rakentaminen YRA,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,504030004,YmpƤristƶgeologia YMP,, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040300332,"YmpƤristƶratkaisut, Merigeologia",, +Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040300331,"YmpƤristƶratkaisut, YmpƤristƶ",, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,,,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,AOKK,AOKK Ammatillinen opettajakorkeakoulu,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,AOKK,AOKK Ammatillinen opettajakorkeakoulu;;,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2400,Analytiikka ja kehittƤminen,, Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,MUBBA,DP for Multilingual Management Assistants,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,MYYNTI,Myyntityƶn ko,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,POBBA,DP in International Business,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,SAMPO,DP in International Sales and Marketing,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,VIMA,Myyntityƶn ja visuaalisen markkinoinnin ko,, Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,ABBA,DP in Aviation Business,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,HETI,TietojenkƤsittelyn ko,, Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,BITE,DP in Business Information Technology,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,LOT,Liikunnan ja vapaa-ajan ko,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,SPORT,DP in Sports and Leisure Management,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,HOTRA,Hotelli- ja ravintola-alan liikkeenjohdon ko.,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,RUOKA,Hotelli- ja ravintola-alan ko.,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,RUOKAT,Ruokatuotannon johtamisen ko,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,MATKA,Matkailun liikkeenjohdon ko.,, 
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,HOSBA,"DP in Hotel, Restaurant and Tourism Management",, Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,HOTEM,"DP in Hospitality, Tourism and Experience Management",, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,HOSBA,"DP in Hotel, Restaurant and Tourism Management",, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,GLOBBA,DP in International Business,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,POBBA,DP in International Business,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,SAMPO,DP in International Sales and Marketing,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,SPORT,DP in Sports and Leisure Management,, Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,TOBBA,DP in Tourism,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,DT,"DT-HTP, TKI, Kovat",, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,DT,DT-Kovat ja muut (ei opetus);;,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,DT,DT-Medianomi-opetus Pasilassa,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,DT,DT-Medianomi-opetus Pasilassa;;,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,DT,DT-Tradenomi IT -opetus Pasilassa,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,DT,DT-Tradenomi IT-opetus Pasilassa;;,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,DT,DT-Tradenomi-opetus Pasilassa,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,DT,DT-Tradenomi-opetus Pasilassa;;,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,5500,Digipalvelut ja tietohallinto,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2410,Digitaaliset palvelut,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTH,"EHTH-HTP, TKI ja Kovat Haagassa",, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTH,EHTH-Kovat ja muut (ei opetus) Haagassa;;,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTH,EHTH-Restonomi-opetus Haagassa,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTH,EHTH-Restonomi-opetus Haagassa;;,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTP,"EHTP-HTP, TKI ja Kovat Porvoossa",, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTP,EHTP-Kovat ja muut (ei opetus) Porvoossa;;,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia 
ammattikorkeakoulu,10056,,EHTP,EHTP-Restonomi-opetus Porvoossa,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTP,EHTP-Restonomi-opetus Porvoossa;;,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTP,EHTP-Tradenomi-opetus Porvoossa,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTP,EHTP-Tradenomi-opetus Porvoossa;;,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTV,"EHTV-HTP, TKI ja Kovat VierumƤellƤ",, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTV,EHTV-Kovat ja muut (ei opetus) Vierumaella;;,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTV,EHTV-Liikunnanohjaaja-koulutus VierumƤellƤ,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTV,EHTV-Liikunnanohjaaja-opetus Vierumaella;;,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,5510,Elinkaari- ja kƤyttƤjƤpalvelut,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,FINA,Finanssi- ja talousasiantuntijan koulutusohjelma,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2100,HR ja johtaminen,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,HTP,"HTP-Hallinto- ja tukipalvelut (KOPA, HR, Tieto, Ta",, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,HTP,HTP-Kaupalliset palvelut,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,HTP,HTP-Kaupalliset palvelut;;,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,5200,Henkilƶstƶ ja kulttuuri,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,RUOKA,Hotelli- ja ravintola-alan ko.,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,HOTRA,Hotelli- ja ravintola-alan liikkeenjohdon ko.,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2420,ICT ja liiketoiminta,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,5520,ICT-infra ja pƤƤsynhallinta,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,1400,IT-tradenomi,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2430,Infra ja pilvipalvelut,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,1900,Jatkuva oppiminen,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,ASSI,Johdon assistenttityƶn ja kielten ko.,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2500,Journalismi,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,JOURA,Journalismin ko,, +Haaga-Helia 
ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2110,Juridiikka,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,4300,KansainvƤlisyyspalvelut,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2510,Kielet ja kansainvƤlisyys,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,5420,Kiinteistƶt ja turvallisuus,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,5120,Koulutuksen palvelut,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,4200,Koulutusvienti,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2000,Kp 2000,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,LT,LT-Kovat ja muut (ei opetus);;,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,LT,"LT-TKI, HTP, Kovat",, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,LTM,LT-Tradenomi-opetus Malmilla,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,LTM,LT-Tradenomi-opetus Malmilla;;,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,LTP,LT-Tradenomi-opetus Pasilassa,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,LTP,LT-Tradenomi-opetus Pasilassa;;,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,HELI,Liiketalouden ko,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,LIIPO,Liiketalouden ko,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,LOT,Liikunnan ja vapaa-ajan ko,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,1600,Liikunnanohjaaja,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2600,Liikunta ja hyvinvointi,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2200,Majoitus- ja ravitsemisliiketoiminta,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2520,Markkinointi ja viestintƤ,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,1800,Master-tutkinnot,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2210,Matkailuliiketoiminta,, Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,POMO,Matkailun ko,, -Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,HTP,Hallinto- ja tukipalvelut,, -Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,,,http://isni.org/isni/0000000404102071,11 -Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H01,Yliopistopalvelut,, -Helsingin yliopisto,University of Helsinki,Helsingfors 
universitet,01901,,H10,Teologinen tiedekunta,, -Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H20,Oikeustieteellinen tiedekunta,, -Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H30,LƤƤketieteellinen tiedekunta,, -Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H40,Humanistinen tiedekunta,, -Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H50,Matemaattis- luonnontieteellinen tiedekunta,, -Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H55,Farmasian tiedekunta,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,MATKA,Matkailun liikkeenjohdon ko.,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,1500,Medianomi,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,3100,Myynnin kehittƤminen ja digitalisaatio,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2120,Myynti ja asiakkuudet,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,4100,Myynti ja partneritoiminta,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,VIMA,Myyntityƶn ja visuaalisen markkinoinnin ko,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,MYYNTI,Myyntityƶn ko,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2440,Ohjelmistotuotanto,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,5110,Opinto- ja hakijapalvelut,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,3500,Oppimis- ja tutkimusympƤristƶt,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,3200,Palveluliiketoiminnan kehittƤminen ja muotoilu,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2130,Palveluliiketoiminta,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2700,Pedagogiikka,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,4400,Pedagoginen tƤydennyskoulutus,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2140,Raha ja talous,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,5000,"Rehtori, toimitusjohtaja",, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,1200,Restonomi,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,RUOKAT,Ruokatuotannon johtamisen ko,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,TKI,TKI-Master-tiimi,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,TKI,TKI-Start-Up School,, +Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,TKI,TKI-Start-Up School;;,, +Haaga-Helia 
ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,TKI,TKI-Tukipalvelut,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,TKI,TKI-Tukipalvelutiimi;;,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,TKI,TKI-Ylempi AMK-opetus;;,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,5300,Talous,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,HETI,TietojenkƤsittelyn ko,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,3650,Tietopalvelut,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2450,Toiminnanohjaus,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,5100,Toiminnanohjaus ja laatu,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,1100,Tradenomi,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,1150,Tradenomi ENG,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,3600,Tutkimuspalvelut,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,3610,Ulysseus Eurooppa-yliopisto,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,1000,Ura- ja opinto-ohjaus,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2610,Urheilu,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,3300,Vaikuttava ammatillinen pedagogiikka,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,5400,Vastuullisuus ja opiskelijahyvinvointi,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,5410,"ViestintƤ, markkinointi ja alumnitoiminta",,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2150,YrittƤjyys,,
+Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,3400,YrittƤjyys ja liiketoiminnan uudistaminen,,
+Helsingin Sanomain SƤƤtiƶ sr,Helsingin Sanomat Foundation,,20668235,,,,,
+Helsingin seudun yliopistollisen keskussairaalan erityisvastuualue,Helsinki University Hospital Catchment Area,,15675350,,,,,
+Helsingin yliopisto,University of Helsinki,,01901,,,,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H930,Avoin yliopisto,,
 Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H57,Bio- ja ympƤristƶtieteellinen tiedekunta,,
-Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H60,KƤyttƤytymistieteellinen tiedekunta,,
-Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H70,Valtiotieteellinen tiedekunta,,
-Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H74,Svenska social- och kommunalhƶgskolan,,
-Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H80,Maatalous- metsƤtieteellinen tiedekunta,,
-Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H90,ElƤinlƤƤketieteellinen tiedekunta,,
-Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H903,Tila- ja kiinteistƶkeskus,,
-Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H906,Kielikeskus,,
-Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H907,Tietotekniikkakeskus,,
-Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H909,UniSport,,
-Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H916,Neurotieteen tutkimuskeskus,,
-Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H918,Tutkijakollegium,,
 Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H919,Biotekniikan instituutti,,
-Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H92,Helsingin yliopiston tutkijakoulut,,
-Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H930,Avoin yliopisto,,
-Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H945,Suomen molekyylilƤƤketieteen instituutti,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H90,ElƤinlƤƤketieteellinen tiedekunta,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H55,Farmasian tiedekunta,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H985,Helsingin yliopiston kirjasto,,
 Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H955,Helsingin yliopiston koe-elƤinkeskus,,
 Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H960,Helsingin yliopiston koulutus- ja kehittƤmispalvelut,,
 Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H97,Helsinki Institute of Life Science,,
-Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H978,Luonnontieteellinen keskusmuseo,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H40,Humanistinen tiedekunta,,
 Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H981,Kansalliskirjasto,,
-Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H985,Helsingin yliopiston kirjasto,,
-Humanistinen ammattikorkeakoulu,HUMAK University of Applied Sciences,,02631,,,,http://isni.org/isni/000000040624783X,1875
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H60,Kasvatustieteellinen tiedekunta,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H906,Kielikeskus,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H978,Luonnontieteellinen keskusmuseo,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H30,LƤƤketieteellinen tiedekunta,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H80,Maatalous-metsƤtieteellinen tiedekunta,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H50,Matemaattis-luonnontieteellinen tiedekunta,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H916,Neurotieteen tutkimuskeskus,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H20,Oikeustieteellinen tiedekunta,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H945,Suomen molekyylilƤƤketieteen instituutti,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H74,Svenska social- och kommunalhƶgskolan,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H10,Teologinen tiedekunta,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H907,Tietotekniikkakeskus,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H903,Tila- ja kiinteistƶkeskus,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H92,Tohtoriohjelmat,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H918,Tutkijakollegium,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H909,Unisport,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H70,Valtiotieteellinen tiedekunta,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H00,Yliopiston johto ja yhteiset,,
+Helsingin yliopisto,University of Helsinki,Helsingfors universitet,01901,,H01,Yliopistopalvelut,,
+Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,,02631,,,,,
+Humanistinen ammattikorkeakoulu,HUMAK University of Applied Sciences,,02631,,30,Humanistiset alat,,
+Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,Humanistinen ammattikorkeakoulu,02631,,400,Innovaatiopalvelut,,
+Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,Humanistinen ammattikorkeakoulu,02631,,300,KehittƤmispalvelut,,
+Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,Humanistinen ammattikorkeakoulu,02631,,200,KulTu,,
+Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,Humanistinen ammattikorkeakoulu,02631,,100,Kulttuuri,,
 Humanistinen ammattikorkeakoulu,HUMAK University of Applied Sciences,,02631,,10,Taiteet ja kulttuurialat,,
 Humanistinen ammattikorkeakoulu,HUMAK University of Applied Sciences,,02631,,20,Terveys- ja hyvinvointialat,,
-Humanistinen ammattikorkeakoulu,HUMAK University of Applied Sciences,,02631,,30,Humanistiset alat,,
+Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,Humanistinen ammattikorkeakoulu,02631,,300,Tulkkaus,,
 Humanistinen ammattikorkeakoulu,HUMAK University of Applied Sciences,,02631,,50,Yhteiset,,
-HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,,02467,,,,http://isni.org/isni/0000000106852595,9
-HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,,02467,,1,Yhteiset palvelut,,
-HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,,02467,,11,YrittƤjyys ja liiketoimintaosaaminen,,
-HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,,02467,,4,Ammatillinen opettajakorkeakoulu,,
-HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,,02467,,6,Hyvinvointiosaaminen,,
-HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,,02467,,7,Teknologiaosaaminen,,
-HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,,02467,,8,Biotalous,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,,,http://isni.org/isni/0000000121143658,SXTDpC1TdW9tZW4geWxpb3Bpc3Rv
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,202010,Kasvatustieteiden ja psykologian osasto / Yhteiset,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,100000,100000,"Yliopiston johto, yhteiset ja yliopistopalvelut",,
+Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,Humanistinen ammattikorkeakoulu,02631,,400,Yhteiset palvelut,,
+Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,Humanistinen ammattikorkeakoulu,02631,,500,Yhteiset palvelut,,
+Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,Humanistinen ammattikorkeakoulu,02631,,100,Yhteisƶjen kehittƤminen,,
+Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,Humanistinen ammattikorkeakoulu,02631,,200,Yhteisƶpedagogi,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,,02467,,,,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,132,AOKK tukipalvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,132,AOKK tukipalvelut;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,131,Ammatillinen erityisopettaja- ja opinto-ohjaajakou,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,4,Ammatillinen opettajakorkeakoulu,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,4,Ammatillinen opettajakorkeakoulu;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,2,Ammatillinen opettajakoulutus,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,2,Ammatillinen opettajakoulutus;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,60,Ammatillisen opettajakorkeakoulun tyoelamapalvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,101,BIOS tukipalvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,101,BIOS tukipalvelut;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,5,Bio- ja elintarviketekniikka,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,5,Bio- ja elintarviketekniikka;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,61,Biotalouden tyoelamapalvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,61,Biotalouden tyoelamapalvelut;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,8,Biotalouden yksikko,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,8,Biotalouden yksikko;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,102,Biotalousinsinoori,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,102,Biotalousinsinoori;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,48,HAMK Bio,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,48,HAMK Bio;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,89,HAMK Design Factory,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,89,HAMK Design Factory;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,3,HAMK Edu,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,3,HAMK Edu;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,43,HAMK Smart,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,43,HAMK Smart;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,27,HAMK Tech,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,27,HAMK Tech;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,13,HYOS tukipalvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,13,HYOS tukipalvelut;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,10,Hallinto-ja talouspalvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,10,Hallinto-ja talouspalvelut;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,12,Henkilostopalvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,12,Henkilostopalvelut;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,46,Hoitotyƶ,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,62,Hyvinvointiosaamisen tyoelamapalvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,62,Hyvinvointiosaamisen tyoelamapalvelut;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,6,Hyvinvointiosaamisen yksikko,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,6,Hyvinvointiosaamisen yksikko;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,14,International Business,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,14,International Business;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,67,Kansainvaliset palvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,67,Kansainvaliset palvelut;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,15,Kestava kehitys,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,15,Kestava kehitys;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,16,Kiinteistopalvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,16,Kiinteistopalvelut;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,17,Kirjasto ja tietopalvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,17,Kirjasto ja tietopalvelut;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,18,Konetekniikka,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,18,Konetekniikka;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,44,Koulutuksen tukipalvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,44,Koulutuksen tukipalvelut;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,37,Liikenneala,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,37,Liikenneala;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,20,Liiketalous,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,20,Liiketalous;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,21,Maaseutuelinkeinot ja hevostalous,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,21,Maaseutuelinkeinot ja hevostalous;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,25,Metsatalous,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,25,Metsatalous;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,26,Muotoilu,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,26,Muotoilu;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,29,Puutarhatalous,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,29,Puutarhatalous;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,30,Rakennettu ymparisto,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,30,Rakennettu ymparisto;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,31,Rakennus- ja yhdyskuntatekniikka,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,31,Rakennus- ja yhdyskuntatekniikka;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,33,Sahko- ja automaatiotekniikka,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,33,Sahko- ja automaatiotekniikka;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,32,Sosiaaliala,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,32,Sosiaaliala;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,40,Strateginen viestinta,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,40,Strateginen viestinta;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,34,TEOS tukipalvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,34,TEOS tukipalvelut;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,65,Teknologiaosaamisen tyoelamapalvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,65,Teknologiaosaamisen tyoelamapalvelut;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,7,Teknologiaosaamisen yksikko,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,7,Teknologiaosaamisen yksikko;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,46,Terveys Hml ja Frs;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,35,Tieto- ja viestintatekniikka,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,35,Tieto- ja viestintatekniikka;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,36,Tietohallinto,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,36,Tietohallinto;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,38,Tietojenkasittely,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,38,Tietojenkasittely;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,42,YRLI tukipalvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,42,YRLI tukipalvelut;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,1,Yhteiset palvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,1,Yhteiset palvelut;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,41,Yleishallinto,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,41,Yleishallinto;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,11,Yrittajyyden ja liiketoimintaosaamisen yksikko,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,11,Yrittajyyden ja liiketoimintaosaamisen yksikko;;,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,66,Yrittajyyden tyoelamapalvelut,,
+HƤmeen ammattikorkeakoulu,HƤme University of Applied Sciences,HƤmeen ammattikorkeakoulu,02467,,66,Yrittajyyden tyoelamapalvelut;;,,
+Ilmatieteen laitos,Finnish Meteorological Insitute,Meterologiska institutet,02446647,,,,http://isni.org/isni/0000000122538678,
+Ilmatieteen laitos,Finnish Meteorological Institute,,4940015,,,,,
+Innovaatiorahoituskeskus Business Finland,Innovaatiorahoituskeskus Business Finland,,05126964,,,,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,,10088,,,,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,401010,A.I. Virtanen -instituutti,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,401020,A.I. Virtanen -instituutti / Bioteknologia ja molekulaarinen lƤƤketiede,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,401030,A.I. Virtanen -instituutti / Neurobiologia,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,401010,A.I. Virtanen -instituutti / Yhteiset,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,402010,Farmasian laitos,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,200010,Filosofisen tiedekunnan hallinto,,
 ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,200010,Filosofisen tiedekunnan hallinto / Yhteiset,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,201010,Humanistinen osasto / Yhteiset,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,303010,Fysiikan ja matematiikan laitos,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,205010,Harjoittelukoulut / Joensuu,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,205020,Harjoittelukoulut / Savonlinna,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,501010,Historia- ja maantieteiden laitos,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,403010,Hoitotieteen laitos,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,2010,Humanistinen osasto,,
 ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,201020,Humanistinen osasto / Suomen kieli ja kulttuuritieteet,,
 ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,201030,Humanistinen osasto / Vieraat kielet ja kƤƤnnƶstiede,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,201010,Humanistinen osasto / Yhteiset,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,201020,"Humanistinen osasto, suomen kieli ja kulttuuritiet",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,201020,"Humanistinen osasto, suomen kieli ja kulttuuritieteet",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,201030,"Humanistinen osasto, vieraat kielet ja kƤƤnnƶst",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,201030,"Humanistinen osasto, vieraat kielet ja kƤƤnnƶstiede",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,201010,"Humanistinen osasto, yhteiset",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,600000,600000,ItƤ-Suomen yliopiston apteekki,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,600010,ItƤ-Suomen yliopiston apteekki,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,2050,ItƤ-Suomen yliopiston harjoittelukoulu,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,205010,ItƤ-Suomen yliopiston harjoittelukoulu,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,205010,"ItƤ-Suomen yliopiston harjoittelukoulu, Joensuu",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,205020,"ItƤ-Suomen yliopiston harjoittelukoulu, Savonlinna",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,700000,Jatkuvan oppimisen keskus,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,506010,Karjalan tutkimuslaitos,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,2020,Kasvatustieteiden ja psykologian osasto,,
 ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,202020,Kasvatustieteiden ja psykologian osasto / Erityispedagogiikka,,
 ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,202030,"Kasvatustieteiden ja psykologian osasto / Kasvatustiede, aikuiskasvatus ja ohjaus",,
 ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,202040,Kasvatustieteiden ja psykologian osasto / Psykologia,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,203010,Soveltavan kasvatustieteen ja opettajankoulutuksen osasto / Yhteiset,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,202010,Kasvatustieteiden ja psykologian osasto / Yhteiset,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,202040,"Kasvatustieteiden ja psykologian osasto, Psykologi",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,202040,"Kasvatustieteiden ja psykologian osasto, Psykologia",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,202020,"Kasvatustieteiden ja psykologian osasto, erityispe",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,202020,"Kasvatustieteiden ja psykologian osasto, erityispedagogiikka",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,202030,"Kasvatustieteiden ja psykologian osasto, kasvatust",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,202030,"Kasvatustieteiden ja psykologian osasto, kasvatustiede, aikuiskasvatus ja ohjaus",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,202010,"Kasvatustieteiden ja psykologian osasto, yhteiset",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,502010,Kauppatieteiden laitos,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,304010,Kemian laitos,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,800010,Kielikeskus,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,800020,Kirjasto,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,405010,Koe-elƤinkeskus,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,700000,Koulutus- ja kehittƤmispalvelu Aducate,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,300010,Luonnontieteiden ja metsƤtieteiden tiedekunnan ha,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,300010,Luonnontieteiden ja metsƤtieteiden tiedekunnan hallinto,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,300000,300010,Luonnontieteiden ja metsƤtieteiden tiedekunnan hallinto / Yhteiset,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,4040,LƤƤketieteen laitos,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,404020,LƤƤketieteen laitos / BiolƤƤketiede,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,404050,LƤƤketieteen laitos / HammaslƤƤketiede,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,404030,LƤƤketieteen laitos / Kansanterveystiede ja kliininen ravitsemustiede,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,404040,LƤƤketieteen laitos / Kliininen lƤƤketiede,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,404010,LƤƤketieteen laitos / Yhteiset,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,404020,"LƤƤketieteen laitos, biolƤƤketiede",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,404020,"LƤƤketieteen laitos, biolƤƤketieteen yksikkƶ",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,404050,"LƤƤketieteen laitos, hammaslƤƤketiede",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,404030,"LƤƤketieteen laitos, kansanterveystiede ja kliininen ravitsemustiede",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,404030,"LƤƤketieteen laitos, kansanterveystieteen ja kli",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,404040,"LƤƤketieteen laitos, kliininen lƤƤketiede",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,404040,"LƤƤketieteen laitos, kliinisen lƤƤketieteen yk",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,404010,"LƤƤketieteen laitos, yhteiset",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,500000,507010,Matkailualan opetus- ja tutkimuslaitos,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,300000,309010,MekrijƤrven tutkimusasema,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,305010,MetsƤtieteiden osasto,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,503010,Oikeustieteiden laitos,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,308010,SIB-labs -infrastruktuuriyksikkƶ,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,308010,SIB-labs infrastruktuuriyksikkƶ,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,504010,Sosiaali- ja terveysjohtamisen laitos,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,303510,Sovelletun fysiikan laitos,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,203010,Soveltavan kasvatustieteen ja opettajankoulutuksen,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,203020,Soveltavan kasvatustieteen ja opettajankoulutuksen osasto,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,2030,Soveltavan kasvatustieteen ja opettajankoulutuksen osasto,,
 ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,203020,Soveltavan kasvatustieteen ja opettajankoulutuksen osasto / Joensuu,,
 ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,203030,Soveltavan kasvatustieteen ja opettajankoulutuksen osasto / Savonlinna,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,204010,Teologian osasto / Yhteiset,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,203010,Soveltavan kasvatustieteen ja opettajankoulutuksen osasto / Yhteiset,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,203030,"Soveltavan kasvatustieteen ja opettajankoulutuksen osasto, Savonlinna",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,203010,"Soveltavan kasvatustieteen ja opettajankoulutuksen osasto, yhteiset",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,2040,Teologian osasto,,
 ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,204020,Teologian osasto / LƤntinen teologia,,
 ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,204030,Teologian osasto / Ortodoksinen teologia,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,205010,Harjoittelukoulut / Joensuu,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,205020,Harjoittelukoulut / Savonlinna,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,300000,300010,Luonnontieteiden ja metsƤtieteiden tiedekunnan hallinto / Yhteiset,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,300000,303010,Fysiikan ja matematiikan laitos,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,300000,303510,Sovelletun fysiikan laitos,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,300000,304010,Kemian laitos,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,300000,305010,MetsƤtieteiden osasto,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,300000,306020,TietojenkƤsittelytieteen laitos,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,300000,307010,YmpƤristƶ- ja biotieteiden laitos,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,300000,308010,SIB-labs -infrastruktuuriyksikkƶ,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,300000,309010,MekrijƤrven tutkimusasema,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,200000,204010,Teologian osasto / Yhteiset,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,204020,"Teologian osasto, lƤntinen teologia",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,204030,"Teologian osasto, ortodoksinen teologia",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,204010,"Teologian osasto, yhteiset",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,400010,Terveystieteiden tiedekunnan hallinto,,
 ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,400010,Terveystieteiden tiedekunnan hallinto / Yhteiset,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,401010,A.I. Virtanen -instituutti / Yhteiset,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,401020,A.I. Virtanen -instituutti / Bioteknologia ja molekulaarinen lƤƤketiede,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,401030,A.I. Virtanen -instituutti / Neurobiologia,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,402010,Farmasian laitos,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,403010,Hoitotieteen laitos,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,404010,LƤƤketieteen laitos / Yhteiset,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,404020,LƤƤketieteen laitos / BiolƤƤketiede,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,404030,LƤƤketieteen laitos / Kansanterveystiede ja kliininen ravitsemustiede,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,404040,LƤƤketieteen laitos / Kliininen lƤƤketiede,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,404050,LƤƤketieteen laitos / HammaslƤƤketiede,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,400000,405010,Koe-elƤinkeskus,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,306020,TietojenkƤsittelytieteen laitos,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,500010,Yhteiskuntatieteiden ja kauppatieteiden tiedekunna,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,500010,Yhteiskuntatieteiden ja kauppatieteiden tiedekunnan hallinto,,
 ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,500000,500010,Yhteiskuntatieteiden ja kauppatieteiden tiedekunnan hallinto / Yhteiset,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,500000,501010,Historia- ja maantieteiden laitos,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,500000,502010,Kauppatieteiden laitos,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,500000,503010,Oikeustieteiden laitos,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,500000,504010,Sosiaali- ja terveysjohtamisen laitos,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,500000,505010,Yhteiskuntatieteiden laitos,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,500000,506010,Karjalan tutkimuslaitos,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,500000,507010,Matkailualan opetus- ja tutkimuslaitos,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,600000,600000,ItƤ-Suomen yliopiston apteekki,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,700000,700000,Koulutus- ja kehittƤmispalvelu Aducate,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,800000,800010,Kielikeskus,,
-ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,800000,800020,Kirjasto,,
-JyvƤskylƤn ammattikorkeakoulu,JAMK University of Applied Sciences,,02504,,,,http://isni.org/isni/0000000404148475,Snl2w6Rza3lsw6RuIGFtbWF0dGlrb3JrZWFrb3VsdQ==
-JyvƤskylƤn ammattikorkeakoulu,JAMK University of Applied Sciences,,02504,,1,Ammatillinen opettajakorkeakoulu,,
-JyvƤskylƤn ammattikorkeakoulu,JAMK University of Applied Sciences,,02504,,2,Hyvinvointiyksikkƶ,,
-JyvƤskylƤn ammattikorkeakoulu,JAMK University of Applied Sciences,,02504,,3,Liiketoimintayksikkƶ,,
-JyvƤskylƤn ammattikorkeakoulu,JAMK University of Applied Sciences,,02504,,4,Teknologiayksikkƶ,,
-JyvƤskylƤn ammattikorkeakoulu,JAMK University of Applied Sciences,,02504,,5,Hallintoyksikkƶ,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,,,http://isni.org/isni/0000000121799569,Snl2w6Rza3lsw6RuIHlsaW9waXN0bw==
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,505010,Yhteiskuntatieteiden laitos,,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,100000,"Yliopiston johto, yhteiset ja yliopistopalvelut",,
+ItƤ-Suomen yliopisto,University of Eastern Finland,Ɩstra Finlands universitet,10088,,307010,YmpƤristƶ- ja biotieteiden laitos,,
+JyvƤskylƤn ammattikorkeakoulu,JAMK University of Applied Sciences,,02504,,,,,
+JyvƤskylƤn ammattikorkeakoulu,JAMK University of Applied Sciences,JyvƤskylƤn ammattikorkeakoulu,02504,,1,Ammatillinen opettajakorkeakoulu,,
+JyvƤskylƤn ammattikorkeakoulu,JAMK University of Applied Sciences,JyvƤskylƤn ammattikorkeakoulu,02504,,5,Hallintoyksikkƶ,,
+JyvƤskylƤn ammattikorkeakoulu,JAMK University of Applied Sciences,JyvƤskylƤn ammattikorkeakoulu,02504,,2,Hyvinvointiyksikkƶ,,
+JyvƤskylƤn ammattikorkeakoulu,JAMK University of Applied Sciences,JyvƤskylƤn ammattikorkeakoulu,02504,,3,Liiketoimintayksikkƶ,,
+JyvƤskylƤn ammattikorkeakoulu,JAMK University of Applied Sciences,JyvƤskylƤn ammattikorkeakoulu,02504,,4,Teknologiayksikkƶ,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,,01906,,,,,
 JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,20019100,Agora Center,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,216010,Avance Johtamiskoulutus,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,219200,Avoimen tiedon keskus,,
 JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,219110,Avoin yliopisto,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,219200,Kirjasto,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,219120,Kokkolan yliopistokeskus Chydenius,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,20019140,Koulutuksen tutkimuslaitos,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,219230,Tiedemuseo,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,219220,Yliopiston kielikeskus,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,219700,Johto ja yliopiston yhteiset,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,219500,Yliopistopalvelut,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,213000,Humanistinen tiedekunta,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,213000,213050,Historian ja etnologian laitos,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,213000,213040,Kielten laitos,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,213000,213010,Musiikin laitos,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,213000,213060,Soveltavan kielentutkimuksen keskus,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,213000,213030,Taiteiden ja kulttuurin tutkimuksen laitos,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,213000,213020,ViestintƤtieteiden laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,214010,Bio- ja ympƤristƶtieteiden laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,214020,Fysiikan laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,213050,Historian ja etnologian laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,213000,Humanistis-yhteiskuntatieteellinen tiedekunta,,
 JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,217000,Informaatioteknologian tiedekunta,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,217000,217010,TietojenkƤsittelytieteiden laitos,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,217000,217020,Tietotekniikan laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,219700,Johto ja yliopiston yhteiset,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,211000,Kasvatustieteiden ja psykologian tiedekunta,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,211020,Kasvatustieteiden laitos,,
 JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,216000,Kauppakorkeakoulu,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,216000,216010,Avance Johtamiskoulutus,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,211000,Kasvatustieteiden tiedekunta,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,211000,211010,Opettajankoulutuslaitos,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,211000,211020,Kasvatustieteiden laitos,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,215000,Liikuntatieteellinen tiedekunta,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,214030,Kemian laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,213040,Kieli- ja viestintƤtieteiden laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,213000,213040,Kielten laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,219120,Kokkolan yliopistokeskus Chydenius,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,20019140,Koulutuksen tutkimuslaitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,219140,Koulutuksen tutkimuslaitos,,
 JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,215000,215020,Liikuntabiologian laitos,,
 JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,215000,215030,Liikuntakasvatuksen laitos,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,215000,215010,Terveystieteiden laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,215000,Liikuntatieteellinen tiedekunta,,
 JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,214000,Matemaattis-luonnontieteellinen tiedekunta,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,214000,214020,Fysiikan laitos,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,214000,214030,Kemian laitos,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,214000,214010,Bio- ja ympƤristƶtieteiden laitos,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,214000,214040,Matematiikan ja tilastotieteen laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,214040,Matematiikan ja tilastotieteen laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,219220,Monikielisen akateemisen viestinnƤn keskus,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,213000,213010,Musiikin laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,213030,"Musiikin, taiteen ja kulttuurin tutkimuksen laitos",,
 JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,210100,Normaalikoulu,,
-JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,212000,Yhteiskuntatieteellinen tiedekunta,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,211010,Opettajankoulutuslaitos,,
 JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,212000,212020,Psykologian laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,211040,Psykologian laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,213060,Soveltavan kielentutkimuksen keskus,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,213000,213030,Taiteiden ja kulttuurin tutkimuksen laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,215000,215010,Terveystieteiden laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,219230,Tiedemuseo,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,217000,217010,TietojenkƤsittelytieteiden laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,217000,217020,Tietotekniikan laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,213000,213020,ViestintƤtieteiden laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,212000,Yhteiskuntatieteellinen tiedekunta,,
 JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,212000,212010,Yhteiskuntatieteiden ja filosofian laitos,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,,,,http://isni.org/isni/0000000459488864,971
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,10000,10000,Talous ja kulttuuri yhteiset,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,10000,11000,"Liiketalouden koulutusyksikkƶ, Kouvola",,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,10000,12000,"Liiketalouden koulutusyksikkƶ, Mikkeli",,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,10000,13000,Kulttuurin koulutusyksikkƶ,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,20000,20000,Tekniikka yhteiset,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,20000,21000,Rakennus- ja energiatekniikan koulutusyksikkƶ,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,20000,22000,Informaatioteknologian koulutusyksikkƶ,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,20000,23000,"SƤhkƶ-, talo- ja materiaalitekniikan koulutusyksikkƶ",,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,20000,24000,MetsƤtalouden ja ympƤristƶteknologian koulutusyksikkƶ,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,20000,25000,Logistiikan ja merenkulun koulutusyksikkƶ,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,30000,30000,Hyvinvointi yhteiset,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,30000,31000,Kuntoutus- ja terveysalan koulutusyksikkƶ,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,30000,32000,Terveysalan ja ensihoidon koulutusyksikkƶ,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,30000,33000,Sosiaali- ja terveysalan koulutusyksikkƶ,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,30000,34000,Sosiaalialan ja toimintakyvyn edistƤmisen koulutusyksikkƶ,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,30000,35000,"Matkailu-,ravitsemis- ja nuorisoalan koulutusyksikkƶ",,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,,40000,Opetuksen hallinto,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,,51000,Opetuksen palvelut,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,,70000,"Talous, henkilƶstƶ- ja hallintopalvelut",,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,60000,60000,TKI ja palvelut,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,60000,61000,TKI-palvelut,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,60000,62100,Digitaalinen talous,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,60000,62200,"MetsƤ, ympƤristƶ ja energia",,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,60000,62300,Logistiikka ja merenkulku,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,60000,62400,KestƤvƤ hyvinvointi,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,60000,62500,Luovat alat,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,60000,66000,Kuitulaboratorio,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,60000,66600,Kymilabs,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,213070,Yhteiskuntatieteiden ja filosofian laitos,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,219220,Yliopiston kielikeskus,,
+JyvƤskylƤn yliopisto,University of JyvƤskylƤ,JyvƤskylƤ universitet,01906,,219500,Yliopistopalvelut,,
+KAUTE-sƤƤtiƶ,,,02014465,,,,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,,10118,,,,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,60000,66100,3K-tehdas,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,66400,Active Life Lab,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,66700,Biosampo,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,62100,Digitaalinen talous,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,66100,Elektroniikan 3K-tehdas,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,30000,Hyvinvointi yhteiset,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,22000,Informaatioteknologian koulutusyksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,62400,KestƤvƤ hyvinvointi,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,66000,Kuitulaboratorio,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,13000,Kulttuurin koulutusyksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,31000,Kuntoutus- ja terveysalan koulutusyksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,66600,Kymilabs,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,11000,"Liiketalouden koulutusyksikkƶ, Kouvola",,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,12000,"Liiketalouden koulutusyksikkƶ, Mikkeli",,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,31000,Liikunnan ja kuntoutuksen koulutusyksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,31000,Liikunta- ja kuntoutusalan koulutusyksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,25000,Logistiikan ja merenkulun koulutusyksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,62300,Logistiikka ja merenkulku,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,60000,62500,Luovat alat,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,66800,Luovien alojen tutkimusyksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,35000,"Matkailu, ravitsemis- ja nuorisoalan koulutusyksik",,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,35000,"Matkailu, ravitsemis- ja nuorisoalan koulutusyksikkƶ",,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,30000,35000,"Matkailu-,ravitsemis- ja nuorisoalan koulutusyksikkƶ",,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,62200,"MetsƤ, ympƤristƶ ja energia",,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,24000,MetsƤtalouden ja ympƤristƶteknologian koulutusy,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,24000,MetsƤtalouden ja ympƤristƶteknologian koulutusyksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,66300,Mikpolis,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,60000,66200,Nuorisoalan tutkimus- ja kehittƤmiskeskus Juvenia,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,60000,66300,Mikpolis,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,60000,66900,Pienyrityskeskus,,
-Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,,02473,,,,http://isni.org/isni/0000000404149419,993
-Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,,02473,,10,Liiketoiminta ja innovaatiot,,
-Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,,02473,,11,Kone- ja kaivostekniikka,,
-Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,,02473,,12,TietojƤrjestelmƤt,,
-Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,,02473,,13,Sairaan- ja terveydenhoito,,
-Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,,02473,,14,Aktiviteettimatkailu,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,66200,Nuorisoalan tutkimus- ja kehittƤmisyksikkƶ Juven,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,66200,Nuorisoalan tutkimus- ja kehittƤmisyksikkƶ Juvenia,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,40000,Opetuksen hallinto,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,51000,Opetuksen palvelut,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,66900,Pienyrityskeskus,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,20000,21000,Rakennus- ja energiatekniikan koulutusyksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,21000,Rakennus- ja energiatekniikan koulutusyksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,33000,Sosiaali- ja terveysalan Mikkelin koulutusyksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,36000,Sosiaali- ja terveysalan Savonlinnan koulutusyksik,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,36000,Sosiaali- ja terveysalan Savonlinnan koulutusyksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,33000,Sosiaali- ja terveysalan koulutusyksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,34000,Sosiaalialan ja toimintakyvyn edistƤmisen koulutu,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,34000,Sosiaalialan ja toimintakyvyn edistƤmisen koulutusyksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,20000,23000,"SƤhkƶ-, talo- ja materiaalitekniikan koulutusyksikkƶ",,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,23000,"SƤhkƶ-, talo- materiaalitekniikan koulutusyksikk",,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,23000,"SƤhkƶ-, talo- materiaalitekniikan koulutusyksikkƶ",,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,60000,TKI ja palvelut,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,61000,TKI-palvelut,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,61000,TKI-palvelut -yksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,10000,10000,Talous ja kulttuuri yhteiset,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,70000,"Talous, henkilƶstƶ- ja hallintopalvelut",,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,10000,Talous- ja kulttuuri yhteiset,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,20000,Tekniikka yhteiset,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,32000,Terveysalan ja ensihoidon koulutusyksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,42000,Yhteisten opintojen koulutusyksikkƶ,,
+Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,14000,YrittƤjyys ja innovaatiot -yksikkƶ,,
+Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,,02473,,,,,
 Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,,02473,,15,Aikuis- ja tƤydennyskoulutuspalvelut AIKOPA,,
+Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,Kajaanin ammattikorkeakoulu,02473,,12,Business-osaamisalue,,
+Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,Kajaanin ammattikorkeakoulu,02473,,14,Kampuspalvelut,,
+Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,,02473,,18,Kampuspalvelut,,
 Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,,02473,,16,Koulutuspalvelut,,
+Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,Kajaanin ammattikorkeakoulu,02473,,13,Opiskelija- ja viestintƤpalvelut,,
+Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,Kajaanin ammattikorkeakoulu,02473,,11,Sote-osaamisalue,,
 Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,,02473,,17,TKI-palvelut,,
-Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,,02473,,18,Kampuspalvelut,,
-Karelia,Karelia University of Applied Sciences,,02469,,,,http://isni.org/isni/0000000406476472,1013
-Karelia,Karelia University of Applied Sciences,,02469,,1054,1054 Rehtorin toimisto,,
-Karelia,Karelia University of Applied Sciences,,02469,,1055,1055 Opintoasiainpalvelut,,
-Karelia,Karelia University of Applied Sciences,,02469,,1057,1057 Laskentapalvelut,,
-Karelia,Karelia University of Applied Sciences,,02469,,1060,1060 Henkilƶstƶpalvelut,,
-Karelia,Karelia University of Applied Sciences,,02469,,1063,1063 Tiedotus- ja markkinointi,,
-Karelia,Karelia University of Applied Sciences,,02469,,1066,1066 Tietohallinto,,
-Karelia,Karelia University of Applied Sciences,,02469,,1069,1069 Kirjasto,,
-Karelia,Karelia University of Applied Sciences,,02469,,1072,1072 KansainvƤlistymispalvelut,,
-Karelia,Karelia University of Applied Sciences,,02469,,1970,1970 Projektitoiminta/YPA,,
-Karelia,Karelia University of Applied Sciences,,02469,,2051,2051 TKI- ja palveluliiketoiminta,,
-Karelia,Karelia University of Applied Sciences,,02469,,3051,3051 MetsƤtalous,,
-Karelia,Karelia University of Applied Sciences,,02469,,3054,3054 Maaseutuelinkeinot,,
-Karelia,Karelia University of Applied Sciences,,02469,,3101,3101 Energia- ja ympƤristƶtekniikka,,
+Kajaanin ammattikorkeakoulu,Kajaani University of Applied Sciences,Kajaanin ammattikorkeakoulu,02473,,10,Teknologia-osaamisalue,,
 Karelia,Karelia University of Applied Sciences,,02469,,5000,5000 Koulutuksen hallinto/WƤrtsilƤ,,
-Karelia,Karelia University of Applied Sciences,,02469,,5011,5011 TKI/Projektit/Tekniikan alat,,
-Karelia,Karelia University of Applied Sciences,,02469,,5031,5031 TietojenkƤsittely,,
-Karelia,Karelia University of Applied Sciences,,02469,,5051,5051 Konetekniikka,,
-Karelia,Karelia University of Applied Sciences,,02469,,5054,5054 Rakennustekniikka,,
-Karelia,Karelia University of Applied Sciences,,02469,,5057,5057 Tieto- ja viestintƤtekniikka,,
-Karelia,Karelia University of Applied Sciences,,02469,,5061,5061 Talotekniikka,,
-Karelia,Karelia University of Applied Sciences,,02469,,5064,5064 SƤhkƶtekniikka,,
-Karelia,Karelia University of Applied Sciences,,02469,,5067,5067 YAMK Teknologiaosaamisen johtaminen,,
-Karelia,Karelia University of Applied Sciences,,02469,,5101,5101 Liiketalous,,
-Karelia,Karelia University of Applied Sciences,,02469,,5104,5104 International business,,
-Karelia,Karelia University of Applied Sciences,,02469,,5110,5110 YAMK Johtamisen ja liiketoimintaosaamisen koulutus,,
-Karelia,Karelia University of Applied Sciences,,02469,,5999,5999 Elinkeinotoiminta/tekniikka,,
-Karelia,Karelia University of Applied Sciences,,02469,,6051,6051 Medianomikoulutus,,
-Karelia,Karelia University of Applied Sciences,,02469,,6054,6054 Musiikkipedagogikoulutus,,
 Karelia,Karelia University of Applied Sciences,,02469,,6057,6057 Muotoilu,,
-Karelia,Karelia University of Applied Sciences,,02469,,6151,6151 Restonomi,,
 Karelia,Karelia University of Applied Sciences,,02469,,7000,7000 Koulutuksen hallinto/Tikkarinne,,
-Karelia,Karelia University of Applied Sciences,,02469,,7051,7051 Sosionomi,,
-Karelia,Karelia University of Applied Sciences,,02469,,7054,7054 Sairaanhoitaja,,
-Karelia,Karelia University of Applied Sciences,,02469,,7057,7057 Fysioterapeutti,,
-Karelia,Karelia University of Applied Sciences,,02469,,7061,7061 Terveydenhoitaja,,
-Karelia,Karelia University of Applied Sciences,,02469,,7063,7063 YAMK Sosiaali- ja terveysalan kehittƤminen ja johtaminen,,
-Karelia,Karelia University of Applied Sciences,,02469,,7064,7064 YAMK IkƤosaaminen,,
 Karelia,Karelia University of Applied Sciences,,02469,,8067,8067 Maakuntakorkeakoulu,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,,02469,,,,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1054,1054 Rehtorin toimisto;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1055,1055 Opintoasiainpalvelut;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1053,1056 KV-palvelut;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1057,1057 Laskentapalvelut;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1060,1060 Henkilƶstƶpalvelut;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1063,1063 Tiedotus- ja markkinointi;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1066,1066 Tietohallinto;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1069,1069 Kirjasto;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1072,1072 KansainvƤlistymispalvelut;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1080,1080 Opetuksen yhteiset henk.kulut;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1970,1970 Projektitoiminta/YPA;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,2051,2051 TKI- ja palveluliiketoiminta;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,3051,3051 MetsƤtalous;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,3054,3054 Maaseutuelinkeinot;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,3101,3101 Energia- ja ympƤristƶtekniikka;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5011,5011 TKI/Projektit/Tekniikan alat;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5031,5031 TietojenkƤsittely;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5051,5051 Konetekniikka;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5054,5054 Rakennustekniikka;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5057,5057 Tieto- ja viestintƤtekniikka;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5061,5061 Talotekniikka;;,,
+Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5064,5064
SƤhkƶtekniikka;;,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5067,5067 YAMK Teknologiaosaamisen johtaminen;;,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5101,5101 Liiketalous;;,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5104,5104 International business;;,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5110,5110 YAMK Johtamisen ja liiketoimintaosaamisen kou,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5999,5999 Elinkeinotoiminta/tekniikka;;,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,6051,6051 Medianomikoulutus;;,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,6054,6054 Musiikkipedagogikoulutus;;,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,6151,6151 Restonomi;;,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7051,7051 Sosionomi;;,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7054,7054 Sairaanhoitaja;;,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7057,7057 Fysioterapeutti;;,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7061,7061 Terveydenhoitaja;;,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7063,7063 YAMK Sosiaali- ja terveysalan kehittƤminen j,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7064,7064 YAMK IkƤosaaminen;;,, +Koneen SƤƤtiƶ,Kone Foundation,,02135371,,,,, +Kotimaisten kielten keskus,Institute for the Languages of Finland,,02458728,,,,, +Kuopion yliopistollisen sairaalan erityisvastuualue,Kuopio University Hospital Catchment Area,,01714953,,,,, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,,10126,,,,, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,LAB-ammattikorkeakoulu,10126,,ALAB,"Hallinto, LAB",, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,LAB-ammattikorkeakoulu,10126,,HLTI,"Hyvinvointi, Lahti",, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,LAB-ammattikorkeakoulu,10126,,HLPR,"Hyvinvointi, Lappeenranta",, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,LAB-ammattikorkeakoulu,10126,,KLTI,"Kielikeskus, Lahti",, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,LAB-ammattikorkeakoulu,10126,,KLPR,"Kielikeskus, Lappeenranta",, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,LAB-ammattikorkeakoulu,10126,,LLTI,"Liiketalous, Lahti",, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,LAB-ammattikorkeakoulu,10126,,LLPR,"Liiketalous, Lappeenranta",, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,LAB-ammattikorkeakoulu,10126,,MLTI,"Marata, Lahti",, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,LAB-ammattikorkeakoulu,10126,,MLPR,"Marata, Lappeenranta",, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,LAB-ammattikorkeakoulu,10126,,MILTI,"Muotoiluinstituutti, Lahti",, +LAB-ammattikorkeakoulu,LAB University of Applied 
Sciences,LAB-ammattikorkeakoulu,10126,,MILPR,"Muotoiluinstituutti, Lappeenranta",, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,LAB-ammattikorkeakoulu,10126,,TLTI,"Teknologia, Lahti",, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,LAB-ammattikorkeakoulu,10126,,TLPR,"Teknologia, Lappeenranta",, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,LAB-ammattikorkeakoulu,10126,,CLTI,"TietojenkƤsittely, Lahti",, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,LAB-ammattikorkeakoulu,10126,,CLPR,"TietojenkƤsittely, Lappeenranta",, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,LAB-ammattikorkeakoulu,10126,,ILTI,"Tietotekniikka, Lahti",, +LAB-ammattikorkeakoulu,LAB University of Applied Sciences,LAB-ammattikorkeakoulu,10126,,ILPR,"Tietotekniikka, Lappeenranta",, Lahden ammattikorkeakoulu,Lahti University of Applied Sciences,,02470,,,,http://isni.org/isni/0000000404178973,1109 -Lahden ammattikorkeakoulu,Lahti University of Applied Sciences,,02470,,LI,Liiketalous ja matkailu,, Lahden ammattikorkeakoulu,Lahti University of Applied Sciences,,02470,,LP,LAMK Palvelut,, -Lahden ammattikorkeakoulu,Lahti University of Applied Sciences,,02470,,MD,Musiikki- ja draamainstituutti,, +Lahden ammattikorkeakoulu,Lahti University of Applied Sciences,,02470,,LI,Liiketalous ja matkailu,, Lahden ammattikorkeakoulu,Lahti University of Applied Sciences,,02470,,MI,Muotoiluinstituutti,, +Lahden ammattikorkeakoulu,Lahti University of Applied Sciences,,02470,,MD,Musiikki- ja draamainstituutti,, Lahden ammattikorkeakoulu,Lahti University of Applied Sciences,,02470,,ST,Sosiaali- ja terveys,, Lahden ammattikorkeakoulu,Lahti University of Applied Sciences,,02470,,TE,Tekniikka ja ympƤristƶ,, -Lapin ammattikorkeakoulu,Lapland University of Applied Sciences,,10108,,,,http://isni.org/isni/0000000446491976,1133 +Lapin ammattikorkeakoulu,Lapland University of Applied Sciences,,10108,,,,, +Lapin ammattikorkeakoulu,Lapland University of Applied Sciences,Lapin ammattikorkeakoulu,10108,,300,Arktiset luonnonvarat ja talous,, Lapin ammattikorkeakoulu,Lapland University of Applied Sciences,,10108,,120,Hyvinvointipalvelut,, Lapin ammattikorkeakoulu,Lapland University of Applied Sciences,,10108,,130,Kauppa ja kulttuuri,, +Lapin ammattikorkeakoulu,Lapland University of Applied Sciences,,10108,,111,Korkeakoulupalvelut,, Lapin ammattikorkeakoulu,Lapland University of Applied Sciences,,10108,,140,Matkailupalvelut,, +Lapin ammattikorkeakoulu,Lapland University of Applied Sciences,Lapin ammattikorkeakoulu,10108,,200,Pohjoinen hyvinvointi ja palvelut,, Lapin ammattikorkeakoulu,Lapland University of Applied Sciences,,10108,,150,Teollisuus ja luonnonvarat,, Lapin ammattikorkeakoulu,Lapland University of Applied Sciences,,10108,,160,YAMK,, Lapin ammattikorkeakoulu,Lapland University of Applied Sciences,,10108,,110,Yhteiset toiminnot,, -Lapin ammattikorkeakoulu,Lapland University of Applied Sciences,,10108,,111,Korkeakoulupalvelut,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,,,http://isni.org/isni/000000010744995X,3448 -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,221200,Harjoittelukoulu,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,222000,Oikeustieteiden tiedekunta,, +Lapin ammattikorkeakoulu,Lapland University of Applied Sciences,Lapin ammattikorkeakoulu,10108,,100,Yhteiset toiminnot,, +Lapin yliopisto,University of Lapland,,01918,,,,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2263400,AK kirjasto,, 
+Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2250000,Arktinen hallinta ja hallinto,, Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,225000,Arktinen keskus,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2250600,Arktisen antropologian tutkimusryhma,, Lapin yliopisto,University of Lapland,Lapplands universitet,01918,225000,2250600,Arktisen antropologian tutkimusryhmƤ,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2250500,Arktisen globaalimuutoksen tutkimusryhma,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,225000,2250500,Arktisen globaalimuutoksen tutkimusryhmƤ,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,225000,200250400,Arktisen kestƤvƤn kehityksen tutkimusryhmƤ,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2299050,Arktisen opetuksen ja tutkimuksen rahasto,, Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,226100,Avoin yliopisto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2261100,Avoin yliopisto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,229000,Hallintoyksikko,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2291600,Hankintatoimi,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,221200,Harjoittelukoulu,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,221300,Harjoittelukoulu (ylakoulu),, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2212100,Harjoittelukoulu perustoiminta,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2212000,Harjoittelukoulun hallinto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2291200,Henkilostoasiat,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2298400,Henkilostoasiat,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2291200,Henkilƶstƶasiat,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229800,2298400,Henkilƶstƶasiat,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2291400,IT-palvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2210000,KTK hallinto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2211100,KTK perustoiminta,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2264200,Kaannospalvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2297100,Kansainvalistymispalvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2299010,Kasvatustieteiden opetuksen ja tutkimuksen rahasto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,221000,Kasvatustieteiden tiedekunta,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2264000,Kielikeskuksen hallinto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,226400,Kielikeskus,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2264100,Kielikeskus,, Lapin yliopisto,University of Lapland,Lapplands universitet,01918,20026300,200263000,Kirjaston hallinto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2263000,Kirjaston hallinto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2294100,Kongressipalvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2264300,Konsernin kielikeskus,, Lapin yliopisto,University of Lapland,Lapplands 
universitet,01918,20026300,200263050,Konsernin kirjasto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,20026300,2263200,Taidekirjasto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,20026300,2263400,AK kirjasto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2263050,Konsernin kirjasto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,226200,Koulutus- ja kehittamispalvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2262100,Koulutus- ja kehittamispalvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,226200,2262100,Koulutus- ja kehittƤmispalvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2298801,Kulttuurilahtoinen palvelumuotoilu -tohtoriohjelma,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229800,2298801,KulttuurilƤhtƶinen palvelumuotoilu -tohtoriohjelma,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,226400,2264200,KƤƤnnƶspalvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,228000,LUC Tietohallinto ja IT-palvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2280100,LUC Tietohallinto ja IT-palvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2290800,Laadunhallintapalvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2293300,Lapin korkeakoulukonserni,, Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,226350,Lapin yliopistokustannus,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,226400,Kielikeskus,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2293300,Lapin korkeakoulukonserni,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,229800,Yliopistotason tulot ja menot,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,221000,Kasvatustieteiden tiedekunta,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,223000,Taiteiden tiedekunta,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,221000,2210000,KTK hallinto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,221000,2211100,KTK perustoiminta,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,221000,2211200,Opetus- ja kasvatusalan koulutuspalvelut,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,221200,2212000,Harjoittelukoulun hallinto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,221200,2212100,Harjoittelukoulu perustoiminta,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,222000,2220000,OTK hallinto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,222000,2220100,OTK perustoiminta,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,223000,2230000,TTK hallinto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,223000,2230100,TTK perustoiminta,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,223000,2230110,TTK elinkeinotoiminta,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,223000,2230120,TTK tƤydentƤvƤ rahoitus,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,224000,Yhteiskuntatieteiden tiedekunta,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,224000,2240000,YTK hallinto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,224000,2240100,YTK 
perustoiminta,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2263500,Lapin yliopistokustannus,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2299100,Lapin yliopiston profilointirahasto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2296100,Lapin yliopistorahasto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2245000,MTI hallinto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2245200,MTI taydentava rahoitus,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,224500,2245200,MTI tƤydentƤvƤ rahoitus,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,224500,Matkailun tutkimus- ja koulutusinstituutti,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2245100,Matkailun tutkimus- ja koulutusinstituutti,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2220000,OTK hallinto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2220100,OTK perustoiminta,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2299020,Oikeustieteiden opetuksen ja tutkimuksen rahasto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,222000,Oikeustieteiden tiedekunta,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2211200,Opetus- ja kasvatusalan koulutuspalvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2292100,Opetusvararehtori,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2292200,Opiskelijapalvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2263100,Paakirjasto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,226300,Paakirjasto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2250400,Pohjoinen Poliittinen Talous,, Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,224100,Pohjoisen yhteiskunnan tutkimusinstituutti,, Lapin yliopisto,University of Lapland,Lapplands universitet,01918,224100,2241100,Pohjoisen yhteiskunnan tutkimusinstituutti,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,224500,Matkailun tutkimus- ja koulutusinstituutti,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,224500,2245000,MTI hallinto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,224500,2245100,Matkailun tutkimus- ja koulutusinstituutti,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,224500,2245200,MTI tƤydentƤvƤ rahoitus,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,225000,2250000,Arktinen hallinta ja hallinto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,225000,2250100,TiedekeskusnƤyttely,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,225000,2250200,TiedeviestintƤ,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2241100,Pohjoisen yhteiskunnan tutkimusinstituutti (LAPPEA,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2241100,Pohjoisen yhteiskunnan tutkimusinstituutti (LAPPEA),, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2250300,Pohjoisen ymparisto- ja vahemmistooikeuden institu,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2250300,Pohjoisen ymparisto- ja vahemmistooikeuden instituutti,, Lapin yliopisto,University of Lapland,Lapplands universitet,01918,225000,2250300,Pohjoisen ympƤristƶ- ja vƤhemmistƶoikeuden 
instituutti,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,225000,200250400,Arktisen kestƤvƤn kehityksen tutkimusryhmƤ,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,225000,2250500,Arktisen globaalimuutoksen tutkimusryhmƤ,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,226100,2261100,Avoin yliopisto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,226200,Koulutus- ja kehittƤmispalvelut,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,226200,2262100,Koulutus- ja kehittƤmispalvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2298802,Pohjoiset kulttuurit ja kestava luonnonvarapolitii,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2298802,Pohjoiset kulttuurit ja kestava luonnonvarapolitiikka,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229800,2298802,Pohjoiset kulttuurit ja kestƤvƤ luonnonvarapolitiikka,, Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,20026300,PƤƤkirjasto,, Lapin yliopisto,University of Lapland,Lapplands universitet,01918,20026300,200263100,PƤƤkirjasto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,226350,2263500,Lapin yliopistokustannus,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,226400,2264000,Kielikeskuksen hallinto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,226400,2264100,Kielikeskus,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,226400,2264200,KƤƤnnƶspalvelut,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,226400,2264300,Konsernin kielikeskus,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,229000,Hallintoyksikkƶ,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2290100,Rehtori,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2290200,Yleishallinto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2290300,Suunnittelupalvelut,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2290310,Taloushallintopalvelut,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2290500,ViestintƤ- ja suhdetoiminta,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2290100,Rehtori,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2297300,Strateginen kansainvalistyminen,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2290300,Suunnittelupalvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2230110,TTK elinkeinotoiminta,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2230000,TTK hallinto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2230100,TTK perustoiminta,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2230120,TTK taydentava rahoitus,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,223000,2230120,TTK tƤydentƤvƤ rahoitus,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2263200,Taidekirjasto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2299030,Taiteiden opetuksen ja tutkimuksen rahasto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,223000,Taiteiden tiedekunta,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2290310,Taloushallintopalvelut,, +Lapin yliopisto,University of 
Lapland,Lapplands universitet,01918,,2298310,Taloushallintopalvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2290600,Taydentavan rahoituksen palvelupiste,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2290700,Tekniset erityispalvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2250100,Tiedekeskusnayttely,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,225000,2250100,TiedekeskusnƤyttely,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2293100,Tiedevararehtori,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2250200,Tiedeviestinta,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,225000,2250200,TiedeviestintƤ,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2298804,Tieteenfilosofia ja tutkijantaidot,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2291300,Tietohallinto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2291500,Tilahallinto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2295800,Tutkijakoulu,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2291800,Tutkimuspalvelut,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2293100,Tutkimusvararehtori,, Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2290600,TƤydentƤvƤn rahoituksen palvelupiste,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2290700,Tekniset erityispalvelut,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2290800,Laadunhallintapalvelut,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2291100,Vararehtori,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2291200,Henkilƶstƶasiat,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2291300,Tietohallinto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2291400,IT-palvelut,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2291500,Tilahallinto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2291600,Hankintatoimi,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2291700,Yliopistopaino,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2291800,Tutkimuspalvelut,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2292100,Opetusvararehtori,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2292200,Opiskelijapalvelut,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2293100,Tiedevararehtori,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2294100,Kongressipalvelut,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2295100,University of Arctic -rahasto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2296100,Lapin yliopistorahasto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2297100,KansainvƤliset asiat,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2297600,Uarctic perustoiminta,, Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2297500,Uarctic-verkosto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229800,2298400,Henkilƶstƶasiat,, -Lapin yliopisto,University of Lapland,Lapplands 
universitet,01918,229800,2298801,KulttuurilƤhtƶinen palvelumuotoilu -tohtoriohjelma,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229800,2298802,Pohjoiset kulttuurit ja kestƤvƤ luonnonvarapolitiikka,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229800,2298803,Yhteisƶt ja muuttuva tyƶ,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229800,2298804,Tieteenfilosofia ja tutkijantaidot,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2299010,Kasvatustieteiden opetuksen ja tutkimuksen rahasto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2299020,Oikeustieteiden opetuksen ja tutkimuksen rahasto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2299030,Taiteiden opetuksen ja tutkimuksen rahasto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2295100,University of Arctic -rahasto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2291100,Vararehtori,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2290500,Viestinta- ja suhdetoiminta,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2290500,ViestintƤ- ja suhdetoiminta,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2240000,YTK hallinto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2240100,YTK perustoiminta,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2299040,Yhteiskuntatieteiden opetuksen ja tutkimuksen raha,, Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2299040,Yhteiskuntatieteiden opetuksen ja tutkimuksen rahasto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2299050,Arktisen opetuksen ja tutkimuksen rahasto,, -Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2299100,Lapin yliopiston profilointirahasto,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,,,http://isni.org/isni/0000000105333048,1150 -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A100A,PƄƄT Tiedekunnan yhteiset,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A110A,PƄƄT Kanslia,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A120A,PƄƄT Tiedekunnan opintopalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A130A,PƄƄT KTK-yleisopinnot,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A200A,PƄƄT Talouden ja yritysjuridiikan laitos,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A210A,PƄƄT Laskentatoimi,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A220A,PƄƄT Rahoitus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A230A,PƄƄT Strategiatutkimus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A240A,PƄƄT Yritysjuridiikka,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A250A,PƄƄT Talousjohtaminen (KTK),, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A300A,PƄƄT Johtamisen ja kv-liiketoiminnan lai,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A310A,PƄƄT Hankintojen johtaminen,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of 
Technology,,01914,,23A320A,PƄƄT Johtaminen ja organisaatiot,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A330A,PƄƄT KansainvƤlinen markkinointi,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A340A,PƄƄT Tietojohtaminen,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A350A,PƄƄT MSIS,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A360E,PƄƄTLahden yksikkƶ,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A365A,PƄƄT Tietojohtaminen ja johtajuus (KTM),, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A370A,PƄƄT Johtaminen (KTK),, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A380A,PƄƄT Kv-liiketoiminta (KTK),, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A390A,PƄƄT Strateginen johtaminen (KTM),, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A410A,PƄƄT Nordi / KATI,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A500A,PƄƄT TBRC/KATI,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B100A,Teknillisen tiedekunnan yhteiset,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B101A,PƄƄT Hybridilaboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B102A,"Yk-lisƤt,Tekn.tdk yhteiset",, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B110A,Toimistopalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B111A,PƄƄT Toimistopalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B120A,Opintopalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B130A,PƄƄT Tekniset palvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B131A,PƄƄT Elektroniikan suunnittelukeskus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B132A,PƄƄT Mekaniikkapaja,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B133A,PƄƄT TEPA Energia,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B134A,PƄƄT TEPA Kemia,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B135A,PƄƄT TEPA Metalli,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,224000,Yhteiskuntatieteiden tiedekunta,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2298803,Yhteisot ja muuttuva tyo,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229800,2298803,Yhteisƶt ja muuttuva tyƶ,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2213000,Ylakoulun hallinto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2213100,Ylakoulun perustoiminta,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2290200,Yleishallinto,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2291700,Yliopistopaino,, +Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,229800,Yliopistotason tulot ja menot,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K462A,AV-palvelut,, +Lappeenrannan 
teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B302A,Analyysipalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K810A,"ApurahajƤrjestelmƤ, LUT",, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F150A,Asiakkuudet,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F130A,Avoin yliopisto,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB22A,Back Office,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B390A,CEID-keskus yhteiset,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E183A,Cost Management,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B363A,Erotuskonseptit,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E170A,EtelƤ-Karjala-instituutti,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB21A,Front Office,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K500A,HR,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K110A,Hakupalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K471A,Hankintapalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E180A,IEM Yhteiset,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K400A,IS&T,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB20A,IS&T + Project office,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F220A,Imatran toimipiste,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB53A,Imatran toimipiste,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KC30A,Innovaatiopalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KC10A,"Innovaatiot ja yrittƤjyys, yhteiset",, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KC11A,"Innovaatiot ja yrittƤjyys, yk-laskenta",, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E182A,Innovation Management,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E130A,Innovation and software,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E160D,Innovation and software/KOUVOLA,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E151E,Innovation and software/LAHTI,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E150E,"Internat business,marketing,entrep/LAHTI",, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E120A,"International business,marketing,entrepr",, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B380A,Inversio,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K800A,KansainvƤlisyys,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B365A,Kemiallinen metrologia,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B351A,Kemialliset erotustekniikat,, Lappeenrannan teknillinen 
yliopisto,Lappeenranta University of Technology,,01914,,23B140A,Kielikeskus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B141A,PƄƄT Kielikeskus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B150A,PƄƄT CST,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B170A,PƄƄT LUT voima,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B200A,LUT School of Energy Systems yhteiset,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B201A,"Yk-lisƤt, LES",, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B210A,Energiatekniikan laboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B211A,LƤmpƶ- ja virtaustekniikan laboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B212A,Teknillinen termodynamiikka,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B213A,Virtaustekniikka,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B214A,Uusiutuvien energiajƤrjestelmien laborat,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B215A,PƄƄT Ydinvoimatekniikan laboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B216A,PƄƄT Ydinturvallisuuden tutkimusyksikkƶ,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B217A,Ydintekniikka,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B220G,Bioenergian laboratorio/Mikkeli,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B221J,PƄƄT Energia/Varkaus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B230A,SƤhkƶtekniikan laboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B231A,SƤhkƶmarkkinalaboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B232A,SƤhkƶnkƤyttƶtekniikan laboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B233A,CDMC-laboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B234A,SƤƤtƶ- ja digitaalitekniikan laboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B235A,Sovelletun elektroniikan laboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B236A,PƄƄT SƤhkƶmekaaniset jƤrjestelmƤt,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B237A,Aurinkotalousprofessuuri,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B240A,"KestƤvyystutkimus, Lappeenranta",, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B241E,"KestƤvyystutkimus, Lahti",, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B241G,PƄƄT VihreƤ kemia,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B250A,LUT Kone yhteiset,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B250G,PƄƄT Materiaalitekniikka/Mikkeli,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B251A,Hitsaustekniikan 
laboratorio,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B354A,KiinteƤ/neste-erotus,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B382A,KiinteƤn aineen fysiikka,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K210A,Kirjanpito,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K620A,Kirjanpito,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F200A,Kirjasto,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F100A,Koke yhteiset,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B386A,KonenƤkƶ ja hahmontunnistus,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B252N,Koneteknologia/Turku,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B253A,Lasertyƶstƶ,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B254A,Tuotantotekniikan laboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B255A,Koneensuunnittelun laboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B256A,TerƤsrakenteiden laboratorio,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E163D,Kouvola,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E162D,Kouvola/common,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B257C,Kuitukomposiittilaboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B258A,Konedynamiikka,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B259A,ƄlykkƤiden koneiden laboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B260G,PƄƄT LUT Savo,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B290A,LUT Voima,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B2AAA,Alusta LES Reflex,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B2ABA,Alusta LES SIM,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B2ACA,Alusta LES RE-SOURSE,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B2ADA,Alusta LES DIGI-USER,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B2AEA,Alusta LES SAWE,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B2AFA,Alusta LES RED,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K330A,Kumppanuudet ja VenƤjƤtoiminnot,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K470A,KƤyttƶtukipalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J310A,LUMA,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B300A,LUT Kemia yhteiset,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B301A,CST,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B302A,Analyysipalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of 
Technology,,01914,,23B303A,LENS tekniset palvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KA00A,LUT tohtorikoulu,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB51A,LUTin kirjastopalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E153E,Lahti/common,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J130A,Lakiasiat,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B381A,Laskennallinen materiaalitiede,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB40A,LƤhipalvelut,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B304A,Matematiikan ja fysiikan laitoksen yht,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B305A,"Opetus, laskennallinen tekniikka",, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B306A,"Opetus, kemian tekniikka",, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B308A,"Yk-lisƤt, LENS",, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B309A,School of Engineering Science yhteiset,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B310A,PƄƄT Kemia,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B311A,PƄƄT Teknillinen kemia,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B312A,PƄƄT Membraanitekniikka,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B320A,PƄƄT Yksikkƶoperaatiot,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B321A,PƄƄT Tuote- ja prosessikehitys,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B322A,PƄƄT Systeemitekniikka,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B323A,PƄƄT Fysikaalinen kemia,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B324A,PƄƄT KiinteƤ/neste-erotus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B325A,PƄƄT Virtausprosessi,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B326A,PƄƄT Erotustekniikka ja prosessi-integro,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B330A,PƄƄT Kuitu- ja paperitekniikan laborator,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B331A,PƄƄT Paperinjalostus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B340H,PƄƄT Fiber Tech -tutkimusyksikkƶ/Savonli,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B351A,Kemialliset erotustekniikat,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B383H,Materiaalifysiikka,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B352A,Membraanitekniikka,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B353A,Termiset yksikkƶoperaatiot,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B354A,KiinteƤ/neste-erotus,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of 
Technology,,01914,,23K472A,Mikroluokkapalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K301A,Nordi,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E161D,Operations management and system/KOUVOLA,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E140A,Operations management and systems engine,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E152E,Operations management and systems/LAHTI,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B306A,"Opetus, kemian tekniikka",, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B305A,"Opetus, laskennallinen tekniikka",, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K480A,Opetusteknologia,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K120A,Opintoasioiden kehittƤminen,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B120A,Opintopalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E101A,Opintopalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K100A,Opintotoimisto,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B385A,Optoelektroniikka,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K450A,Palvelukehitys,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K460A,Perusresurssipalvelut,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B361A,Prosessikehitys,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B362A,Virtausprosessit,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B363A,Erotuskonseptit,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K473A,Puhelinpalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K440A,PƄƄT AV-palvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C500A,PƄƄT Arvoverkostojen johtaminen,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B364A,PƄƄT Biomateriaalit,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B365A,Kemiallinen metrologia,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B371G,VihreƤ kemia,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B380A,Inversio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B381A,Laskennallinen materiaalitiede,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B382A,KiinteƤn aineen fysiikka,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B383H,Materiaalifysiikka,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B385A,Optoelektroniikka,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B386A,KonenƤkƶ ja hahmontunnistus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B387D,PƄƄT KonenƤkƶ ja hahmotunnistus Kouvola,, -Lappeenrannan teknillinen 
yliopisto,Lappeenranta University of Technology,,01914,,23B388A,PƄƄT Kemiallinen metrologia,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B390A,CEID-keskus yhteiset,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B391J,Varkaus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B3AAA,Alusta LENS Reflex,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B3ABA,Alusta LENS SIM,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B3ACA,Alusta LENS RE-SOURSE,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B3ADA,Alusta LENS DIGI-USER,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B3AEA,Alusta LENS SAWE,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B3AFA,Alusta LENS RED,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B400A,PƄƄT LUT Kone yhteiset,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B600A,PƄƄT CEID-keskus yhteiset,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B150A,PƄƄT CST,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K490A,PƄƄT Digipaino,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B131A,PƄƄT Elektroniikan suunnittelukeskus,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B221J,PƄƄT Energia/Varkaus,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B326A,PƄƄT Erotustekniikka ja prosessi-integro,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F300A,PƄƄT EtelƤ-Karjala-instituutti,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B340H,PƄƄT Fiber Tech -tutkimusyksikkƶ/Savonli,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B323A,PƄƄT Fysikaalinen kemia,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A310A,PƄƄT Hankintojen johtaminen,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B410A,PƄƄT Hitsaustekniikan laboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B411N,PƄƄT Koneteknologia/Turku,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B412A,PƄƄT Lasertyƶstƶ,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B413G,PƄƄT Materiaalitekniikka/Mikkeli,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B420A,PƄƄT Konepajan ja levytyƶtekniikan labor,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B101A,PƄƄT Hybridilaboratorio,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C272A,PƄƄT Innovaatio- ja teknolog./TBRC,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C236A,PƄƄT Innovaatio- ja teknologiajohtaminen,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C270A,PƄƄT Innovaatio- ja teknologiajohtaminen,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C271A,PƄƄT Innovaatio- ja 
teknologiajohtaminen,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C400A,PƄƄT Innovaatiojohtaminen,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B510A,PƄƄT Inversio,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A370A,PƄƄT Johtaminen (KTK),, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A320A,PƄƄT Johtaminen ja organisaatiot,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A300A,PƄƄT Johtamisen ja kv-liiketoiminnan lai,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A130A,PƄƄT KTK-yleisopinnot,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A330A,PƄƄT KansainvƤlinen markkinointi,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C211A,PƄƄT KansainvƤliset toiminnot,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A110A,PƄƄT Kanslia,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B310A,PƄƄT Kemia,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B388A,PƄƄT Kemiallinen metrologia,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B141A,PƄƄT Kielikeskus,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B324A,PƄƄT KiinteƤ/neste-erotus,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B460A,PƄƄT Konedynamiikka,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B430A,PƄƄT Koneensuunnittelun laboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B440A,PƄƄT TerƤsrakenteiden laboratorio,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B531D,PƄƄT KonenƤkƶ ja hahmontunnistus Kouvola,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B530A,PƄƄT KonenƤkƶ ja hahmotunnistus,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B387D,PƄƄT KonenƤkƶ ja hahmotunnistus Kouvola,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C320A,PƄƄT KonenƤƶn ja hahmontunnistuksen labo,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B420A,PƄƄT Konepajan ja levytyƶtekniikan labor,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B411N,PƄƄT Koneteknologia/Turku,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C600D,PƄƄT Kouvolan yksikkƶ,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B330A,PƄƄT Kuitu- ja paperitekniikan laborator,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B450C,PƄƄT Kuitukomposiittilaboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B460A,PƄƄT Konedynamiikka,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B470A,PƄƄT ƄlykkƤiden koneiden laboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B500A,PƄƄT Matematiikan ja fysiikan laitokset,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B505A,PƄƄT Mafy yhteiset 
opetus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B510A,PƄƄT Inversio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B511G,PƄƄT Materiaalitekniikka/Mikkeli,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C233A,PƄƄT Kustannusjohtaminen,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A380A,PƄƄT Kv-liiketoiminta (KTK),, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K410A,PƄƄT KƤyttƶtuki,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B400A,PƄƄT LUT Kone yhteiset,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B260G,PƄƄT LUT Savo,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B170A,PƄƄT LUT voima,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C700E,PƄƄT Lahti School of Innovation,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C250E,PƄƄT Lahti/School of Innovation,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B412A,PƄƄT Lasertyƶstƶ,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B515A,PƄƄT Laskennallinen materiaalitiede,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A210A,PƄƄT Laskentatoimi,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A350A,PƄƄT MSIS,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B505A,PƄƄT Mafy yhteiset opetus,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B500A,PƄƄT Matematiikan ja fysiikan laitokset,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B520A,PƄƄT Materiaalifysiikka,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B250G,PƄƄT Materiaalitekniikka/Mikkeli,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B413G,PƄƄT Materiaalitekniikka/Mikkeli,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B511G,PƄƄT Materiaalitekniikka/Mikkeli,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B132A,PƄƄT Mekaniikkapaja,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B312A,PƄƄT Membraanitekniikka,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F400A,PƄƄT Nordi,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A410A,PƄƄT Nordi / KATI,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C330A,PƄƄT Ohjelmistotekniikan laboratorio,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C300A,PƄƄT Ohjelmistotuotanto ja tiedonhallint,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C800A,PƄƄT Ohjelmistotutanto ja tiedonhallinta,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B525A,PƄƄT Optoelektroniikka,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B530A,PƄƄT KonenƤkƶ ja hahmotunnistus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B531D,PƄƄT 
KonenƤkƶ ja hahmontunnistus Kouvola,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B600A,PƄƄT CEID-keskus yhteiset,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B610A,PƄƄT ƄlykkƤiden koneiden laboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B620J,PƄƄT Varkaus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C100A,PƄƄT Tiedekunnan hallintopalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C110A,PƄƄT Tiedekunnan opintopalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C120A,PƄƄT Tiedekunnan yhteiset,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C200A,PƄƄT Tuotantotalouden osaston yhteiset,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C210A,PƄƄT Teollisen markkinoinnin ja kansainv,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C211A,PƄƄT KansainvƤliset toiminnot,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K430A,PƄƄT Palvelutuotanto,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B331A,PƄƄT Paperinjalostus,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K420A,PƄƄT Project Office,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C232A,PƄƄT PƤƤtƶksenteontukisysteemit,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A220A,PƄƄT Rahoitus,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C262D,PƄƄT Rautatielogistiikka/Kouvola,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C240A,PƄƄT RyhmƤpƤƤtƶksenteon ja teknologian j,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J120A,PƄƄT Strategia ja yhteiskuntasuhteet,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A230A,PƄƄT Strategiatutkimus,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A390A,PƄƄT Strateginen johtaminen (KTM),, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B322A,PƄƄT Systeemitekniikka,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B236A,PƄƄT SƤhkƶmekaaniset jƤrjestelmƤt,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23G300A,PƄƄT TBRC,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A500A,PƄƄT TBRC/KATI,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B133A,PƄƄT TEPA Energia,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B134A,PƄƄT TEPA Kemia,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B135A,PƄƄT TEPA Metalli,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A200A,PƄƄT Talouden ja yritysjuridiikan laitos,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A250A,PƄƄT Talousjohtaminen (KTK),, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B311A,PƄƄT Teknillinen kemia,, +Lappeenrannan teknillinen 
yliopisto,Lappeenranta University of Technology,,01914,,23B130A,PƄƄT Tekniset palvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C234A,PƄƄT TeknologiayrittƤjyys,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C212A,PƄƄT Teollinen markkinointi,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C220A,PƄƄT Toimitusketjun johtamisen laborator,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C221A,PƄƄT Toimitusketjun johtaminen/TBRC,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C210A,PƄƄT Teollisen markkinoinnin ja kansainv,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C230A,PƄƄT Teollisuustalouden laboratorio yhte,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C231A,PƄƄT Teollisuustalous,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C232A,PƄƄT PƤƤtƶksenteontukisysteemit,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C233A,PƄƄT Kustannusjohtaminen,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C234A,PƄƄT TeknologiayrittƤjyys,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C235A,PƄƄT Tietojohtaminen/tuotantotalous,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C236A,PƄƄT Innovaatio- ja teknologiajohtaminen,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C240A,PƄƄT RyhmƤpƤƤtƶksenteon ja teknologian j,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C241M,PƄƄT Tuotantotalous/Lapua,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C250E,PƄƄT Lahti/School of Innovation,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C260D,PƄƄT Tuotantotalous/Kouvola yhteiset,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C261D,PƄƄT Tuotantotalous/Kouvola,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C262D,PƄƄT Rautatielogistiikka/Kouvola,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C270A,PƄƄT Innovaatio- ja teknologiajohtaminen,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C271A,PƄƄT Innovaatio- ja teknologiajohtaminen,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C272A,PƄƄT Innovaatio- ja teknolog./TBRC,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B440A,PƄƄT TerƤsrakenteiden laboratorio,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C100A,PƄƄT Tiedekunnan hallintopalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A120A,PƄƄT Tiedekunnan opintopalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C110A,PƄƄT Tiedekunnan opintopalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A100A,PƄƄT Tiedekunnan yhteiset,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C120A,PƄƄT Tiedekunnan yhteiset,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of 
Technology,,01914,,23C281A,PƄƄT Tieto- ja kustannusjohtaminen,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C283A,PƄƄT Tieto- ja kustannusjohtaminen/TBRC,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C300A,PƄƄT Ohjelmistotuotanto ja tiedonhallint,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A340A,PƄƄT Tietojohtaminen,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A365A,PƄƄT Tietojohtaminen ja johtajuus (KTM),, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C235A,PƄƄT Tietojohtaminen/tuotantotalous,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C340A,PƄƄT Tietojohtaminen/tuta,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C310A,PƄƄT Tietoliikenneohjelmistojen laborato,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C320A,PƄƄT KonenƤƶn ja hahmontunnistuksen labo,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C321D,PƄƄT Tietoyhteiskunnan teknologiat/Kouvo,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C330A,PƄƄT Ohjelmistotekniikan laboratorio,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C340A,PƄƄT Tietojohtaminen/tuta,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C400A,PƄƄT Innovaatiojohtaminen,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C500A,PƄƄT Arvoverkostojen johtaminen,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C600D,PƄƄT Kouvolan yksikkƶ,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C700E,PƄƄT Lahti School of Innovation,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C800A,PƄƄT Ohjelmistotutanto ja tiedonhallinta,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E100A,Kanslia,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E101A,Opintopalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E102A,Schoolin yhteiset,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E103A,"Yk-lisƤt, LBM",, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E110A,"Strategy,management and accounting",, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E120A,"International business,marketing,entrepr",, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E121G,IBME/Mikkeli,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E130A,Innovation and software,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E140A,Operations management and systems engine,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E150E,"Internat business,marketing,entrep/LAHTI",, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E151E,Innovation and software/LAHTI,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E152E,Operations management and systems/LAHTI,, -Lappeenrannan 
teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E153E,Lahti/common,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E160D,Innovation and software/KOUVOLA,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E161D,Operations management and system/KOUVOLA,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E162D,Kouvola/common,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E163D,Kouvola,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E170A,EtelƤ-Karjala-instituutti,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E180A,IEM Yhteiset,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E181A,Software Engineering,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E182A,Innovation Management,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E183A,Cost Management,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E184A,Systems Engineering,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E185A,Supply Chain Management,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E1AAA,Alusta LBM Reflex,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E1ABA,Alusta LBM SIM,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E1ACA,Alusta LBM RE-SOURSE,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E1ADA,Alusta LBM DIGI-USER,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E1AEA,Alusta LBM SAWE,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E1AFA,Alusta LBM RED,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F100A,Koke yhteiset,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F101A,"Yk-lisƤt, Koulutus- ja kehittƤmiskeskus",, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F110A,TƤydennyskoulutus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F120A,Tutkintotavoitteinen koulutus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F130A,Avoin yliopisto,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F140A,Tutkimus- ja kehittƤmistoiminta,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F150A,Asiakkuudet,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F200A,Kirjasto,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F201A,"Yk-lisƤt, Kirjasto",, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F210A,Yhteinen tiedekirjasto,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F220A,Imatran toimipiste,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B111A,PƄƄT Toimistopalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C221A,PƄƄT Toimitusketjun johtaminen/TBRC,, +Lappeenrannan teknillinen 
yliopisto,Lappeenranta University of Technology,,01914,,23C220A,PƄƄT Toimitusketjun johtamisen laborator,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C200A,PƄƄT Tuotantotalouden osaston yhteiset,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C261D,PƄƄT Tuotantotalous/Kouvola,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C260D,PƄƄT Tuotantotalous/Kouvola yhteiset,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23C241M,PƄƄT Tuotantotalous/Lapua,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B321A,PƄƄT Tuote- ja prosessikehitys,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B620J,PƄƄT Varkaus,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B241G,PƄƄT VihreƤ kemia,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B325A,PƄƄT Virtausprosessi,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B216A,PƄƄT Ydinturvallisuuden tutkimusyksikkƶ,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B215A,PƄƄT Ydinvoimatekniikan laboratorio,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B320A,PƄƄT Yksikkƶoperaatiot,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A240A,PƄƄT Yritysjuridiikka,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B470A,PƄƄT ƄlykkƤiden koneiden laboratorio,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B610A,PƄƄT ƄlykkƤiden koneiden laboratorio,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23A360E,PƄƄTLahden yksikkƶ,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K640A,Rahoitus ja tarkastustoiminta,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F230A,Saimia Kirjasto,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F300A,PƄƄT EtelƤ-Karjala-instituutti,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F400A,PƄƄT Nordi,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23G300A,PƄƄT TBRC,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J100A,Yliopiston johto,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J110A,Toiminta,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J120A,PƄƄT Strategia ja yhteiskuntasuhteet,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J130A,Lakiasiat,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J200A,Toiminnan ohjaus ja laadunhallinta,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J210A,Toiminnan suunnittelu,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J300A,Erityishankkeet,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J310A,LUMA,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J400A,LUT Investointiohjelma,, -Lappeenrannan teknillinen yliopisto,Lappeenranta 
University of Technology,,01914,,23J410A,IPR ja patentit,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J500A,Alusta REFLEX,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J501A,Alusta SIM,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J502A,Aloittavat tutkimusalustat,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J503A,Alusta RE-SOURSE,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J504A,Alusta DIGI-USER,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J505A,Alusta SAWE,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J506A,Alusta RED,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K100A,Opintotoimisto,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K110A,Hakupalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K120A,Opintoasioiden kehittƤminen,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K200A,Talouspalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K210A,Kirjanpito,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB52A,Saimian kirjastopalvelut,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K220A,SisƤinen laskenta,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K300A,Tutkimus- ja innovaatiopalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K301A,Nordi,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K630A,SisƤinen laskenta,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E181A,Software Engineering,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E110A,"Strategy,management and accounting",, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E185A,Supply Chain Management,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23E184A,Systems Engineering,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K302A,TBRC,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K310A,Kansallinen rahoitus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K320A,EU-rahoitus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K330A,Kumppanuudet ja VenƤjƤtoiminnot,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K340A,Tutkimuspolitiikka,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K400A,IS&T,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K410A,PƄƄT KƤyttƶtuki,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K420A,PƄƄT Project Office,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K430A,PƄƄT Palvelutuotanto,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K440A,PƄƄT AV-palvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta 
University of Technology,,01914,,23K450A,Palvelukehitys,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K460A,Perusresurssipalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K461A,Tulostus- ja kopiointipalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K462A,AV-palvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K470A,KƤyttƶtukipalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K471A,Hankintapalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K472A,Mikroluokkapalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K473A,Puhelinpalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K480A,Opetusteknologia,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K490A,PƄƄT Digipaino,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K500A,HR,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K600A,Tilapalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K200A,Talouspalvelut,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K610A,Talouspalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K620A,Kirjanpito,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K630A,SisƤinen laskenta,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K640A,Rahoitus ja tarkastustoiminta,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K680A,Tilapalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K690A,Tilakustannukset,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K700A,ViestintƤpalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K800A,KansainvƤlisyys,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K810A,"ApurahajƤrjestelmƤ, LUT",, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K820A,Opiskelijapalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K830A,Koulutuspalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K840A,TietojƤrjestelmien kehittƤminen,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K850A,Apurahat,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K860A,Kielikeskus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K870A,Opetusteknologia,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K900A,Yliopistopaino,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KA00A,LUT tohtorikoulu,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B100A,Teknillisen tiedekunnan yhteiset,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B353A,Termiset yksikkƶoperaatiot,, +Lappeenrannan teknillinen yliopisto,Lappeenranta 
University of Technology,,01914,,23K600A,Tilapalvelut,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB10A,Toiminnan ohjaus ja laadun hallinta,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23J200A,Toiminnan ohjaus ja laadunhallinta,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB11A,Toiminnan suunnittelu,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB20A,IS&T + Project office,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB21A,Front Office,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB22A,Back Office,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB23A,Yliopistopaino,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB40A,LƤhipalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B110A,Toimistopalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K461A,Tulostus- ja kopiointipalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F140A,Tutkimus- ja kehittƤmistoiminta,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F120A,Tutkintotavoitteinen koulutus,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F110A,TƤydennyskoulutus,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KC40A,TƤydennyskoulutus,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KC50A,Urapalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K700A,ViestintƤpalvelut,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B362A,Virtausprosessit,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F210A,Yhteinen tiedekirjasto,, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB50A,Yhteiskirjasto,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB51A,LUTin kirjastopalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB52A,Saimian kirjastopalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB53A,Imatran toimipiste,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F201A,"Yk-lisƤt, Kirjasto",, Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB54A,"Yk-lisƤt, Kirjasto",, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KC10A,"Innovaatiot ja yrittƤjyys, yhteiset",, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KC11A,"Innovaatiot ja yrittƤjyys, yk-laskenta",, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23F101A,"Yk-lisƤt, Koulutus- ja kehittƤmiskeskus",, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23B102A,"Yk-lisƤt,Tekn.tdk yhteiset",, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23K900A,Yliopistopaino,, +Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KB23A,Yliopistopaino,, Lappeenrannan 
teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KC20A,YrittƤjyys,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KC30A,Innovaatiopalvelut,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KC40A,TƤydennyskoulutus,, -Lappeenrannan teknillinen yliopisto,Lappeenranta University of Technology,,01914,,23KC50A,Urapalvelut,, -Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,,02629,,,,http://isni.org/isni/0000000404001203,1179 -Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,,02629,,HL,Liiketalous,, -Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,,02629,,HS,Turvallisuusala,, -Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,,02629,,LAU,Laurea,, -Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,,02629,,MP,Matkailu- ja palveluliiketoiminta,, -Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,,02629,,NT,TietojenkƤsittely,, -Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,,02629,,SF,Fysioterapia,, -Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,,02629,,SH,Terveysala,, -Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,,02629,,SK,Kauneudenhoitoala,, -Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,,02629,,SS,Sosiaaliala,, -Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,,02629,,ST,Sosiaali- ja terveysala,, -Maanpuolustuskorkeakoulu, The National Defence University,Fƶrsvarshƶgskolan,02358,,,,http://isni.org/isni/0000000406476253,1232 -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,,,http://isni.org/isni/0000000119134955,24 -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,180,Rehtori,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,189,Talous- ja hallintopalvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,190,Henkilƶstƶpalvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,191,Tietohallintopalvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,207,Kirjasto- ja tietopalvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,208,Kv-tukipalvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,209,Opintoasiainpalvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,219,JƤrjestelmƤyllƤpito,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,220,KƤyttƤjƤtuki,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,221,Tiedonhallinta- ja jƤrjestelmƤpalvelut,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,,01914,,,,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23J502A,Aloittavat tutkimusalustat,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23K850A,Apurahat,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23K652A,Asiakaspalvelu,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23KD31A,Asiakirjapalvelut,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT
University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23KD43A,Asiakirjapalvelut,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B237A,Aurinkotalousprofessuuri,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B220G,Bioenergian laboratorio/Mikkeli,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E122A,Bus yhteiset,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E122A,Business studies,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B233A,CDMC-laboratorio,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B301A,CST,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B24BE,Circular Economy Lahti,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3B0A,Common IEM,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3E3E,"Common, Lahti",, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B360A,Computational and Process Engineering,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3B3A,Cost Management,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23K320A,EU-rahoitus,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B21BE,EnTeDI Lahti,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B21AN,EnTeDI Turku,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B210A,Energiatekniikan laboratorio,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3E0E,Entrepreneurship and management,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23J300A,Erityishankkeet,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B202N,FITECH Turku,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E129A,Finance and Business Analytics(FBA),, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23J320A,Green Campus Open -kiihdyttƤmƶtoiminta,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B371G,Green Chemistry,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23KE10A,HR- ja talouspalvelut,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto
LUT,01914,,23KE42A,"Hankinta, tarkastus, verotus, sijoitustoiminta ja",, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23KE21A,Henkilƶstƶn kehittƤmis- ja hyvinvointipalvelut,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B251A,Hitsaustekniikan laboratorio,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K310A,Huipputiederahoitus,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E104A,Hyneman Center,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E123A,IBE,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E121G,IBM/Mikkeli,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K653A,ICT Infra,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B3B6A,IEM/Management Research lab,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23J410A,IPR ja patentit,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K651A,IS&T + Project office,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K664A,Imatran toimipiste,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B25AE,Industrial Desing Engineering,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B3B2A,Innovation Management,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B3E1E,Innovation management/LAHTI,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E123A,International Business and Entrepreneurship (IBE),, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E121G,International Business and Entrepreneurship-Mikkel,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E12DE,International Marketing (IM) Lahti,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E125A,International Marketing(IM),, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E100A,Kanslia,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B30AA,Kemiantekniikan koulutusohjelma,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B241E,KestƤvyysmuutoksen laboratorio,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B242E,KestƤvyystiede,, +Lappeenrannanā€“Lahden teknillinen yliopisto 
LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B240A,KestƤvƤt ratkaisut,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23K860A,Kielikeskus,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23KE40A,Kirjanpito ja tilinpƤƤtƶs,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E12AA,Knowledge Management(KM),, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E12BE,Knowledge Management-Lahti(KM),, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B258A,Konedynamiikka,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B255A,Koneensuunnittelun laboratorio,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23KZ10A,Korkeakoulupalvelut,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23K830A,Koulutuspalvelut,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23K821A,Kouluyhteistyƶ,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3D0D,Kouvola,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B257A,Kuitukomposiittilaboratorio,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23K666A,LABin kirjastopalvelut,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23K667A,LAMKin kirjastopalvelut,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E124A,LAMO,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E100A,LBM Office,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E105A,LBM/Management Research lab,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B303A,LENS tekniset palvelut,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23J400A,LUT Investointiohjelma,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B250A,LUT Kone yhteiset,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23J330A,LUT Oivallus,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B200A,LUT School of Energy Systems yhteiset,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B290A,LUT Voima,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto
LUT,01914,,23KZ11A,"LUT-yk, KKP",, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K662A,LUTin kirjastopalvelut,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23KD40A,Lakipalvelut,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B253A,Lasertyƶstƶ,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B218A,Laskennallinen virtausmekaniikka,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B30BA,Laskennallisen tekniikan koulutusohjelma,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B211A,LƤmpƶ- ja virtaustekniikan laboratorio,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E125A,MIMM,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E129A,MSF&MBAN,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E126A,MSIS,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E127A,MSM,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23KE30A,Matkustus- ja kv-tyƶskentelyn palvelut,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23KD32A,Matkustuspalvelut,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B307A,Opetus LENS,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K870A,Opetusteknologia,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K820A,Opiskelijapalvelut,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K601A,Oppimis- ja tilapalvelut,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K655A,PalvelujƤrjestelmƤt,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23KD12A,Partneruudet ja verkostot,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B3E2E,Performance management/LAHTI,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B370A,Physics,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23J504A,Platform DIGI-USER,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E1AAA,Platform LBM Reflex,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E1ADA,Platform LBM DIGI-USER,, +Lappeenrannanā€“Lahden teknillinen yliopisto 
LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E1ACA,Platform LBM RE-SOURCE,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E1AFA,Platform LBM RED,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E1AAA,Platform LBM Reflex,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E1AEA,Platform LBM SAWE,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E1ABA,Platform LBM SIM,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3AAA,Platform LENS Reflex,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3ADA,Platform LENS DIGI-USER,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3ACA,Platform LENS RE-SOURCE,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3AFA,Platform LENS RED,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3AEA,Platform LENS SAWE,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3ABA,Platform LENS SIM,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B2AAA,Platform LES Reflex,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B2ADA,Platform LES DIGI-USER,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B2ACA,Platform LES RE-SOURCE,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B2AFA,Platform LES RED,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B2AEA,Platform LES SAWE,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B2ABA,Platform LES SIM,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23J503A,Platform RE-SOURCE,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23J506A,Platform RED,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23J500A,Platform REFLEX,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23J505A,Platform SAWE,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23J501A,Platform SIM,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23K350A,Projektien hallinta,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23KE43A,Projektipalvelut,,
+Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23K300A,Rahoitushakupalvelut,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23KE20A,Rekrytointi ja tyƶsopimuspalvelut,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23K663A,Saimian kirjastopalvelut,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B309A,School of Engineering Science yhteiset,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E102A,Schoolin yhteiset,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B350A,Separation Science,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B350A,Separation and Purification Technology,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23KD30A,Sihteeripalvelut,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3B1A,Software Engineering,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3C0A,Software Engineering,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B235A,Sovelletun elektroniikan laboratorio,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23KD11A,Strateginen henkilƶstƶ kehittƤminen,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23KD10A,Strateginen suunnittelu ja toiminnan ohjaus,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E12CA,Strategy and accounting (SA),, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3B5A,Supply Chain Management,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E127A,Supply Management(SM),, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3B4A,Systems Engineering,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B231A,SƤhkƶmarkkinalaboratorio,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B232A,SƤhkƶnkƤyttƶtekniikan laboratorio,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B230A,SƤhkƶtekniikan laboratorio,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B234A,SƤƤtƶ- ja digitaalitekniikan laboratorio,, +Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23KD42A,TES- ja tyƶnantajapalvelut,, +Lappeenrannan–Lahden teknillinen yliopisto
LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E12AA,TIJO-LPR,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E12BE,TIJO-Lahti,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23KE41A,Talouden suunnittelu ja seuranta,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B212A,Teknillinen termodynamiikka,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B256A,TerƤsrakenteiden laboratorio,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K840A,TietojƤrjestelmien kehittƤminen,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B30CA,Tietotekniikan koulutusohjelma,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K690A,Tilakustannukset,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K691A,Tilakustannukset Saimaa,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K680A,Tilapalvelut,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K681A,Tilapalvelut Lahti,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23J210A,Toiminnan sunnittelu,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23J110A,Toiminta,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B30DA,Tuotantotalouden koulutusohjelma,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B254A,Tuotantotekniikan laboratorio,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K300A,Tutkimuksen palvelut,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K340A,Tutkimuspolitiikka,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23KE21A,Tyƶterveys- tyƶhyvinvointi- ja tyƶsuhdepalvelut,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K880A,TƤydennyskoulutus liiketoiminta,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K881A,TƤydennyskoulutus yhteisrahoitteinen,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B214A,Uusiutuvien energiajƤrjestelmien laboratorio,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B391J,Varkaus,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23KD20A,ViestintƤ- ja markkinointipalvelut,, 
+Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B213A,Virtaustekniikka,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B217A,Ydintekniikka,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K661A,Yhteiskirjasto,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K665A,"Yk-lisƤt, Kirjasto",, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E103A,"Yk-lisƤt, LBM",, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B308A,"Yk-lisƤt, LENS",, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B201A,"Yk-lisƤt, LES",, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23J100A,Yliopiston johto,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K654A,Yliopistopaino,, +Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B259A,ƄlykkƤiden koneiden laboratorio,, +Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,,02629,,,,, +Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,Laurea-ammattikorkeakoulu,02629,,SF,Fysioterapia,, +Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,Laurea-ammattikorkeakoulu,02629,,SK,Kauneudenhoitoala,, +Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,Laurea-ammattikorkeakoulu,02629,,LAU,Laurea,, +Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,Laurea-ammattikorkeakoulu,02629,,HL,Liiketalous,, +Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,Laurea-ammattikorkeakoulu,02629,,MP,Matkailu- ja palveluliiketoiminta,, +Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,Laurea-ammattikorkeakoulu,02629,,ST,Sosiaali- ja terveysala,, +Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,Laurea-ammattikorkeakoulu,02629,,SS,Sosiaaliala,, +Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,Laurea-ammattikorkeakoulu,02629,,SH,Terveysala,, +Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,Laurea-ammattikorkeakoulu,02629,,NT,TietojenkƤsittely,, +Laurea-ammattikorkeakoulu,Laurea University of Applied Sciences,Laurea-ammattikorkeakoulu,02629,,HS,Turvallisuusala,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,02446292,,,,http://isni.org/isni/0000000446686757, +Luonnonvarakeskus,Natural Resources Institute Finland,,4100010,,,,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100411610,Aineistopalvelut HaapastensyrjƤ,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410510,"Aineistopalvelut HaapastensyrjƤ, Suonenjoki",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100411110,Aineistopalvelut Helsinki,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410610,"Aineistopalvelut Joensuu, Savonlinna",, +Luonnonvarakeskus,Natural Resources Institute 
Finland,Naturresursinstitutet,4100010,,4100411310,"Aineistopalvelut Jokioinen, Piikkiƶ kasvintuotanto",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100411410,"Aineistopalvelut Jokioinen, kotielƤintuotanto ja uudet tuotantomuodot",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410410,"Aineistopalvelut Kokkola, SeinƤjoki",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410910,Aineistopalvelut Maaninka,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410910,"Aineistopalvelut Maaninka, Kainuu",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100411010,"Aineistopalvelut Oulu, Siikajoki",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410410,"Aineistopalvelut Parkano, Kokkola, SeinƤjoki",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410710,"Aineistopalvelut Rovaniemi, Inari, Utsjoki",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100411510,Aineistopalvelut Suonenjoki,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110910,Akvaattisten populaatioiden dynamiikka,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310110,BITA Esikunta,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211210,Biojalostusteknologiat ja tuotteet,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211110,Bioraaka-aineiden rakenne ja ominaisuudet,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100510310,Biotalouden kannattavuus,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100510210,Biotalouden tilastot,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310110,Biotalous ja ympƤristƶ / BITA Esikunta (4100310110),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310610,Biotalous ja ympƤristƶ / Hiilen kierron hallinta (4100310610),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310810,Biotalous ja ympƤristƶ / KestƤvyystutkimus ja indikaattorit (4100310810),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310210,Biotalous ja ympƤristƶ / Luonnonvarapolitiikat ja -markkinat (4100310210),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310710,Biotalous ja ympƤristƶ / MaankƤyttƶ ja aluesuunnittelu (4100310710),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310510,Biotalous ja ympƤristƶ / MetsƤvarojen inventointi ja metsƤsuunnittelu (4100310510),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310410,Biotalous ja ympƤristƶ / Virkistys ja luontoarvot (4100310410),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310310,Biotalous ja ympƤristƶ / Yritys- ja ympƤristƶtalous (4100310310),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100311110,Ekosysteemit ja mallinnus,, +Luonnonvarakeskus,Natural Resources Institute 
Finland,Naturresursinstitutet,4100010,,4100211310,Elintarvikkeiden prosessointi ja laatu,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210310,ElƤingenetiikka,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211510,ElƤinravitsemus,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610110,Esikunta/PƤƤjohtaja,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210310,Genomiikka ja jalostus,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610410,Henkilƶstƶpalvelut,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310610,Hiilen kierron hallinta,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410110,INFRA Esikunta,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610710,IT ja digitaaliset ratkaisut,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100111110,Kalastus ja kalavarat,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210210,Kasvigenetiikka,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110610,Kasvinterveys,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310810,KestƤvyystutkimus ja indikaattorit,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110110,LUVA Esikunta,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211010,Liha- ja non-food elƤintuotanto,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310210,Luonnonvarapolitiikat ja -markkinat,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110910,Luonnonvarat / Akvaattisten populaatioiden dynamiikka (4100110910),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110610,Luonnonvarat / Kasvinterveys (4100110610),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110110,Luonnonvarat / LUVA Esikunta (4100110110),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110510,Luonnonvarat / MaaperƤekosysteemit (4100110510),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110710,Luonnonvarat / Metsien terveys ja biodiversiteetti (4100110710),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110310,Luonnonvarat / MetsƤnhoito (4100110310),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110210,Luonnonvarat / Peltokasvien tuotanto (4100110210),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110810,Luonnonvarat / Riistapopulaatioiden dynamiikka (4100110810),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100111010,Luonnonvarat / Soveltava tilastotiede (4100111010),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110410,Luonnonvarat / Vesistƶkuormitus (4100110410),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310710,MaankƤyttƶ ja aluesuunnittelu,, 
+Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110510,MaaperƤekosysteemit,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310710,"Maaseutu, maankƤyttƶ ja luonnonvarojen hallinta",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210710,Maatalouden teknologiat,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210910,Maidontuotanto,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110710,Metsien terveys ja biodiversiteetti,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110310,MetsƤnhoito,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210410,MetsƤnjalostus,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210610,MetsƤteknologia ja logistiikka,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310510,MetsƤvarojen inventointi ja metsƤsuunnittelu,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211410,Nurmet ja kestƤvƤ maatalous,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610110,PalveluryhmƤt / Esikunta/PƤƤjohtaja (4100610110),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610410,PalveluryhmƤt / Henkilƶstƶpalvelut (4100610410),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610710,PalveluryhmƤt / IT ja digitaaliset ratkaisut (4100610710),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610610,PalveluryhmƤt / Talous- ja toimitilat (4100610610),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610210,PalveluryhmƤt / Tutkimus ja asiakkuudet (4100610210),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610510,PalveluryhmƤt / ViestintƤ ja markkinointi (4100610510),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610310,PalveluryhmƤt / Yhteiskuntasuhteet ja hallinto (4100610310),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110210,Peltokasvien tuotanto,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210510,Puutarhateknologiat,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110810,Riistapopulaatioiden dynamiikka,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100111010,Soveltava tilastotiede,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100510110,TIPA Esikunta,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210110,TUJA Esikunta,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610610,Talous- ja toimitilat,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100510310,Tilastopalvelut / Biotalouden kannattavuus (4100510310),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100510210,Tilastopalvelut / Biotalouden tilastot (4100510210),, +Luonnonvarakeskus,Natural Resources Institute 
Finland,Naturresursinstitutet,4100010,,4100510110,Tilastopalvelut / TIPA Esikunta (4100510110),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100510410,Tilastopalvelut / Tilastotuotannon menetelmƤt (4100510410),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100510410,Tilastotuotannon menetelmƤt,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211610,TuotantoelƤinten hyvinvointi,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211210,TuotantojƤrjestelmƤt / Biojalostusteknologiat ja tuotteet (4100211210),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211110,TuotantojƤrjestelmƤt / Bioraaka-aineiden rakenne ja ominaisuudet (4100211110),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211310,TuotantojƤrjestelmƤt / Elintarvikkeiden prosessointi ja laatu (4100211310),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210310,TuotantojƤrjestelmƤt / ElƤingenetiikka (4100210310),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210210,TuotantojƤrjestelmƤt / Kasvigenetiikka (4100210210),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211010,TuotantojƤrjestelmƤt / Liha- ja non-food elƤintuotanto (4100211010),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210710,TuotantojƤrjestelmƤt / Maatalouden teknologiat (4100210710),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210910,TuotantojƤrjestelmƤt / Maidontuotanto (4100210910),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210410,TuotantojƤrjestelmƤt / MetsƤnjalostus (4100210410),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210610,TuotantojƤrjestelmƤt / MetsƤteknologia ja logistiikka (4100210610),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210510,TuotantojƤrjestelmƤt / Puutarhateknologiat (4100210510),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210110,TuotantojƤrjestelmƤt / TUJA Esikunta (4100210110),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210810,TuotantojƤrjestelmƤt / Vesiviljelyratkaisut (4100210810),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610210,Tutkimus ja asiakkuudet,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410510,"Tutkimusinfrastruktuuripalvelut / Aineistopalvelut HaapastensyrjƤ, Suonenjoki (4100410510)",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100411110,Tutkimusinfrastruktuuripalvelut / Aineistopalvelut Helsinki (4100411110),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410610,"Tutkimusinfrastruktuuripalvelut / Aineistopalvelut Joensuu, Savonlinna (4100410610)",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100411210,"Tutkimusinfrastruktuuripalvelut / Aineistopalvelut Jokioinen, Piikkiƶ (4100411210)",, +Luonnonvarakeskus,Natural Resources Institute 
Finland,Naturresursinstitutet,4100010,,4100410910,"Tutkimusinfrastruktuuripalvelut / Aineistopalvelut Maaninka, Kainuu (4100410910)",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100411010,"Tutkimusinfrastruktuuripalvelut / Aineistopalvelut Oulu, Siikajoki (4100411010)",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410410,"Tutkimusinfrastruktuuripalvelut / Aineistopalvelut Parkano, Kokkola, SeinƤjoki (4100410410)",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410710,"Tutkimusinfrastruktuuripalvelut / Aineistopalvelut Rovaniemi, Inari, Utsjoki (4100410710)",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410110,Tutkimusinfrastruktuuripalvelut / INFRA Esikunta (4100410110),, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410810,"Tutkimusinfrastruktuuripalvelut / Vesiviljely Enonkoski, Laukaa, Taivalkoski, Keminmaa (4100410810)",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100111210,Vaelluskalat ja rakennetut joet,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110410,Vesistƶkuormitus,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410810,"Vesiviljely Enonkoski, Inari, Keminmaa, Laukaa, Paltamo, Taivalkoski",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410810,"Vesiviljely Enonkoski, Laukaa, Taivalkoski, Keminmaa",, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210810,Vesiviljelyratkaisut,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610510,ViestintƤ ja markkinointi,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310410,Virkistys ja luontoarvot,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610310,Yhteiskuntasuhteet ja hallinto,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100311010,YmpƤristƶ- ja luonnonvaratalous,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310310,Yritys- ja ympƤristƶtalous,, +Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310910,Yritystalous ja liiketoimintamallit,, +LƤƤkealan turvallisuus- ja kehittƤmiskeskus,The Finnish Medicines Agency,,558005,,,,, +Maanmittauslaitos,National Land Survey of Finland,,4020217,,,,, +Maanpuolustuskorkeakoulu,National Defence University,,02358,,,,, +Maaseutuvirasto,Agency for Rural Affairs in Finland,Landsbygdsverket,24053596,,,,, +Maj ja Tor Nesslingin SƤƤtiƶ,Maj and Tor Nessling Foundation,,02211186,,,,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,,,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,421,Ajoneuvo- ja konetekniikka,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,450,Ajoneuvo- ja konetekniikka,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,472,Akuuttihoidon tiimi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,431,Automaatiotekniikka,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied 
Sciences,Metropolia ammattikorkeakoulu,10065,,476,BiRaYa -tiimi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,461,Clean -tiimi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,463,"ETV, Digi ja Visu -tiimi",, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,267,Electria,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,293,Taloussuunnittelu- ja seurantapalvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,294,Kirjanpito- ja maksuliikennepalvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,315,Kiinteistƶpalvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,319,Koulutuksen kehittƤmispalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,433,Elektroniikka,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,423,EsittƤvƤ taide,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,329,Hakijapalvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,333,Opintotoimistot,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,340,ViestintƤyksikkƶ,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,341,"Kirjasto- ja tietopalvelut, TeKu-tiimi",, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,343,"Kirjasto- ja tietopalvelut, Sote-tiimi",, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,344,"Kirjasto- ja tietopalvelut, Lite-tiimi",, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,357,Opiskeluhyvinvointitoiminta,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,358,Korkeakoulupalvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,400,Liiketoimintapalvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,402,Strategia- ja kehityspalvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,403,Oppimistoiminta,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,404,TKI-toiminta,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,405,Liiketoiminta,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,406,Tuotantotalous,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,407,Rakentaminen ja arkkitehtuuri,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,408,Puhtaat teknologiat,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,409,Liikkuminen ja toimintakyky,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,478,Hankintapalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,478,Hankintapalvelut;;,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,190,Henkilƶstƶpalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,447,Hyvinvointi,, +Metropolia 
ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,451,ICT ja tuotantotalous,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,436,"Informaatiotekniikka ja pelisovellukset, tiimi",, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,477,Jatkuvan oppimisen kehittƤminen,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,477,Jatkuvan oppimisen kehittƤminen;;Development of L,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,405,Jatkuvan oppimisen palvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,219,JƤrjestelmƤyllƤpito,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,418,KansainvƤlinen liiketoiminta,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,437,Kehitysjohtajan yksikkƶ,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,449,Kiinteistƶ- ja rakennusala,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,454,Kiinteistƶ- ja talotekniikan tiimi,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,410,Kiinteistƶ- ja talotekniikka,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,294,Kirjanpito- ja maksuliikennepalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,207,Kirjasto- ja tietopalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,343,"Kirjasto- ja tietopalvelut, Arabia-Myllypuro",, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,344,"Kirjasto- ja tietopalvelut, Karamalmi-MyyrmƤki",, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,344,"Kirjasto- ja tietopalvelut, Lite-tiimi",, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,343,"Kirjasto- ja tietopalvelut, Sote-tiimi",, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,341,"Kirjasto- ja tietopalvelut, TeKu-tiimi",, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,471,Kliinisen hoidon tiimi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,471,Kliinisen hoidon tiimi;;,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,411,Kliinisen hoitotyƶn ja ensihoidon palvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,412,Osallistuminen ja toimintakyky,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,413,Terveyden edistƤmisen palvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,414,Musiikki,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,415,Tieto- ja viestintƤtekniikka,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied 
Sciences,,10065,,416,Kulttuuripalvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,417,SƤhkƶ- ja automaatiotekniikka,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,418,KansainvƤlinen liiketoiminta,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,419,Media,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,420,Terveysalan tutkimuspalvelut ja palvelujohtaminen,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,421,Ajoneuvo- ja konetekniikka,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,422,Sosiaalinen hyvinvointi,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,423,EsittƤvƤ taide,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,424,Konservointi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,465,"Konservointi, 3D, XR -tiimi",, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,358,Korkeakoulupalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,416,Kulttuuripalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,444,Kulttuuripalvelut ja musiikki,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,460,Kulttuurituotannon ja vaatetuksen tiimi,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,425,Kulttuurituotanto,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,426,Muotoilu,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,427,Vaatetus,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,428,"Kv-liiketoiminta, hallinto",, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,474,Kuntoutuksen tiimi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,474,Kuntoutuksen tiimi;;,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,446,Kuntoutus ja tutkiminen,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,429,"Kv-liiketoiminta, YAMK ja TKI",, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,430,SƤhkƶvoimatekniikka,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,431,Automaatiotekniikka,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,428,"Kv-liiketoiminta, hallinto",, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,208,Kv-tukipalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,220,KƤyttƤjƤtuki,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,442,Laki- ja arkistointipalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,468,Liiketalouden AMK-tiimi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia 
ammattikorkeakoulu,10065,,468,Liiketalouden AMK-tiimi;;,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,469,Liiketalouden YAMK- ja KV-tiimi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,452,Liiketalous,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,405,Liiketoiminta,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,409,Liikkuminen ja toimintakyky,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,457,Me3 -tiimi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,419,Media,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,443,"Media, muotoilu ja konservointi",, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,435,Mediatekniikan tiimi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,426,Muotoilu,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,464,"Muotoilu, teknot ja yleisaineet -tiimi",, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,470,Musiikin tiimi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,470,Musiikin tiimi;;,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,414,Musiikki,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,438,NƤyttƶkeskus,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,209,Opintoasiainpalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,333,Opiskelija- ja hakijapalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,357,Opiskeluhyvinvointitoiminta,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,403,Oppimistoiminta,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,319,Oppimistoimintapalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,412,Osallistuminen ja toimintakyky,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,459,Palkanlaskentapalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,448,Puhtaat ja ƤlykkƤƤt ratkaisut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,408,Puhtaat teknologiat,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,475,PƤƤasia -tiimi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,475,PƤƤasia -tiimi;;,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,407,Rakentaminen ja arkkitehtuuri,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,455,Rakentaminen ja arkkitehtuuri -tiimi,, +Metropolia 
ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,180,Rehtorin yksikkƶ,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,462,Smart -tiimi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,458,"Softa, Hyte ja Smart -tiimi",, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,467,Sosiaalialan tiimi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,422,Sosiaalinen hyvinvointi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,402,Strategia- ja kehityspalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,417,SƤhkƶ- ja automaatiotekniikka,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,432,"SƤhkƶ- ja automaatiotekniikka, TKi",, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,433,Elektroniikka,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,430,SƤhkƶvoimatekniikka,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,453,TKI-palvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,404,TKI-toiminta,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,189,Talous- ja hallintopalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,293,Taloussuunnittelu- ja seurantapalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,413,Terveyden edistƤmisen palvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,473,Terveyden edistƤmisen tiimi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,445,Terveys,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,420,Terveysalan tutkimuspalvelut ja palvelujohtaminen,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,221,Tiedonhallinta- ja jƤrjestelmƤpalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,415,Tieto- ja viestintƤtekniikka,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,191,Tietohallintopalvelut,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,434,Tietoverkkotekniikan tiimi,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,435,Mediatekniikan tiimi,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,436,"Informaatiotekniikka ja pelisovellukset, tiimi",, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,437,Kehitysjohtajan yksikkƶ,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,438,NƤyttƶkeskus,, -Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,,,http://isni.org/isni/0000000094586751,1473 -Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10200,Hallintoyksikkƶ,, -Oulun seudun ammattikorkeakoulu,Oulu 
University of Applied Sciences,,02471,,10210,KehittƤminen,, -Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10211,ViestintƤ,, -Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10212,Hallintopalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,315,Toimitilapalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,456,Tuotantotalouden tiimi,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,406,Tuotantotalous,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,400,TƤydennyskoulutus- ja yrityspalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,479,TƤydennyskoulutus- ja yrityspalvelut,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,427,Vaatetus,, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,466,"Vanhustyƶ, kuntoutus, toimintaterapia -tiimi",, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,466,"Vanhustyƶ, kuntoutus, toimintaterapia -tiimi;;",, +Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,340,ViestintƤyksikkƶ,, +MetsƤntutkimuslaitos,Finnish Forest Research Institute,Skogsforskningsinstitutet,404001,,,,http://isni.org/isni/0000000122651136,TWV0c8OkbnR1dGtpbXVzbGFpdG9z +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,,,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102200,Ammatillinen opettajakoulutus,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102200,Ammatillinen opettajakoulutus;;Vocational Teacher,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,104400,Ammatillisen opettajankoulutuksen yksikkƶ,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102600,Energia ja automaatio,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102600,Energia ja automaatio;;Energy and Automation,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102120,Hallintopalvelut,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102120,Hallintopalvelut;;Administration (Services),, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102000,Hallintoyksikkƶ,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,104000,Hallintoyksikkƶ,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102000,Hallintoyksikkƶ;;Administrative Unit,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102300,Hoitoalat (Oulu ja Oulainen),, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102300,Hoitoalat (Oulu ja Oulainen);;Health Care and Nurs,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102750,Informaatioteknologia,, +Oulun ammattikorkeakoulu,Oulu University of Applied 
Sciences,Oulun ammattikorkeakoulu,02471,,102750,Informaatioteknologia;;Information Technology,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,104800,Informaatioteknologian yksikkƶ,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102100,KehittƤminen,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102100,KehittƤminen;;Development,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102700,Konetekniikka,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102700,Konetekniikka;;Mechanical Engineering,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102900,Kulttuuri,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102900,Kulttuuri;;Media and Performing Arts,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,104900,Kulttuurialan yksikkƶ,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,104600,Liiketalouden yksikkƶ,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102500,Liiketalous,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102500,Liiketalous;;Business,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102800,Luonnonvara-ala,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102800,Luonnonvara-ala;;Natural Resources,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102650,Rakentamistekniikka,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102650,Rakentamistekniikka;;Civil EngineeringƤ,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,104500,Sosiaali- ja terveysalan yksikkƶ,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102400,Sosiaaliala ja kuntoutus,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102400,Sosiaaliala ja kuntoutus;;Social Services and Reha,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,104700,Tekniikan ja Luonnonvara-alan yksikkƶ,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102350,Terveydenhuollon erityisalat,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102350,Terveydenhuollon erityisalat;;Special Fields in He,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102110,"ViestintƤ, markkinointi ja aluevaikuttavuus",, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102110,"ViestintƤ, markkinointi ja aluevaikuttavuus;;Comm",, Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10220,Ammatillinen opettajakoulutus,, -Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10230,Hoitoalat (Oulu ja Oulainen),, -Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10235,Terveydenhuollon erityisalat,, -Oulun seudun ammattikorkeakoulu,Oulu University of Applied 
Sciences,,02471,,10240,Sosiaaliala ja kuntoutus,, -Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10250,Liiketalous,, Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10260,Energia ja automaatio,, -Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10265,Rakentamistekniikka,, -Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10270,Konetekniikka,, +Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10212,Hallintopalvelut,, +Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10200,Hallintoyksikkƶ,, +Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10230,Hoitoalat (Oulu ja Oulainen),, Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10275,Informaatioteknologia,, -Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10280,Luonnonvara-ala,, +Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10210,KehittƤminen,, +Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10270,Konetekniikka,, Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10290,Kulttuuri,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,,,http://isni.org/isni/0000000404505858,1478 -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2410,24000,HUMANISTINEN TIEDEKUNTA,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24000,240000,Humanistinen tiedekunta,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240000,2400000,Humanistinen tiedekunta yhteiset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240000,2400001,HuTK koulutus,, +Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10250,Liiketalous,, +Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10280,Luonnonvara-ala,, +Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10265,Rakentamistekniikka,, +Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10240,Sosiaaliala ja kuntoutus,, +Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10235,Terveydenhuollon erityisalat,, +Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10211,ViestintƤ,, +Oulun yliopisto,University of Oulu,,01904,,,,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2406140,6Genesis Flagship,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240614,6Genesis Flagship,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240807,AIKOPA,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2408071,AIKOPA,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2410,24051,ARKKITEHTUURIN TIEDEKUNTA,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24000,240030,Aate- ja oppihistoria,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240030,2400300,Aate- ja oppihistoria,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24000,240031,Englantilainen filologia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240031,2400310,Englantilainen filologia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24000,240032,Germaaninen filologia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240032,2400320,Germaaninen filologia,, -Oulun 
yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24000,240033,Historia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240033,2400330,Historia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24000,240034,Informaatiotutkimus ja viestintƤ,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240034,2400340,Informaatiotutkimus ja viestintƤ,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240034,2400341,TiedeviestinnƤn maisteriohjelma,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24000,240035,Kirjallisuus ja elokuvatutkimus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240035,2400350,Kirjallisuus ja elokuvatutkimus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24000,240036,Kulttuuriantropologia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240036,2400360,Kulttuuriantropologia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24000,240037,Logopedia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240037,2400370,Logopedia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24000,240038,Pohjoismainen filologia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240038,2400380,Pohjoismainen filologia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24000,240039,Suomen kieli,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240039,2400390,Suomen kieli,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2400331,Aate- ja oppihistoria,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240840,Aikuiskoulutus,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2408400,Aikuiskoulutus,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2406130,Allied ICT Finland,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240613,Allied ICT Finland,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2407310,Alueellinen erinomaisuus,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240731,Alueellinen erinomaisuus,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2430,Alueyksikƶt,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2408061,Analyyttinen kemia/bioanalytiikka,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24000,240041,Arkeologia,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240041,2400410,Arkeologia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24000,240020,Giellagas-instituutti,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240020,2400200,Giellagas-instituutti,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2410,24010,KASVATUSTIETEIDEN TIEDEKUNTA,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24010,24011,KASVATUSTIETEIDEN TIEDEKUNTA ilman harjoittelukouluja,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24011,240100,Kasvatustieteiden tiedekunta yhteiset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240100,2401000,Kasvatustieteiden tiedekunta yhteiset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240100,2401001,KTK koulutus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24011,240103,Oppiminen ja oppimisprosessit,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240103,2401030,Oppiminen ja 
oppimisprosessit,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24011,240104,"Opettajat, opettaminen ja kasvatusyhteisöt",,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240104,2401040,"Opettajat, opettaminen ja kasvatusyhteisöt",,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24011,240105,"Kasvatuksen arvot, aatteet ja yhteiskunnalliset kontekstit",,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240105,2401050,"Kasvatuksen arvot, aatteet ja yhteiskunnalliset kontekstit",,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24010,24015,Oulun yliopiston harjoittelukoulut,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24015,240150,Oulun yliopiston harjoittelukoulu,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240150,2401500,Oulun yliopiston harj.koulu yhteiset,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24015,240151,"Oulun normaalikoulu, Linnanmaa (0-6)",,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240151,2401510,"Oulun normaalikoulu, Linnanmaa (0-6)",,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24015,240152,"Oulun normaalikoulu, Linnanmaa (7-9)",,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240152,2401520,"Oulun normaalikoulu, Linnanmaa (7-9)",,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24015,240153,"Oulun normaalikoulu, Koskela 1-6",,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240153,2401530,"Oulun normaalikoulu, Koskela 1-6",,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24015,240155,"Oulun normaalikoulu, lukio",,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240155,2401550,"Oulun normaalikoulu, lukio",,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,2410,24020,LUONNONTIETEELLINEN TIEDEKUNTA,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240200,Luonnontieteellinen tiedekunta yhteiset,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240200,2402000,Luonnontieteellinen tiedekunta yhteiset,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240211,Kemikaalipalvelut,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240211,2402110,Kemikaalipalvelut,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240206,Maantiede,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240206,2402060,Maantiede,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2400335,Arkeologia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240510,Arkkitehtuuri,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405100,Arkkitehtuuri,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24051,240510,Arkkitehtuurin tiedekunta,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240510,2405100,Arkkitehtuurin tiedekunta,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240770,2407701,Arktinen lääketiede,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409390,Asiakaspalvelu,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409305,Asiakirjapalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409140,Aulapalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409125,Autopaikat,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240225,Avaruusfysiikan ja tähtitieteen tutkimusyksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402250,Avaruusfysiikan ja tähtitieteen tutkimusyksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240215,Avaruusilmasto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402150,Avaruusilmasto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240840,2408406,Avoin yliopisto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24031,BIOKEMIAN JA MOLEKYYLILÄÄKETIETEEN TIEDEKUNTA,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,2410,24031,BIOKEMIAN JA MOLEKYYLILÄÄKETIETEEN TIEDEKUNTA,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24066,Biocenter,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240681,Biocenter,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2406810,Biocenter,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24067,Biocenter Core,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240670,Biocenter Core,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2406700,Biocenter Core,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240262,Biocomputing & COPD,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402620,Biocomputing & COPD,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24089,Biodiversiteettiyksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240212,Biodiversiteettiyksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402120,Biodiversiteettiyksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240201,Biokemian ja molekyylilääketieteen tiedekunta,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402010,Biokemian ja molekyylilääketieteen tiedekunta,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240230,Biologian ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402300,Biologian ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240342,Biolääketieteellinen tutkimusyksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2403420,Biolääketieteellinen tutkimusyksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240267,Biomedical structural biology,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402670,Biomedical structural biology,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240599,Biomimetiikka ja älykkäät järjestelmät,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405990,Biomimetiikka ja älykkäät järjestelmät,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240343,Biopankki Borealis,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2403430,Biopankki Borealis,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24062,240585,Biosignaalien analyysi,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240585,2405850,Biosignaalien analyysi,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405851,Biosignaalien analyysi,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24092,240775,CEE Innovaatiokeskittymä,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240775,2407750,CEE Innovaatiokeskittymä,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24080,240806,CEMIS-Oulu,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240806,2408060,CEMIS-Oulu,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240309,CERH,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2403090,CERH,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2403007,CHT,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24062,240700,CIE 31.12.2015 saakka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240587,CWC - Radioteknologiat,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405870,CWC - Radioteknologiat,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240612,CWC - Verkot ja järjestelmät,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2406120,CWC - Verkot ja järjestelmät,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402740,Cancer Genomics,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240274,Cancer Genomics,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240271,Cell-matrix biology,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402710,Cell-matrix biology,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24092,240720,Center for Health and Technology,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240720,2407200,Center for Health and Technology,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409690,Controller lähipalvelutiimi,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240969,Controller lähipalvelutiimi,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240969,Controlling,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409690,Controlling,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240273,Disease Networks,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402730,Disease Networks,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240271,ECM and Hypoxia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402710,ECM and Hypoxia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240334,ELITE,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2403340,ELITE,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240260,Eklund research group,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240260,2402600,Eklund research group,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240213,Ekologia,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240213,2402130,Ekologia,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240214,Genetiikka ja fysiologia,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240214,2402140,Genetiikka ja fysiologia,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240215,Avaruusilmasto,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240215,2402150,Avaruusilmasto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402130,Ekologian ja genetiikan tutkimusyksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240213,Ekologian ja genetiikan tutkimusyksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240583,Elektroniikan piirit ja järjestelmät,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405830,Elektroniikan piirit ja järjestelmät,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240208,"Empiirinen ohjelmistotuotanto ohjelmistoissa, jär",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402080,"Empiirinen ohjelmistotuotanto ohjelmistoissa, jär",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24062,240208,"Empiirinen ohjelmistotuotanto ohjelmistoissa, järjestelmissä ja palveluissa",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240208,2402080,"Empiirinen ohjelmistotuotanto ohjelmistoissa, järjestelmissä ja palveluissa",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240548,Energia- ja ympäristötekniikan tutkimusryhmä,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240548,2405480,Energia- ja ympäristötekniikan tutkimusryhmä,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24000,240031,Englantilainen filologia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2400310,Englantilainen filologia,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240216,Epäorgaaninen kemia,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240216,2402160,Epäorgaaninen kemia,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240217,Kestävä kemia,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240217,2402170,Kestävä kemia,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240207,Matematiikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240207,2402070,Matematiikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240218,Sovellettu ja laskennallinen matematiikka 31.7.2016 saakka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240218,2402180,Sovellettu ja laskennallinen matematiikka 31.7.2016 saakka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240219,Sovellettu matematiikka ja tilastotiede,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240219,2402190,Sovellettu matematiikka ja tilastotiede,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240220,Neurobiofysiikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240220,2402200,Neurobiofysiikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240221,Molekylaariset järjestelmät,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240221,2402210,Molekylaariset järjestelmät,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240222,NMR Spectroskopia,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240222,2402220,NMR Spectroskopia,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240223,Teoreettinen fysiikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240223,2402230,Teoreettinen fysiikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240224,Tähtitiede,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240224,2402240,Tähtitiede,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240225,Ionosfäärifysiikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240225,2402250,Ionosfäärifysiikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240230,Biologian ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240230,2402300,Biologian ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240231,Fysiikan ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240231,2402310,Fysiikan ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240232,Kemian ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240232,2402320,Kemian ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240233,Maantieteen ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240233,2402330,Maantieteen ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240234,Matematiikan ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240234,2402340,Matematiikan ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240234,2402341,OuLUMA,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,2410,24025,KAIVANNAISALAN TIEDEKUNTA,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2440,Erillisyksiköt,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24064,Eudaimonia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240640,Eudaimonia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2406400,Eudaimonia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240408,Executive Education,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2404080,Executive Education,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240542,FabLab TTK,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405420,FabLab TTK,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405943,Fablab Infrastruktuuri,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240231,Fysiikan ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402310,Fysiikan ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240214,Genetiikka ja fysiologia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240214,2402140,Genetiikka ja fysiologia,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24025,240250,Geotieteen tutkimusryhmä,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240250,2402500,Geotieteen tutkimusryhmä,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240250,2402501,KaTK koulutus,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24025,240251,Kaivos- ja rikastustekniikan tutkimusryhmä,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240251,2402510,Kaivos- ja rikastustekniikan tutkimusryhmä,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,2410,24030,LÄÄKETIETEELLINEN TIEDEKUNTA,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240300,Lääketieteellinen tiedekunta yhteiset,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240300,2403000,Lääketieteellinen tiedekunta yhteiset,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240300,2403002,Lääketieteen ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240300,2403003,Hammaslääketieteen ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240300,2403004,Terveystieteiden ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240305,Suun terveyden tutkimusyksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240305,2403050,Suun terveyden tutkimusyksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240309,CERH,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240309,2403090,CERH,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240331,Medical Research Center (MRC),,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240331,2403310,Medical Research Center (MRC),,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240332,Pohjois-Suomen syntymäkohortti,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240332,2403320,Pohjois-Suomen syntymäkohortti,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240333,Sisätautien tutkimusyksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240333,2403330,Sisätautien tutkimusyksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240334,ELITE,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240334,2403340,ELITE,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240335,KNK- ja silmätautien tutkimusyksikkö 31.1.2016 saakka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240335,2403350,KNK- ja silmätautien tutkimusyksikkö 31.1.2016 saakka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240336,Neurotieteen tutkimusyksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240336,2403360,Neurotieteen tutkimusyksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240337,PEDEGO-tutkimusyksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240337,2403370,PEDEGO-tutkimusyksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240338,"Kirurgian, anestesiologian ja tehohoidon tutkimusyksikkö",,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240338,2403380,"Kirurgian, anestesiologian ja tehohoidon tutkimusyksikkö",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24000,240032,Germaaninen filologia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240032,2400320,Germaaninen filologia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2400311,Germaaninen filologia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240020,Giellagas-instituutti,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2400200,Giellagas-instituutti,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402650,Glycan biosynthesis,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240265,Glycan biosynthesis,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24000,HUMANISTINEN TIEDEKUNTA,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240970,Hallituksen vararahasto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409700,Hallituksen vararahasto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2403003,Hammaslääketieteen ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409920,Hankinnat ja kilpailutus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240992,Hankinnat ja kilpailutus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240810,2408101,Hankintapalvelu,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409160,Hankintapalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24192,Henkilöstö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240941,2409471,Henkilöstöhallinto keskitetyt palvelut (KEPA),,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240922,Henkilöstöpalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409220,Henkilöstöpalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240941,Henkilöstöpalvelut lähipalvelutiimi,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409471,Henkilöstöpalvelut lähipalvelutiimi,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240261,Hiltunen research group,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240261,2402610,Hiltunen research group,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24000,240033,Historia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2400330,Historia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240033,"Historian, kulttuurin ja viestintätieteiden tutki",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2403390,Hoitotieteen ja terveyshallintotieteen tutkimusyks,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240339,Hoitotieteen ja terveyshallintotieteen tutkimusyks,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240339,Hoitotieteen ja terveyshallintotieteen tutkimusyksikkö,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240339,2403390,Hoitotieteen ja terveyshallintotieteen tutkimusyksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240340,"Lääketieteellisen kuvantamisen, fysiikan ja tekniikan tutkimusyksikkö",,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240340,2403400,"Lääketieteellisen kuvantamisen, fysiikan ja tekniikan tutkimusyksikkö",,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240341,Syövän ja translationaalisen lääketieteen tutkimyksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240341,2403410,Syövän ja translationaalisen lääketieteen tutkimyksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240342,Biolääketieteellinen tutkimusyksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240342,2403420,Biolääketieteellinen tutkimusyksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240343,Biopankki Borealis,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240343,2403430,Biopankki Borealis,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,2410,24040,OULUN YLIOPISTON KAUPPAKORKEAKOULU,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24040,240400,Oulun yliopiston kauppakorkeakoulu,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240400,2404000,Oulun yliopiston kauppakorkeakoulu yhteiset,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2400001,HuTK koulutus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240000,Humanistinen tiedekunta,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2400000,Humanistinen tiedekunta yhteiset,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402700,Hypoxia & collagens,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240270,Hypoxia & collagens,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402630,Hypoxia response,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240263,Hypoxia response,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409380,ICT käyttöpalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24197,ICT-palvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24000,240034,Informaatiotutkimus ja viestintä,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240034,2400340,Informaatiotutkimus ja viestintä,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2400332,Informaatiotutkimus ja viestintä,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24068,Infotech,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240680,Infotech,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2406800,Infotech,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409380,Infrapalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2445,Infrastruktuuri,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24092,240949,Innovaatio- ja yrittäjyyspalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240949,2409245,Innovaatio- ja yrittäjyyspalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24290,Innovaatiokeskus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402741,Integrin functions,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409492,International Avenue,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240225,Ionosfäärifysiikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402250,Ionosfäärifysiikka,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24040,240402,Johtamisen ja kansainvälisen liiketoiminnan yksikkö,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240402,2404020,Johtamisen ja kansainvälisen liiketoiminnan yksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24040,240403,Taloustieteen yksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240403,2404030,Taloustieteen yksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24040,240404,Laskentatoimen yksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240404,2404040,Laskentatoimen yksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24040,240405,Markkinoinnin yksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240405,2404050,Markkinoinnin yksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24040,240406,Rahoituksen yksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240406,2404060,Rahoituksen yksikkö,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24040,240407,Martti Ahtisaari instituutti,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240407,2404070,Martti Ahtisaari Instituutti,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24040,240408,Executive Education,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240408,2404080,Executive Education,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,2410,24050,TEKNILLINEN TIEDEKUNTA,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240500,Teknillinen tiedekunta yhteiset,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240500,2405000,Teknillinen tiedekunta yhteiset,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240541,Tiedekuntalaboratorio,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240541,2405410,Tiedekuntalaboratorio,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240542,FabLab TTK,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240542,2405420,FabLab TTK,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240543,Konetekniikan ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240543,2405430,Konetekniikan ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240544,Prosessi- ja ympäristötekniikan ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240544,2405440,Prosessi- ja ympäristötekniikan ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240545,Tuotantotalouden ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240545,2405450,Tuotantotalouden ala,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240546,Tohtorikoulutettavat,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240546,2405460,Tohtorikoulutettavat,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240547,Opetustilat,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240547,2405470,Opetustilat,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240548,Energia- ja ympäristötekniikan tutkimusryhmä,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240548,2405480,Energia- ja ympäristötekniikan tutkimusryhmä,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240538,Työpaja,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240538,2405380,Työpaja,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240550,Prosessi- ja ympäristötekniikan osasto (ei käytössä),,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240550,2405500,Prosessi- ja ympäristötekniikan os. yht. (ei käytössä),,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240550,2405610,Minipilot-rikastamo (ei käytössä),,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240532,Mekatroniikka ja konediagnostiikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240532,2405320,Mekatroniikka ja konediagnostiikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240533,Koneensuunnittelu,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240533,2405330,Koneensuunnittelu,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240534,Materiaali- ja tuotantotekniikan tutkimusryhmä,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240534,2405340,Materiaali- ja tuotantotekniikan tutkimusryhmä,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240536,Rakenteet ja rakentamisteknologia,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240536,2405360,Rakennesuunnittelun ja rakentamisteknologian tutkimusryhmä,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240552,Vesi- ja ympäristötekniikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240552,2405520,Vesi- ja ympäristötekniikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240554,Kemiallinen prosessitekniikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240554,2405540,Kemiallinen prosessitekniikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240555,Ympäristö- ja kemiantekniikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240555,2405550,Ympäristö- ja kemiantekniikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240557,Kuitu- ja partikkelitekniikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240557,2405570,Kuitu- ja partikkelitekniikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240558,Säätötekniikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240558,2405580,Säätötekniikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240559,Systeemitekniikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240559,2405590,Systeemitekniikka,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240560,Prosessimetallurgia,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240560,2405600,Prosessimetallurgia,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240570,Tuotantotalous,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240570,2405700,Tuotantotalous,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240579,Konepaja- ja metalliosaamisen innovaatiokeskus,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240579,2405790,Konepaja- ja metalliosaamisen innovaatiokeskus,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,2410,24031,BIOKEMIAN JA MOLEKYYLILÄÄKETIETEEN TIEDEKUNTA,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240201,Biokemian ja molekyylilääketieteen tiedekunta,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240201,2402010,Biokemian ja molekyylilääketieteen tiedekunta,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240260,Eklund research group,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240260,2402600,Eklund research group,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240261,Hiltunen research group,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240261,2402610,Hiltunen research group,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2460,Johto ja palvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24092,Johto ja palvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240921,Johto ja palvelut - yhteiset,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409210,Johto ja palvelut - yhteiset,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240611,Jokapaikan tietotekniikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2406110,Jokapaikan tietotekniikka,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240262,Juffer and Ohlmeier research group,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240262,2402620,Juffer and Ohlmeier research group,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,2410,24025,KAIVANNAISALAN TIEDEKUNTA,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24010,KASVATUSTIETEIDEN TIEDEKUNTA,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24011,KASVATUSTIETEIDEN TIEDEKUNTA,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24011,KASVATUSTIETEIDEN TIEDEKUNTA ilman harjoittelukoul,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24010,24011,KASVATUSTIETEIDEN TIEDEKUNTA ilman harjoittelukouluja,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240335,KNK- ja silmätautien tutkimusyksikkö 31.1.2016 saakka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240335,2403350,KNK- ja silmätautien tutkimusyksikkö 31.1.2016 saakka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24073,240731,KSI Aluekehittäminen,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240731,2407310,KSI Aluekehittäminen,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24073,240734,KSI Maanalainen fysiikka (CUPP),,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240734,2407340,KSI Maanalainen fysiikka (CUPP),,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240733,KSI Mikroyrittäjyys (MikroY),,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2407330,KSI Mikroyrittäjyys (MikroY),,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24073,240736,KSI Raahe,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240736,2407360,KSI Raahe,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240732,KSI Tulevaisuuden tuotantoteknologiat (FMT),,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2407320,KSI Tulevaisuuden tuotantoteknologiat (FMT),,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2401001,KTK koulutus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240250,2402501,KaTK koulutus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402500,Kaivannaisala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240250,Kaivannaisala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24025,240251,Kaivos- ja rikastustekniikan tutkimusryhmä,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240251,2402510,Kaivos- ja rikastustekniikan tutkimusryhmä,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24080,Kajaanin yliopistokeskus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240800,Kajaanin yliopistokeskus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2408000,Kajaanin yliopistokeskus yhteiset,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409155,Kalusteasentajat,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240925,Kampuspalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24198,Kampuspalvelut,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240263,Karppinen research group,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240263,240002630,Karppinen research group,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240264,Kastaniotis research group,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240264,2402640,Kastaniotis research group,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240105,"Kasvatuksen arvot, aatteet ja yhteiskunnalliset ko",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2401050,"Kasvatuksen arvot, aatteet ja yhteiskunnalliset ko",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24011,240105,"Kasvatuksen arvot, aatteet ja yhteiskunnalliset kontekstit",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240105,2401050,"Kasvatuksen arvot, aatteet ja yhteiskunnalliset kontekstit",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240100,Kasvatustieteiden tiedekunta yhteiset,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2401000,Kasvatustieteiden tiedekunta yhteiset,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409391,Kehittämispalvelut,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240265,Kellokumpu and Glumoff research group,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240265,2402650,Kellokumpu and Glumoff research group,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240554,Kemiallinen prosessitekniikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405540,Kemiallinen prosessitekniikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240232,Kemian ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240232,2402320,Kemian ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240540,Kemian ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405400,Kemian ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240211,Kemikaalipalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240211,2402110,Kemikaalipalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24073,Kerttu Saalasti instituutti,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240730,Kerttu Saalasti instituutti,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2407300,Kerttu Saalasti instituutti,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240217,Kestävä kemia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240217,2402170,Kestävä kemia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405530,Kestävä kemia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240553,Kestävä kemia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240820,Kieli- ja viestintäkoulutus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2408200,Kieli- ja viestintäkoulutus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240031,Kielten ja kirjallisuuden tutkimusyksikkö,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240266,Kietzmann research group,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240266,2402660,Kietzmann research group,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409250,Kiinteistö- ja toimitilajohtaminen,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240911,Kiinteistöt,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409110,Kiinteistöt,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24000,240035,Kirjallisuus ja elokuvatutkimus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240035,2400350,Kirjallisuus ja elokuvatutkimus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2400312,Kirjallisuus ja elokuvatutkimus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24081,Kirjasto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240810,Kirjasto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2408100,Kirjasto yhteiset,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240810,2408150,Kirjaston hallintopalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2403380,"Kirurgian, anestesiologian ja tehohoidon tutkimusy",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240338,"Kirurgian, anestesiologian ja tehohoidon tutkimusy",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240338,"Kirurgian, anestesiologian ja tehohoidon tutkimusyksikkö",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240338,2403380,"Kirurgian, anestesiologian ja tehohoidon tutkimusyksikkö",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24083,Koe-eläinkeskus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240830,Koe-eläinkeskus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2408300,Koe-eläinkeskus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240533,Koneensuunnittelu,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240533,2405330,Koneensuunnittelu,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240584,Konenäkö ja signaalianalyysi,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405840,Konenäkö ja signaalianalyysi,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240579,Konepaja- ja metalliosaamisen innovaatiokeskus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240579,2405790,Konepaja- ja metalliosaamisen innovaatiokeskus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240543,Konetekniikan ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405430,Konetekniikan ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24193,Koulutus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240810,2408104,Koulutus- ja tietopalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240923,Koulutuspalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409230,Koulutuspalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240942,2409412,Koulutuspalvelut HKT 31.7.2016 saakka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240942,2409432,Koulutuspalvelut Kontinkangas 31.7.2016 saakka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240942,2409422,Koulutuspalvelut LUTK 31.7.2016 saakka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409930,Koulutuspalvelut OAMK lähipalvelutiimi,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240993,Koulutuspalvelut OAMK lähipalvelutiimi,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240942,2409442,Koulutuspalvelut TTK 31.7.2016 saakka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409660,"Koulutuspalvelut lähipalvelutiimi KTK, HuTK, OyKK",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240966,"Koulutuspalvelut lähipalvelutiimi KTK, HuTK, OyKK",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409670,"Koulutuspalvelut lähipalvelutiimi LTK, BMTK",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240967,"Koulutuspalvelut lähipalvelutiimi LTK, BMTK",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409680,Koulutuspalvelut lähipalvelutiimi TSTK,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240968,Koulutuspalvelut lähipalvelutiimi TSTK,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240965,Koulutuspalvelut lähipalvelutiimi TTK ja LuTK,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409650,Koulutuspalvelut lähipalvelutiimi TTK ja LuTK,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409650,"Koulutuspalvelut lähipalvelutiimi TTK, LuTK, KaTK",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240965,"Koulutuspalvelut lähipalvelutiimi TTK, LuTK, KaTK",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240923,Koulutuspalvelut yhteiset palvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409230,Koulutuspalvelut yhteiset palvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240842,Koulutusvienti,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2408420,Koulutusvienti,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240557,Kuitu- ja partikkelitekniikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405570,Kuitu- ja partikkelitekniikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24000,240036,Kulttuuriantropologia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240036,2400360,Kulttuuriantropologia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2400334,Kulttuuriantropologia,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240267,Kursula research group,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240267,2402670,Kursula research group,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2408062,Kuvantavat mittaukset,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2417000,Kvantum Institute,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,241700,Kvantum Institute,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24170,Kvantum Institute,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405970,Käyttöliittymät ja ihmiskeskeinen digitalisaati,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240597,Käyttöliittymät ja ihmiskeskeinen digitalisaati,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24062,240597,Käyttöliittymät ja ihmiskeskeinen digitalisaatio,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240597,2405970,Käyttöliittymät ja ihmiskeskeinen digitalisaatio,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24088,LEAF,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240778,LEAF,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2407780,LEAF,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24020,LUONNONTIETEELLINEN TIEDEKUNTA,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240810,2408103,Lainaus- ja neuvontapalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240921,Laki ja sopimus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409210,Laki ja sopimus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24191,Laki- ja sopimuspalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24040,240404,Laskentatoimen yksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240404,2404040,Laskentatoimen yksikkö,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240268,Lehtiö research group,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240268,2402680,Lehtiö research group,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240037,Logopedia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2400370,Logopedia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240200,Luonnontieteellinen tiedekunta yhteiset,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402000,Luonnontieteellinen tiedekunta yhteiset,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24030,LÄÄKETIETEELLINEN TIEDEKUNTA,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,2410,24030,LÄÄKETIETEELLINEN TIEDEKUNTA,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24093,Lähipalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240300,Lääketieteellinen tiedekunta yhteiset,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2403000,Lääketieteellinen tiedekunta yhteiset,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2403400,"Lääketieteellisen kuvantamisen, fysiikan ja tekn",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240340,"Lääketieteellisen kuvantamisen, fysiikan ja tekn",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24030,240340,"Lääketieteellisen kuvantamisen, fysiikan ja tekniikan tutkimusyksikkö",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240340,2403400,"Lääketieteellisen kuvantamisen, fysiikan ja tekniikan tutkimusyksikkö",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2403002,Lääketieteen ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2408060,MITY,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240806,MITY,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240206,Maantiede,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402060,Maantiede,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240233,Maantieteen ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240233,2402330,Maantieteen ala,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240269,Manninen research group,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240269,2402690,Manninen research group,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24040,240405,Markkinoinnin yksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240405,2404050,Markkinoinnin yksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240402,"Markkinoinnin, johtamisen ja kansainvälisen liike",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2404020,"Markkinoinnin, johtamisen ja kansainvälisen liike",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2404070,Martti Ahtisaari Instituutti,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240407,Martti Ahtisaari instituutti,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402070,Matemaattisten tieteiden tutkimusyksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240207,Matemaattisten tieteiden tutkimusyksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240234,Matematiikan ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402340,Matematiikan ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240207,Matematiikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240207,2402070,Matematiikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240534,Materiaali- ja konetekniikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405340,Materiaali- ja konetekniikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240534,Materiaali- ja tuotantotekniikan tutkimusryhmä,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240534,2405340,Materiaali- ja tuotantotekniikan tutkimusryhmä,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24079,Materiaalianalyysikeskus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2407900,Materiaalianalyysikeskus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240790,Materiaalianalyysikeskus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240331,Medical Research Center (MRC),,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2403310,Medical Research Center (MRC),,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240532,Mekatroniikka ja konediagnostiikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240532,2405320,Mekatroniikka ja konediagnostiikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240586,Mikroelektroniikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405860,Mikroelektroniikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,2445,24079,Mikroskopian ja nanoteknologian keskus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24079,240790,Mikroskopian ja nanoteknologian keskus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240790,2407900,Mikroskopian ja nanoteknologian keskus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240550,2405610,Minipilot-rikastamo (ei käytössä),,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240264,Mitochondria and lipids,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402640,Mitochondria and lipids,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240221,Molekylaariset järjestelmät,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240221,2402210,Molekylaariset järjestelmät,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240270,Myllyharju research group,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240270,2402700,Myllyharju research group,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240222,NMR Spectroskopia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240222,2402220,NMR Spectroskopia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240222,NMR-spektroskopia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402220,NMR-spektroskopia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240972,NN rahasto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409720,NN rahasto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402210,Nano- ja molekyylisysteemien tutkimusyksikkö (NAN,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240221,Nano- ja molekyylisysteemien tutkimusyksikkö (NAN,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24020,240220,Neurobiofysiikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240220,2402200,Neurobiofysiikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240336,Neurotieteen tutkimusyksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2403360,Neurotieteen tutkimusyksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240773,NorTech 31.12.2015 saakka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240252,OMS Tutkimuskeskus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402520,OMS Tutkimuskeskus,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24040,OULUN YLIOPISTON KAUPPAKORKEAKOULU,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24095,Omakatteiset rahastot,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240104,"Opettajat, opettaminen ja kasvatusyhteisöt",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2401040,"Opettajat, opettaminen ja kasvatusyhteisöt",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409385,Opetuksen IT-palvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240930,2409385,Opetuksen ja tutkimuksen IT-tukipalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240923,2409236,Opetuksen tuki,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240547,Opetustilat,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405470,Opetustilat,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240923,2409232,Opintotuki (entinen työelämäpalvelut) 31.7.2016 saakka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240923,2409237,Opiskelijavalinta,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240923,2409239,Opiskelun tuki,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240923,2409238,Opiskeluoikeuden ylläpito,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240103,Oppiminen ja oppimisprosessit,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2401030,Oppiminen ja oppimisprosessit,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2408063,Optinen spektroskopia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240582,Optoelektroniikka ja mittaustekniikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405820,Optoelektroniikka ja mittaustekniikka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240273,Organogenesis,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402730,Organogenesis,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402341,OuLUMA,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24085,Oulangan tutkimusasema,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240771,Oulangan tutkimusasema,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2407710,Oulangan tutkimusasema,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240153,"Oulun normaalikoulu, Koskela 1-6",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2401530,"Oulun normaalikoulu, Koskela 1-6",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240151,"Oulun normaalikoulu, Linnanmaa (0-6)",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2401510,"Oulun normaalikoulu, Linnanmaa (0-6)",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240152,"Oulun normaalikoulu, Linnanmaa (7-9)",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2401520,"Oulun normaalikoulu, Linnanmaa (7-9)",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240155,"Oulun normaalikoulu, lukio",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2401550,"Oulun normaalikoulu, lukio",,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24,Oulun yliopisto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240950,Oulun yliopiston apuraharahasto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409500,Oulun yliopiston apuraharahasto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2401500,Oulun yliopiston harj.koulu yhteiset,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240150,Oulun yliopiston harjoittelukoulu,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,24015,Oulun yliopiston harjoittelukoulut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240400,Oulun yliopiston kauppakorkeakoulu,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2404000,Oulun yliopiston kauppakorkeakoulu yhteiset,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240266,Oxygen sensing,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402660,Oxygen sensing,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240337,PEDEGO-tutkimusyksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2403370,PEDEGO-tutkimusyksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402760,PROFI3,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240276,PROFI3,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2420,Painoalat,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240538,Paja,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405380,Paja,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24092,240941,Palvelupisteet henkilöstöhallinto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24092,240942,Palvelupisteet koulutuspalvelut 31.7.2016 saakka,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24092,240943,Palvelupisteet talouspalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2460,Palvelut,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240271,Pihlajaniemi and Heljasvaara research group,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240271,2402710,Pihlajaniemi and Heljasvaara research group,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240332,Pohjois-Suomen syntymäkohortti,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2403320,Pohjois-Suomen syntymäkohortti,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24000,240038,Pohjoismainen filologia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240038,2400380,Pohjoismainen filologia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2400313,Pohjoismainen filologia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409150,Postipalvelut,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409840,Private Equity rahasto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240984,Private Equity rahasto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240952,Professori Pentti Kaiteran rahasto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409520,Professori Pentti Kaiteran rahasto,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240964,Projektitalous,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409640,Projektitalous,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240544,Prosessi- ja ympäristötekniikan ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405440,Prosessi- ja ympäristötekniikan ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240550,2405500,Prosessi- ja ympäristötekniikan os. yht. (ei käytössä),,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24050,240550,Prosessi- ja ympäristötekniikan osasto (ei käytössä),,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240560,Prosessimetallurgia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405600,Prosessimetallurgia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240272,Protein and Structural Biology,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402720,Protein and Structural Biology,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240272,Protein folding,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2402720,Protein folding,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2470,Rahastot,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24040,240406,Rahoituksen yksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240406,2404060,Rahoituksen yksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240536,2405360,Rakennesuunnittelun ja rakentamisteknologian tutkimusryhmä,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405490,Rakennus- ja yhdyskuntatekniikan ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240549,Rakennus- ja yhdyskuntatekniikan ala,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409130,Rakentaminen ja kunnossapito,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240536,Rakenteet ja rakentamisteknologia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2405360,Rakenteet ja rakentamisteknologia,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240900,Rehtoraatin yksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2409000,Rehtoraatin yksikkö,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240923,2409230,Resurssipooli,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240272,Ruddock research group,,
 Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240272,2402720,Ruddock research group,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240273,Vainio research group,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240273,2402730,Vainio research group,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240274,Wei research group,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240274,2402740,Wei research group,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24031,240275,Wierenga and Venkatesan research group,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240275,2402750,Wierenga and Venkatesan research group,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,2410,24051,ARKKITEHTUURIN TIEDEKUNTA,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,24051,240510,Arkkitehtuurin tiedekunta,,
-Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,240510,2405100,Arkkitehtuurin tiedekunta,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240760,SGO Havaintotoiminta,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2407600,SGO Havaintotoiminta,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,240762,SGO Tutkimus- ja kehitystoiminta,,
+Oulun yliopisto,University of Oulu,Uleåborgs universitet,01904,,2407620,SGO
Tutkimus- ja kehitystoiminta,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24098,Sijoitusvarallisuus,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240333,SisƤtautien tutkimusyksikkƶ,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2403330,SisƤtautien tutkimusyksikkƶ,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24076,SodankylƤn geofysiikan observatorio,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24076,240760,SodankylƤn geofysiikan observatorio,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240760,2407600,SodankylƤn geofysiikan observatorio,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240589,Sovellettu ja laskennallinen matematiikka,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405890,Sovellettu ja laskennallinen matematiikka,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24020,240218,Sovellettu ja laskennallinen matematiikka 31.7.2016 saakka,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240218,2402180,Sovellettu ja laskennallinen matematiikka 31.7.2016 saakka,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24020,240219,Sovellettu matematiikka ja tilastotiede,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240219,2402190,Sovellettu matematiikka ja tilastotiede,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24199,Strategia ja tiedepolitiikka,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409280,Strategian ja tiedepolitiikan yksikkƶ,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240928,Strategian ja tiedepolitiikan yksikkƶ,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240917,Strategiarahat,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409170,Strategiarahat,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240268,Structural and Chemical Biology,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2402680,Structural and Chemical Biology,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240275,Structural enzymology,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2402750,Structural enzymology,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24000,240039,Suomen kieli,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240039,2400390,Suomen kieli,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2400314,Suomen kieli,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240305,Suun terveyden tutkimusyksikkƶ,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2403050,Suun terveyden tutkimusyksikkƶ,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24050,240559,Systeemitekniikka,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240559,2405590,Systeemitekniikka,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2403410,SyƶvƤn ja translationaalisen lƤƤketieteen tutk,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240341,SyƶvƤn ja translationaalisen lƤƤketieteen tutk,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24030,240341,SyƶvƤn ja translationaalisen lƤƤketieteen tutkimyksikkƶ,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240341,2403410,SyƶvƤn ja translationaalisen lƤƤketieteen 
tutkimyksikkƶ,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405952,SƤhkƶtekniikan ala,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24050,240558,SƤƤtƶtekniikka,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240558,2405580,SƤƤtƶtekniikka,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24050,TEKNILLINEN TIEDEKUNTA,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24062,TIETO- JA Sƃā€žHKƃā€“TEKNIIKAN TIEDEKUNTA,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2410,24062,TIETO- JA SƄHKƖTEKNIIKAN TIEDEKUNTA,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24062,240593,Tieto- ja sƤhkƶtekniikan tiedekunnan yhteiset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240593,2405930,Tieto- ja sƤhkƶtekniikan tiedekunnan yhteiset,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240963,TK-lƤhipalvelut,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409630,TK-lƤhipalvelut,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409381,TKI kapasiteettipalvelut,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405942,TST Fablab,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240595,TST Koulutus,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405950,TST Koulutus,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405933,TST Strateginen rahoitus,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240594,TST Tiedekuntainfra,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405940,TST Tiedekuntainfra,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240593,2405932,TST Tietohallinto,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240593,2405933,TST Strateginen rahoitus,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240593,2405941,TST Tyƶpaja,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240593,2405942,TST Fablab,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24062,240594,TST Tiedekuntainfra,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240594,2405940,TST Tiedekuntainfra,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240594,2405943,Fablab Infrastruktuuri,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24062,240595,TST Koulutus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240595,2405950,TST Koulutus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240595,2405951,Tietotekniikan ala,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240595,2405952,SƤhkƶtekniikan ala,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240595,2405953,TietojenkƤsittelytietieteiden ala,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24062,240589,Sovellettu ja laskennallinen matematiikka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240589,2405890,Sovellettu ja laskennallinen matematiikka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24062,240208,"Empiirinen ohjelmistotuotanto ohjelmistoissa, jƤrjestelmissƤ ja palveluissa",, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240208,2402080,"Empiirinen ohjelmistotuotanto ohjelmistoissa, jƤrjestelmissƤ ja palveluissa",, -Oulun yliopisto,University of Oulu,UleĆ„borgs 
universitet,01904,24062,240596,Vakuuttavat verkko- ja mobiilipalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240596,2405960,Vakuuttavat verkko- ja mobiilipalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24062,240597,KƤyttƶliittymƤt ja ihmiskeskeinen digitalisaatio,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240597,2405970,KƤyttƶliittymƤt ja ihmiskeskeinen digitalisaatio,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24062,240582,Optoelektroniikka ja mittaustekniikka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240582,2405820,Optoelektroniikka ja mittaustekniikka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24062,240583,Elektroniikan piirit ja jƤrjestelmƤt,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240583,2405830,Elektroniikan piirit ja jƤrjestelmƤt,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24062,240586,Mikroelektroniikka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240586,2405860,Mikroelektroniikka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24062,240584,KonenƤkƶ ja signaalianalyysi,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240584,2405840,KonenƤkƶ ja signaalianalyysi,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24062,240585,Biosignaalien analyysi,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240585,2405850,Biosignaalien analyysi,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24062,240599,Biomimetiikka ja ƤlykkƤƤt jƤrjestelmƤt,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240599,2405990,Biomimetiikka ja ƤlykkƤƤt jƤrjestelmƤt,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24062,240611,Jokapaikan tietotekniikka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240611,2406110,Jokapaikan tietotekniikka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24062,240587,CWC - Radioteknologiat,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240587,2405870,CWC - Radioteknologiat,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24062,240612,CWC - Verkot ja jƤrjestelmƤt,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240612,2406120,CWC - Verkot ja jƤrjestelmƤt,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24,2420,Painoalat,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2420,24064,Eudaimonia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24064,240640,Eudaimonia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240640,2406400,Eudaimonia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2420,24066,Biocenter,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24066,240681,Biocenter,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240681,2406810,Biocenter,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2420,24068,Infotech,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24068,240680,Infotech,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240680,2406800,Infotech,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2420,24077,Thule-instituutti,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24077,240770,Thule-instituutti,, +Oulun yliopisto,University 
of Oulu,UleĆ„borgs universitet,01904,,2405941,TST Tyƶpaja henkilƶstƶkulut,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405944,TST tyƶpaja infra,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405934,TST yhteisten muu henkilƶstƶ,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240544,TTK koulutus,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405440,TTK koulutus,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405460,TTK tutkimuksen strateginen rahoitus,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240546,TTK tutkimuksen strateginen rahoitus,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409240,Talouden suunnittelu ja kehitys,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24194,Talous,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240943,2409433,Taloushallinto Kontinkangas,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240943,2409473,Taloushallinto keskitetyt talouspalvelut palvelupiste,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240924,Talouspalvelut,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240924,2409240,Talouspalvelut,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409240,Talouspalvelut keskitetyt palvelut,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409640,Talouspalvelut lƤhipalvelutiimi,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240964,Talouspalvelut lƤhipalvelutiimi,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409241,Talouspalvelut yhteiset palvelut,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24040,240403,Taloustieteen yksikkƶ,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240403,2404030,Taloustieteen yksikkƶ,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240404,"Taloustieteen, laskentatoimen ja rahoituksen yksik",, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2404040,"Taloustieteen, laskentatoimen ja rahoituksen yksik",, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240500,Teknillinen tiedekunta yhteiset,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405000,Teknillinen tiedekunta yhteiset,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409491,Tellus,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240949,Tellus,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24020,240223,Teoreettinen fysiikka,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240223,2402230,Teoreettinen fysiikka,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2403004,Terveystieteiden ala,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24077,Thule-instituutti,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240770,Thule-instituutti,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240770,2407700,Thule-instituutti,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240770,2407701,Arktinen lƤƤketiede,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240770,2407702,Uarctic toimisto,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24,2430,Alueyksikƶt,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2430,24073,Kerttu Saalasti 
instituutti,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24073,240730,Kerttu Saalasti instituutti,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240730,2407300,Kerttu Saalasti instituutti,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24073,240731,KSI AluekehittƤminen,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240731,2407310,KSI AluekehittƤminen,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24073,240732,KSI Tulevaisuuden tuotantoteknologiat (FMT),, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240732,2407320,KSI Tulevaisuuden tuotantoteknologiat (FMT),, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24073,240733,KSI MikroyrittƤjyys (MikroY),, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240733,2407330,KSI MikroyrittƤjyys (MikroY),, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24073,240734,KSI Maanalainen fysiikka (CUPP),, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240734,2407340,KSI Maanalainen fysiikka (CUPP),, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24073,240736,KSI Raahe,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240736,2407360,KSI Raahe,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2430,24076,SodankylƤn geofysiikan observatorio,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24076,240760,SodankylƤn geofysiikan observatorio,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240760,2407600,SodankylƤn geofysiikan observatorio,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24076,240762,SGO Tutkimus- ja kehitystoiminta,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240762,2407620,SGO Tutkimus- ja kehitystoiminta,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2430,24080,Kajaanin yliopistokeskus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24080,240800,Kajaanin yliopistokeskus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240800,2408000,Kajaanin yliopistokeskus yhteiset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24080,240806,CEMIS-Oulu,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240806,2408060,CEMIS-Oulu,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240806,2408061,Analyyttinen kemia/bioanalytiikka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240806,2408062,Kuvantavat mittaukset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240806,2408063,Optinen spektroskopia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24080,240807,AIKOPA,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240807,2408071,AIKOPA,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24,2440,Erillisyksikƶt,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2440,24065,UniOGS,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24065,240650,University of Oulu Graduate School,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240650,2406500,University of Oulu Graduate School,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2440,24081,Kirjasto,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24081,240810,Kirjasto,, -Oulun yliopisto,University of Oulu,UleĆ„borgs 
universitet,01904,240810,2408100,Kirjasto yhteiset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240810,2408101,Hankintapalvelu,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2410,Tiedekunnat,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240541,Tiedekuntalaboratorio,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405410,Tiedekuntalaboratorio,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240034,2400341,TiedeviestinnƤn maisteriohjelma,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2400333,TiedeviestinnƤn maisteriohjelma,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240593,Tieto- ja sƤhkƶtekniikan tiedekunnan yhteiset,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405930,Tieto- ja sƤhkƶtekniikan tiedekunnan yhteiset,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240810,2408102,Tietoaineistopalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240810,2408103,Lainaus- ja neuvontapalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240810,2408104,Koulutus- ja tietopalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240810,2408150,Kirjaston hallintopalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2440,24086,TƤydentƤvien opintojen keskus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24086,240820,Kieli- ja viestintƤkoulutus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240820,2408200,Kieli- ja viestintƤkoulutus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24086,240840,Aikuiskoulutus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240840,2408400,Aikuiskoulutus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240840,2408406,Avoin yliopisto,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24,2445,Infrastruktuuri,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2445,24067,Biocenter Core,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24067,240670,Biocenter Core,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240670,2406700,Biocenter Core,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2445,24079,Mikroskopian ja nanoteknologian keskus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24079,240790,Mikroskopian ja nanoteknologian keskus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240790,2407900,Mikroskopian ja nanoteknologian keskus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2445,24083,Koe-elƤinkeskus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24083,240830,Koe-elƤinkeskus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240830,2408300,Koe-elƤinkeskus,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2445,24085,Oulangan tutkimusasema,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24085,240771,Oulangan tutkimusasema,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240771,2407710,Oulangan tutkimusasema,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2445,24088,LEAF,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24088,240778,LEAF,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240778,2407780,LEAF,, -Oulun 
yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2445,24089,Biodiversiteettiyksikkƶ,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24089,240212,Biodiversiteettiyksikkƶ,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240212,2402120,Biodiversiteettiyksikkƶ,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24,2450,Yhteiset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2450,24091,Yliopiston yhteiset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24091,240910,Yliopiston yhteiset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240910,2409100,Yliopiston yhteiset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24091,240911,Kiinteistƶt,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240911,2409110,Kiinteistƶt,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240911,2409115,Tilanvaraustilat,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240911,2409125,Autopaikat,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240911,2409130,Rakentaminen ja kunnossapito,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240911,2409135,Turvallisuus ja ympƤristƶ,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240911,2409140,Aulapalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240911,2409150,Postipalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240911,2409155,Kalusteasentajat,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240911,2409160,Hankintapalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24091,240917,Strategiarahat,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240917,2409170,Strategiarahat,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24091,240918,YO Tietohallinto,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240918,2409180,YO Lisenssit,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240918,2409181,YO Infrastruktuuri,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240918,2409182,YO TietojƤrjestelmƤt,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24091,240901,YO Koulutuspalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240901,2409010,YO Koulutuspalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24,2460,Johto ja palvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2460,24092,Johto ja palvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240920,YK-hyvitykset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240920,2409200,YK-hyvitykset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240900,Rehtoraatin yksikkƶ,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240900,2409000,Rehtoraatin yksikkƶ,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240921,Johto ja palvelut - yhteiset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240921,2409210,Johto ja palvelut - yhteiset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240922,Henkilƶstƶpalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240922,2409220,Henkilƶstƶpalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs 
universitet,01904,24092,240923,Koulutuspalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240923,2409230,Resurssipooli,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240923,2409232,Opintotuki (entinen tyƶelƤmƤpalvelut) 31.7.2016 saakka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240923,2409236,Opetuksen tuki,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240923,2409237,Opiskelijavalinta,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240923,2409238,Opiskeluoikeuden yllƤpito,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240923,2409239,Opiskelun tuki,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240924,Talouspalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240924,2409240,Talouspalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240925,Tila- ja turvallisuuspalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240925,2409250,Tila- ja turvallisuuspalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240925,2409251,Virastomestaripalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240927,"ViestintƤ, markkinointi ja yhteiskuntasuhteet",, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240927,2409270,"ViestintƤ, markkinointi ja yhteiskuntasuhteet",, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240961,Tietohallinto lƤhipalvelutiimi,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409610,Tietohallinto lƤhipalvelutiimi,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240930,Tietohallintopalvelut,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409300,Tietohallintopalvelut yhteiset,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405953,TietojenkƤsittelytietieteiden ala,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409360,TietojƤrjestelmƤpalvelut,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405951,Tietotekniikan ala,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240925,Tila- ja turvallisuuspalvelut,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409250,Tila- ja turvallisuuspalvelut,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409115,Tilanvaraustilat,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24050,240546,Tohtorikoulutettavat,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240546,2405460,Tohtorikoulutettavat,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240545,Tuotantotalouden ala,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405450,Tuotantotalouden ala,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240570,Tuotantotalous,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405700,Tuotantotalous,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24050,240531,Tuotantotekniikka 31.12.2015 saakka,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409135,Turvallisuus ja ympƤristƶ,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409252,Turvallisuuspalvelut,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240928,Tutkimuksen strategiset palvelut,, Oulun yliopisto,University of Oulu,UleĆ„borgs 
universitet,01904,240928,2409280,Tutkimuksen strategiset palvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240930,Tietohallintopalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240930,2409300,Tietohallintopalvelut yhteiset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240930,2409305,Asiakirjapalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240930,2409360,TietojƤrjestelmƤpalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240930,2409380,Infrapalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240930,2409385,Opetuksen ja tutkimuksen IT-tukipalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240930,2409390,Asiakaspalvelu,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240941,Palvelupisteet henkilƶstƶhallinto,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240941,2409471,Henkilƶstƶhallinto keskitetyt palvelut (KEPA),, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240942,Palvelupisteet koulutuspalvelut 31.7.2016 saakka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240942,2409412,Koulutuspalvelut HKT 31.7.2016 saakka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240942,2409422,Koulutuspalvelut LUTK 31.7.2016 saakka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240942,2409432,Koulutuspalvelut Kontinkangas 31.7.2016 saakka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240942,2409442,Koulutuspalvelut TTK 31.7.2016 saakka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240943,Palvelupisteet talouspalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240943,2409433,Taloushallinto Kontinkangas,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240943,2409473,Taloushallinto keskitetyt talouspalvelut palvelupiste,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240948,Tutkimuksen tukipalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240948,2409480,Tutkimuksen tukipalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240949,Innovaatio- ja yrittƤjyyspalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240949,2409245,Innovaatio- ja yrittƤjyyspalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240720,Center for Health and Technology,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240720,2407200,Center for Health and Technology,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240775,CEE InnovaatiokeskittymƤ,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240775,2407750,CEE InnovaatiokeskittymƤ,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24050,240531,Tuotantotekniikka 31.12.2015 saakka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24050,240773,NorTech 31.12.2015 saakka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24062,240700,CIE 31.12.2015 saakka,, -Poliisiammattikorkeakoulu,Police University College,Polisyrkeshƶgskolan,02557,,,,http://isni.org/isni/0000000097577818,1602 +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240948,Tutkimuksen tukipalvelut,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409480,Tutkimuksen tukipalvelut,, 
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240963,Tutkimuksen tukipalvelut lƤhipalvelutiimi,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409630,Tutkimuksen tukipalvelut lƤhipalvelutiimi,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24195,Tutkimus- ja projektipalvelut,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24050,240538,Tyƶpaja,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240538,2405380,Tyƶpaja,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240224,TƤhtitiede,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2402240,TƤhtitiede,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24086,TƤydentƤvien opintojen keskus,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2407702,Uarctic toimisto,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409241,Ulkoinen laskenta,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24065,UniOGS,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240650,University of Oulu Graduate School,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2406500,University of Oulu Graduate School,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409271,"VMY, yliopiston ja OAMKin yhteiset palvelut",,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24031,240273,Vainio research group,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240273,2402730,Vainio research group,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240596,Vakuuttavat verkko- ja mobiilipalvelut,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405960,Vakuuttavat verkko- ja mobiilipalvelut,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409820,Valtion pƤƤomittamat varat,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240982,Valtion pƤƤomittamat varat,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409830,Valtion pƤƤomittamat varat 2,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240983,Valtion pƤƤomittamat varat 2,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24097,Vapaat rahastot,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409810,Varainhankinta 2015-2017,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240981,Varainhankinta 2015-2017,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409800,Varainkeruuvarat,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240980,Varainkeruuvarat,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24050,240552,Vesi- ja ympƤristƶtekniikka,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240552,2405520,Vesi- ja ympƤristƶtekniikka,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405520,"Vesi-, energia- ja ympƤristƶtekniikka",,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240552,"Vesi-, energia- ja ympƤristƶtekniikka",,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240962,ViestinnƤn lƤhipalvelutiimi,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409620,ViestinnƤn lƤhipalvelutiimi,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24196,ViestintƤ,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240927,"ViestintƤ, markkinointi ja yhteiskuntasuhteet",,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409270,"ViestintƤ, markkinointi ja yhteiskuntasuhteet",,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409620,ViestintƤpalvelut lƤhipalvelutiimi,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240962,ViestintƤpalvelut lƤhipalvelutiimi,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409251,Virastomestaripalvelut,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24031,240274,Wei research group,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240274,2402740,Wei research group,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24031,240275,Wierenga and Venkatesan research group,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240275,2402750,Wierenga and Venkatesan research group,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240920,YK-hyvitykset,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409200,YK-hyvitykset,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409181,YO Infrastruktuuri,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240901,YO Koulutuspalvelut,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409010,YO Koulutuspalvelut,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409180,YO Lisenssit,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240918,YO Tietohallinto,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409182,YO TietojƤrjestelmƤt,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2450,Yhteiset,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24092,Yhteiset palvelut,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240948,Yhteiset tk-palvelut,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409480,Yhteiset tk-palvelut,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240990,Yliopiston innovaatiokeskus,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409900,Yliopiston innovaatiokeskus,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24091,Yliopiston yhteiset,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240910,Yliopiston yhteiset,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409100,Yliopiston yhteiset,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409910,Yliopiston yleishallinto,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240991,Yliopiston yleishallinto,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240555,YmpƤristƶ- ja kemiantekniikka,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405550,YmpƤristƶ- ja kemiantekniikka,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240949,YrittƤjyyspalvelut,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240532,ƄlykkƤƤt koneet ja jƤrjestelmƤt,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405320,ƄlykkƤƤt koneet ja jƤrjestelmƤt,,
+Oulun yliopistollisen sairaalan erityisvastuualue,Oulu University Hospital Catchment Area,,06794809,,,,,
+Poliisiammattikorkeakoulu,Police University College,,02557,,,,,
+Ruokavirasto,Finnish Food Authority,,430001,,,,,
 Saimaan ammattikorkeakoulu,Saimaa University of Applied Sciences,,02609,,,,http://isni.org/isni/0000000404184038,31
-Saimaan ammattikorkeakoulu,Saimaa University of Applied Sciences,,02609,,2017,Opiskelijapalvelut ja opetuksen yhteiset,,
-Saimaan ammattikorkeakoulu,Saimaa University of Applied Sciences,,02609,,2040,TKI- ja palvelutoiminta,,
 Saimaan ammattikorkeakoulu,Saimaa University of Applied Sciences,,02609,,2099,AMK yhteiset palvelut,,
-Saimaan ammattikorkeakoulu,Saimaa University of Applied Sciences,,02609,,2100,Kulttuuri,,
-Saimaan ammattikorkeakoulu,Saimaa University of Applied Sciences,,02609,,2200,Liiketalous,,
 Saimaan ammattikorkeakoulu,Saimaa University of Applied Sciences,,02609,,2310,Hotelli- ja ravintola-ala,,
-Saimaan ammattikorkeakoulu,Saimaa University of Applied Sciences,,02609,,2500,Tekniikka,,
-Saimaan ammattikorkeakoulu,Saimaa University of Applied Sciences,,02609,,2400,Sosiaali- ja terveysala,,
 Saimaan ammattikorkeakoulu,Saimaa University of Applied Sciences,,02609,,2900,Kielikeskus,,
-Satakunnan ammattikorkeakoulu,SAMK Satakunta University of Applied Sciences,,02507,,,,http://isni.org/isni/0000000101657504,1748
-Satakunnan ammattikorkeakoulu,SAMK Satakunta University of Applied Sciences,,02507,,TECH,Teknologia,,
-Satakunnan ammattikorkeakoulu,SAMK Satakunta University of Applied Sciences,,02507,,HYVO,Hyvinvointi,,
-Satakunnan ammattikorkeakoulu,SAMK Satakunta University of Applied Sciences,,02507,,LOME,Logistiikka ja meriteknologia,,
+Saimaan ammattikorkeakoulu,Saimaa University of Applied Sciences,,02609,,2100,Kulttuuri,,
+Saimaan ammattikorkeakoulu,Saimaa University of Applied Sciences,,02609,,2200,Liiketalous,,
+Saimaan ammattikorkeakoulu,Saimaa University of Applied Sciences,,02609,,2017,Opiskelijapalvelut ja opetuksen yhteiset,,
+Saimaan ammattikorkeakoulu,Saimaa University of Applied Sciences,,02609,,2400,Sosiaali- ja terveysala,,
+Saimaan ammattikorkeakoulu,Saimaa University of Applied Sciences,,02609,,2040,TKI- ja palvelutoiminta,,
+Saimaan ammattikorkeakoulu,Saimaa University of Applied Sciences,,02609,,2500,Tekniikka,,
+Satakunnan ammattikorkeakoulu,Satakunta University of Applied Sciences,,02507,,,,,
+Satakunnan ammattikorkeakoulu,Satakunta University of Applied Sciences,Satakunnan ammattikorkeakoulu,02507,,HYVO,Hyvinvointi ja terveys,,
+Satakunnan ammattikorkeakoulu,Satakunta University of Applied Sciences,Satakunnan ammattikorkeakoulu,02507,,LOME,Logistiikka ja meriteknologia,,
+Satakunnan ammattikorkeakoulu,Satakunta University of Applied Sciences,Satakunnan ammattikorkeakoulu,02507,,OPVA,Opetuksen vararehtorin toimisto,,
 Satakunnan ammattikorkeakoulu,SAMK Satakunta University of Applied Sciences,,02507,,OPET,Opetuspalvelut,,
-Satakunnan ammattikorkeakoulu,SAMK Satakunta University of Applied Sciences,,02507,,PATA,Palveluliiketoiminta,,
-Satakunnan ammattikorkeakoulu,SAMK Satakunta University of Applied Sciences,,02507,,RETO,Rehtorin toimisto,,
-Satakunnan ammattikorkeakoulu,SAMK Satakunta University of Applied Sciences,,02507,,TAHA,Talous ja hallinto,,
+Satakunnan ammattikorkeakoulu,Satakunta University of Applied Sciences,Satakunnan ammattikorkeakoulu,02507,,PATA,Palveluliiketoiminta,,
+Satakunnan ammattikorkeakoulu,Satakunta University of Applied Sciences,Satakunnan ammattikorkeakoulu,02507,,RETO,Rehtorin toimisto,,
 Satakunnan ammattikorkeakoulu,SAMK Satakunta University of Applied Sciences,,02507,,TKI,TKI-palvelut,,
-Savonia,University of Applied Sciences Savonia,,02537,,,,http://isni.org/isni/0000000404176521,1759
-Savonia,University of Applied Sciences Savonia,,02537,,100,LiTe vastuualueen yhteiset,,
-Savonia,University of Applied Sciences Savonia,,02537,,110,Teknologia ja ympƤristƶala,,
-Savonia,University of Applied Sciences Savonia,,02537,,300,Liiketalousala,,
-Savonia,University of Applied Sciences Savonia,,02537,,350,Matkailu- ja ravitsemisala,,
-Savonia,University of Applied Sciences Savonia,,02537,,400,Hyvin vastuualueen yhteiset,,
-Savonia,University of Applied Sciences Savonia,,02537,,410,Sosiaali- ja terveysala,,
-Savonia,University of Applied Sciences Savonia,,02537,,500,Kulttuuriala,,
-Savonia,University of Applied Sciences Savonia,,02537,,600,Luonnonvara-ala,,
-Savonia,University of Applied Sciences Savonia,,02537,,800,Savonia liiketoiminta,,
-Savonia,University of Applied Sciences Savonia,,02537,,900,Hallinto- ja korkeakoulupalvelut,,
-SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,,,,http://isni.org/isni/0000000104772049,U2VpbsOkam9lbiBhbW1hdHRpa29ya2Vha291bHU=
-SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,ElMa,ElMa,SeAMK Elintarvike ja maatalous,,
-SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,ElMa,MaMe,Luonnonvara (maa- & metsƤtalousalat),,
+Satakunnan ammattikorkeakoulu,Satakunta University of Applied Sciences,Satakunnan ammattikorkeakoulu,02507,,TAHA,Talous ja hallinto,,
+Satakunnan ammattikorkeakoulu,Satakunta University of Applied Sciences,Satakunnan ammattikorkeakoulu,02507,,TECH,Teknologia,,
+Satakunnan ammattikorkeakoulu,Satakunta University of Applied Sciences,Satakunnan ammattikorkeakoulu,02507,,TUVA,Tutkimuksen vararehtorin toimisto,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,,02537,,,,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,900,Hallinto- ja korkeakoulupalvelut,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,400,Hyvin vastuualueen yhteiset,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,121,Jatkuva oppiminen,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,920,Kampukset ja toimitilat,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,131,KansainvƤlisyys,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,500,Kulttuuriala,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,100,LiTe vastuualueen yhteiset,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,300,Liiketalousala,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,600,Luonnonvara-ala,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,350,Matkailu- ja ravitsemisala,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,902,Muut palvelut,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,901,Opiskelijaa palvelevat palvelut,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,840,Palveluliiketoiminta,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,800,Savonia liiketoiminta,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,990,Savonia yhteiset,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,410,Sosiaali- ja terveysala,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,830,TK-toiminta,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,110,Teknologia ja ympƤristƶala,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,110,Teknologia ja ympƤristƶals,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,111,Tutkintokoulutus,,
+Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,700,Yrityspalvelut,,
+SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,,,,,
+SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,SeinƤjoen ammattikorkeakoulu,02472,,BiEli,Bio- ja elintarviketekniikka (tekniikan alat),,
 SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,ElMa,BiEli,Bio- ja elintarviketekniikka (tekniikka),,
-SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,ElMa,Resto,Ravitsemisala (palveluala),,
+SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,SeinƤjoen ammattikorkeakoulu,02472,,KiTi,Kirjasto ja tietopalvelu (yhteiskunnalliset alat),,
+SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,SeinƤjoen ammattikorkeakoulu,02472,,Kult,Kulttuuri (taiteet ja kulttuuri),,
+SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,SeinƤjoen ammattikorkeakoulu,02472,,Liike,"Liiketalous (kauppa, hallinto & oikeus)",,
+SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,SeinƤjoen ammattikorkeakoulu,02472,,MaMe,Luonnonvara (maa- & metsatalousalat),,
+SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,ElMa,MaMe,Luonnonvara (maa- & metsƤtalousalat),,
+SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,SeinƤjoen ammattikorkeakoulu,02472,,Resto,Ravitsemisala (palveluala),,
+SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,ElMa,ElMa,SeAMK Elintarvike ja maatalous,,
 SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,LiKu,LiKu,SeAMK Liiketoiminta ja kulttuuri,,
-SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,LiKu,Liike,"Liiketalous (kauppa, hallinto & oikeus)",,
-SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,LiKu,Kult,Kulttuuri (taiteet ja kulttuuri),,
-SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,LiKu,KiTi,Kirjasto ja tietopalvelu (yhteiskunnalliset alat),,
-SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,SosTer,SosTer,SeAMK Sosiaali- ja terveysala (terveys & hyvinvointi),,
+SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,SeinƤjoen ammattikorkeakoulu,02472,,SosTer,SeAMK Sosiaali- ja terveysala (terveys & hyvinvoin,,
+SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,SeinƤjoen ammattikorkeakoulu,02472,,SosTer,SeAMK Sosiaali- ja terveysala (terveys & hyvinvointi),,
 SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,Tekn,Tekn,SeAMK Tekniikka,,
+SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,SeinƤjoen ammattikorkeakoulu,02472,,Teka,SeAMK Tekniikka (tekniikan alat),,
+SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,SeinƤjoen ammattikorkeakoulu,02472,,Tsto,SeAMK Toimisto,,
+SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,SeinƤjoen ammattikorkeakoulu,02472,,Tite,Tietotekniikka (tietojenkasittely & tietoliikenne),,
 SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,Tekn,Tite,Tietotekniikka (tietojenkƤsittely & tietoliikenne),,
-SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,Tsto,Tsto,SeAMK Toimisto,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,,,http://isni.org/isni/0000000111777737,SGFua2VuIFN2ZW5za2EgaGFuZGVsc2jDtmdza29sYW4=
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,1,FƶrvaltningsƤmbetet,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,2,Institutionen fƶr finansiell ekonomi,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,3,Institutionen fƶr fƶretagsledning och organisation,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,4,Institutionen fƶr redovisning och handelsrƤtt,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,5,Institutionen fƶr marknadsfƶring,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,6,Institutionen fƶr nationalekonomi,,
+Suomen Akatemia,Academy of Finland,,02458939,,,,,
+Suomen LƤƤketieteen SƤƤtiƶ,Finnish Medical Foundation,,02211063,,,,,
+Suomen Pankki,Bank of Finland,,02022481,,,,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,,7020017,,,,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020180001,Administrative services,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020110001,Biodiversiteettikeskus,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020130001,Centre for sustainable consumption and production,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020100010,Climate change programme,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020190001,Communications,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020170001,Data and information centre,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020200001,Enheten for internationella Ƥrenden,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020140001,Freshwater centre,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020180001,Hallintopalvelut,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020120001,Havscentret,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020100010,Ilmastonmuutos,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020100001,Johdon tuki,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020100030,KestƤvƤ kiertotalous,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020130001,Kulutuksen ja tuotannon keskus,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020160001,Laboratoriecentret,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020100001,Ledningens stƶd,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020120001,Merikeskus,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020150001,Miljƶpolitikscentret,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020100040,Program om miljƶinformation,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020100040,Programme for environmental information,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020100020,Sustainable urbanisation programme,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020170001,Tietokeskus,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020140001,Vesikeskus,,
+Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020150001,YmpƤristƶpolitiikkakeskus,,
+Svenska handelshƶgskolan,Hanken School of Economics,,01910,,,,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8355,Aarresaari-nƤtverket,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7421,AllmƤn adm. och personalƤrenden - Helsingfors,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7422,AllmƤn adm. och personalƤrenden - Vasa,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,8742,AllmƤn adm. och personalƤrenden / projekt,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7422,AllmƤn administration och personalƤrenden - Vasa,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7421,AllmƤn administration och personalƤrenden -Helsi,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8742,AllmƤn administration och personalƤrenden / proj,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7456,Alumni - Helsingfors,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7561,Arkadia,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,7,Biblioteket,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8321,Biblioteket,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,3211,Biblioteket - Helsingfors,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,3215,Biblioteket - Vasa,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8321,Biblioteket / projekt,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8525,CCR - Centre for Corporate Responsibility,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4251,CCR - PRME och Green Office,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8531,CERS - Centre for Relationship Marketing and Servi,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8532,CERS - Centre for Relationship Marketing and Servi,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,5,8531,CERS - Helsingfors,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7660,Capman,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7521,Casa,,
+Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,8,8331,Centret fƶr forskn.o.int.Ƥr./projekt,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,8,Centret fƶr forskning och internationella Ƥrenden,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,9,Datacentralen,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,10,Hanken Fortbildning,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,11,Centret fƶr sprĆ„k och affƤrskommunikation,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,19,Samfinansierade enheter,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,20,FristĆ„ende institutioner,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,7,3211,Biblioteket - Helsingfors,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,20,3212,Tritonia,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,8,3311,Centret fƶr forskning och internationella Ƥrenden,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,8,3331,Forskningsservice,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,9,3411,Datacentralen - Helsingfors,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,9,3412,Datacentralen - Vasa,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,2,4111,Finansiell ekonomi - Helsingfors,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,2,4112,Finansiell ekonomi - Vasa,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,2,4121,Statistik - Helsingfors,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,2,4122,Statistik - Vasa,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,3,4211,Fƶretagsledning och organisation - Helsingfors,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,3,4212,Fƶretagsledning och organisation - Vasa,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,3,4221,Entreprenƶrskap och fƶretagsledning - Helsingfors,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,3,4222,Entreprenƶrskap och fƶretagsledning - Vasa,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,3,4231,Informationsbehandling - Helsingfors,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,3,4232,Informationsbehandling - Vasa,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,4,4311,HandelsrƤtt - Helsingfors,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,4,4312,HandelsrƤtt - Vasa,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,4,4321,Redovisning - Helsingfors,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,4,4322,Redovisning - Vasa,,
-Svenska handelshƶgskolan,Hanken School of
Economics,Svenska Handelshƶgskolan,01910,5,4411,Marknadsfƶring - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,5,4412,Marknadsfƶring - Vasa,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,5,4421,Logistik och samhƤllsansvar,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,6,4511,Nationalekonomi - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,6,4512,Nationalekonomi - Vasa,, Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,11,4705,Centret fƶr sprĆ„k,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,11,4711,Svenska - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,11,4712,Svenska -Vasa,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,11,4721,Finska - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,11,4722,Finska - Vasa,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,11,4731,Engelska - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,11,4732,Engelska - Vasa,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,11,4741,Tyska - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,11,4742,Tyska - Vasa,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,11,4751,Franska - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,11,4761,Spanska - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,11,4771,Ryska - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7311,Ɩppna universitetet - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7312,Ɩppna universitetet - Vasa,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7411,RektorsƤmbetet - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7421,AllmƤn adm. och personalƤrenden - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7422,AllmƤn adm. 
och personalƤrenden - Vasa,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7431,StudiebyrĆ„n - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7432,StudiebyrĆ„n - Vasa,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7451,Marknadsfƶring och information - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7452,Marknadsfƶring och information - Vasa,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7453,KarriƤrtjƤnster - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7455,KarriƤrtjƤnster - Vasa,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7456,Alumni - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7458,Partnerskap,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7459,Fundraising,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7461,Ekonomiavdelningen,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7472,NƤringslivskoordinator - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,11,Centret fƶr sprĆ„k och affƤrskommunikation,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4701,Centret fƶr sprĆ„k och affƤrskommunikation - Hel,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4702,Centret fƶr sprĆ„k och affƤrskommunikation - Vas,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4705,"Centret fƶr sprĆ„k och affƤrskommunikation, admi",, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7620,Danske Capital,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,9,Datacentralen,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,3411,Datacentralen - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,3412,Datacentralen - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8341,Datacentralen / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7610,Donationskapital,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8523,EPCE - Erling-Persson Centre for Entrepreneurship,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7640,EQ,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7531,Economicum,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7461,Ekonomiavdelningen,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8746,Ekonomiavdelningen / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4731,Engelska - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4732,Engelska - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4221,Entreprenƶrskap och fƶretagsledning - Helsingfor,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska 
Handelshƶgskolan,01910,3,4221,Entreprenƶrskap och fƶretagsledning - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4222,Entreprenƶrskap och fƶretagsledning - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8427,Entreprenƶrskap och fƶretagsledning - Vasa / pro,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8422,Entreprenƶrskap och fƶretagsledning / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7630,Evli,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7670,Evli PE,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7511,Fastigheter och servicepersonal - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8751,Fastighetsfƶrvaltning - Helsingfors / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8752,Fastighetsfƶrvaltning - Vasa / projekt,, Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7491,Fastighetspersonal och infra - Helsingfors,, Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7492,Fastighetspersonal och infra - Vasa,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7511,Fastigheter och servicepersonal - Helsingfors,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7721,Helsingforsalliansen,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,10,8316,"MBA, Modulkostnader",, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,10,8317,MBA,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,8,8331,Centret fƶr forskn.o.int.Ƥr./projekt,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,19,8351,KATAJA,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,2,8411,Finansiell ekonomi / projekt,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,6,8412,Statistik / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7491,Fastighetsservice - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7492,Fastighetsservice - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4111,Finansiell ekonomi - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4112,Finansiell ekonomi - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8416,Finansiell ekonomi - Vasa / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8411,Finansiell ekonomi / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4721,Finska - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4722,Finska - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,22,Forsknings- och universitetsservice,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,3331,Forskningsservice,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska 
handelshƶgskolan,01910,,8333,Forskningsservice / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4751,Franska - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4752,Franska - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,20,FristĆ„ende institutioner,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7459,Fundraising,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4211,Fƶretagsledning och organisation - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4212,Fƶretagsledning och organisation - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8426,Fƶretagsledning och organisation - Vasa / projekt,, Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,3,8421,Fƶretagsledning och organisation / projekt,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,3,8422,Entreprenƶrskap och fƶretagsledning / projekt,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,3,8423,Informationsbehandling / projekt,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,4,8431,HandelsrƤtt / projekt,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,4,8432,Redovisning / projekt,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,5,8441,Marknadsfƶring / projekt,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,5,8442,Logistik och samhƤllsansvar /projekt,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,6,8451,Nationalekonomi / projekt,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,11,8471,Samtliga sprĆ„k / projekt,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,10,8511,Hanken fortbildning Vasa,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,4,8521,IPR University Center,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,5,8531,CERS - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8421,Fƶretagsledning och organisation /projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,1,FƶrvaltningsƤmbetet,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8529,"GODESS - Gender, Organization, Diversity, Equality",, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8565,"HCCG - Hanken Centre for Accounting, Finance and G",, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8565,HCCG - Hanken Centre for Corporate Governance,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8535,HUMLOG - The Humanitarian Logistics and Supply Cha,, Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,5,8535,HUMLOG Institute,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4311,HandelsrƤtt - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4312,HandelsrƤtt - Vasa,, +Svenska 
handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8436,HandelsrƤtt - Vasa / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8431,HandelsrƤtt / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7482,Hanken Business Lab Stugan,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8749,Hanken Business Lab Stugan / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7481,Hanken Business Lab Torget,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8748,Hanken Business Lab Torget / projekt,, Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,4,8565,Hanken Centre for Corporate Governance,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,8742,AllmƤn adm. och personalƤrenden / projekt,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,8743,StudiebyrĆ„n / projekt,, -Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7321,Samlad service,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,10,Hanken Fortbildning,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8511,Hanken fortbildning Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7721,Helsingforsalliansen,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8772,Helsingforsalliansen / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4151,Helsinki GSE / UKM,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8415,"Helsinki GSE / externa medel, projekt",, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7511,Huvudbyggnad - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7512,Huvudbyggnad - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8521,IPR University Center,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4231,Informationsbehandling - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4232,Informationsbehandling - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8428,Informationsbehandling - Vasa / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8423,Informationsbehandling / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,2,Institutionen fƶr finansiell ekonomi,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,25,Institutionen fƶr finansiell ekonomi och national,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,3,Institutionen fƶr fƶretagsledning och organisati,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,03,Institutionen fƶr fƶretagsledning och organisati,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,5,Institutionen fƶr marknadsfƶring,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,05,Institutionen fƶr 
marknadsfƶring,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,6,Institutionen fƶr nationalekonomi,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4,Institutionen fƶr redovisning och handelsrƤtt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,04,Institutionen fƶr redovisning och handelsrƤtt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7471,Institutionsadministration - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7472,Institutionsadministration - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7474,Institutionsadministration - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,3311,Internationella Ƥrenden,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8331,Internationella Ƥrenden / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,23,Internationella Ƥrenden och externa relationer,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8351,KATAJA,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7453,KarriƤrtjƤnster - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7455,KarriƤrtjƤnster - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8744,KarriƤrtjƤnster / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4421,Logistik och samhƤllsansvar,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8447,Logistik och samhƤllsansvar - Vasa / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8442,Logistik och samhƤllsansvar / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,5,8442,Logistik och samhƤllsansvar /projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,10,8317,MBA,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,10,8316,"MBA, Modulkostnader",, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7650,Mandatum,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4411,Marknadsfƶring - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4412,Marknadsfƶring - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8446,Marknadsfƶring - Vasa / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8441,Marknadsfƶring / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7451,Marknadsfƶring och information - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7452,Marknadsfƶring och information - Vasa,, Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,8745,Marknadsfƶring och information / projekt,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,,,http://isni.org/isni/0000000417907610,1942 -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1260,KuvA/Johdon tuki,, -Taideyliopisto,Uniarts 
Helsinki,Konstuniversitetet,10103,,1270,KuvA/NƤyttelytoiminnan tuki,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1380,KuvA/Kv-avaukset,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1590,KuvA/Kirjasto,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2201,TeaK/Ohjauksen koulutusohjelma,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2231,TeaK/Tanssin koulutusohjelma BA,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2822,TeaK/Opetuksen suunnitt ja keh,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2832,TeaK/NƤyttƤmƶ ja tarpeisto,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3111,31160,SibA/Musiikkikasvatuksen aineryhmƤ,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3740,37410,Konserttivahtimestarit,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3750,SibA/Esitystekniikkapalvelut,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3801,SibA/Varadekaani I,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3830,SibA/Primo ohjelmapalvelut,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7100,ViestintƤpalvelut,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7400,Opintopalvelut,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7451,Marknadsfƶring och kommunikation - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7452,Marknadsfƶring och kommunikation - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8745,Marknadsfƶring och kommunikation / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7811,Materiallagret - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7812,Materiallagret - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,6,4511,Nationalekonomi - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,6,4512,Nationalekonomi - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8418,Nationalekonomi - Vasa / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,6,8451,Nationalekonomi / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8413,Nationalekonomi / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4131,Nationalekonomi – Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4132,Nationalekonomi – Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7478,NƤringslivskontakter - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8747,NƤringslivskontakter - Helsingfors / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7479,NƤringslivskontakter - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7472,NƤringslivskoordinator - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7458,Partnerskap,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7590,"Placeringar, ej utlokaliserade",, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska
handelshƶgskolan,01910,,4321,Redovisning - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4322,Redovisning - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8437,Redovisning - Vasa / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8432,Redovisning / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,9234,Rektors projektmedel / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,24,RektorsƤmbetet,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7411,RektorsƤmbetet - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7412,RektorsƤmbetet - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8741,RektorsƤmbetet / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4771,Ryska - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4772,Ryska - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,19,Samfinansierade enheter,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7321,Samlad service,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8472,Samtliga sprĆ„k - Vasa / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8471,Samtliga sprĆ„k / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4761,Spanska - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4762,Spanska - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4121,Statistik - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4122,Statistik - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8417,Statistik - Vasa / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8412,Statistik / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,9237,Strategiska satsning / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7441,Studentrekrytering - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7431,StudiebyrĆ„n - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7432,StudiebyrĆ„n - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8744,StudiebyrĆ„n - Vasa / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8743,StudiebyrĆ„n / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,21,Studier och antagning,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4711,Svenska - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4712,Svenska - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,11,4712,Svenska -Vasa,, +Svenska handelshƶgskolan,Hanken School of 
Economics,Svenska handelshƶgskolan,01910,,8360,Termins- och studieavgifter,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,20,3212,Tritonia,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,3212,Tritonia - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4741,Tyska - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4742,Tyska - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8542,WCEFIR - Wallenberg Center for Financial Research,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8541,WCEFIR - Wallenberg Center for Financial Research,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,1100,Wahlbergs fond,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7311,Ɩppna universietet - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8731,Ɩppna universitet / projekt,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7312,Ɩppna universitetet - Vasa,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7311,Ɩppna universitetet - Helsingfors,, +Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7312,Ɩppna universitetet - Vasa,, +Svenska kulturfonden,,,23793568,,,,, +Svenska litteratursƤllskapet i Finland rf,Society of Swedish Literature in Finland,,02001381,,,,, +SyƶpƤsƤƤtiƶ,Cancer Foundation,,02371657,,,,, +SƤteilyturvakeskus,Radiation and Nuclear Safety Authority,,5550012,,,,, +Taideyliopisto,University of the Arts Helsinki,,10103,,,,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,8400,Avoin kampus,, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,8420,Avoin yliopisto,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7000,Henkilƶstƶ- ja johdon tukipalvelut,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7310,IT/Sovellus- ja infrapalvelut,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7300,IT/Tietohallinto,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7330,IT/Tukipalvelut,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,8300,Jatkokoulutus ja tutkimus,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,41300,K/Henkilƶstƶ- ja kehittƤmispalvelut,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,100,K/Johto,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,41101,K/Kuvanveisto,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,41200,K/Kuvataiteen tohtoriohjelma,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,41103,K/Maalaustaide,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,411041,K/Nykytaiteen tutkimus,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,41302,K/NƤyttelypalvelut,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,110,K/Opetus,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,41301,K/Opintopalvelut,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,130,K/Palvelut,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,41105,"K/Praxis, taiteen
esittƤmiskƤytƤnnƶt",, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,41100,K/Taidegrafiikka,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,411040,K/Taidehistoria ja -teoria,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,411042,K/Taidepedagogiikka,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,41303,K/Tekniset tukipalvelut,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,41102,K/Tila-aikataide,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,120,K/Tohtorikoulutus ja tutkimus,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,41201,K/Tutkimustoiminta,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,41104,K/Yhteinen opetus,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7610,Kallio-Kuninkala,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,8210,Kielten opetus,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7800,Kirjasto,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,37820,Klassisen musiikin opintopalvelut,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3740,37410,Konserttivahtimestarit,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1390,KuvA/EsittƤmiskƤytƤnnƶt ja tilallisuus,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1250,KuvA/Henkilƶstƶpalvelut,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1260,KuvA/Johdon tuki,, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1100,KuvA/Johto,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1130,KuvA/ViestintƤ,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1590,KuvA/Kirjasto,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1320,KuvA/Kuvanveisto,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1380,KuvA/Kv-avaukset,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1340,KuvA/Maalaustaide,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1500,KuvA/Nykytaiteen laboratorio,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1270,KuvA/NƤyttelytoiminnan tuki,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1510,KuvA/NƤyttelytoiminta,, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1210,KuvA/Opintopalvelut,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1220,KuvA/Tekniset palvelut,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1250,KuvA/Henkilƶstƶpalvelut,, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1300,KuvA/Opiskelun tuki,, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1310,KuvA/Taidegrafiikka,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1320,KuvA/Kuvanveisto,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1430,KuvA/Taiteellinen tutkimus,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1220,KuvA/Tekniset palvelut,, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1330,KuvA/Tila-aikataide,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1340,KuvA/Maalaustaide,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1360,KuvA/Yhteisen opetuksen opetusalue,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1390,KuvA/EsittƤmiskƤytƤnnƶt ja tilallisuus,, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1410,KuvA/Tohtorikoulutusohjelma,, -Taideyliopisto,Uniarts 
Helsinki,Konstuniversitetet,10103,,1430,KuvA/Taiteellinen tutkimus,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1500,KuvA/Nykytaiteen laboratorio,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1510,KuvA/NƤyttelytoiminta,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2101,TeaK/Dekaani,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2200,TeaK/NƤyttelijƤtyƶn koulutusoh,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2202,TeaK/Dramaturgian koulutusohj,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2203,TeaK/Live Art and Performance S,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2210,TeaK/MA in Ecology and Contemporary Performance,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2220,TeaK/Utbildingsprog. fƶr skĆ„despelarkonst,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2232,TeaK/Tanssijan maisteriohjelma,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2233,TeaK/Koreografin maisteriohj,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2241,TeaK/Valosuunnittelun koulutus,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2242,TeaK/ƄƤnisuunnittelun koulutus,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2251,TeaK/Tanssiopettajan koulutus,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2252,TeaK/Teatteriopettajan koulutus,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2260,TeaK/EsittƤvien Taiteiden Tutk,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2270,TeaK/Yhteisen opetuksen keskus,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2811,TeaK/Hallintopalvelut,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2812,TeaK/ViestintƤ,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2821,TeaK/Opintohallinto,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2823,TeaK/Kirjasto,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2830,TeaK/Opetusteatteri,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2831,TeaK/Esitystuotanto,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2833,TeaK/Puvusto,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2834,TeaK/Valo ja ƤƤni,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3111,3111,"SibA/Musiikkikasvatuksen, jazzin ja kansanmusiikin osasto",, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1130,KuvA/ViestintƤ,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,1360,KuvA/Yhteisen opetuksen opetusalue,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,10,Kuvataideakatemia,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3111,37810,"Musiikkikasvatuksen, jazzin ja kansanmusiikin opintopalvelut",, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7690,N-talohanke,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,8200,Opetus,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7400,Opintopalvelut,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,37830,Orkesterikoulutuksen opintopalvelut,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,8000,Rehtoraatti,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43204,S/Arts Management,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,330,S/Dekaanin alaisuudessa olevat palvelut,, +Taideyliopisto,University of the Arts 
Helsinki,Konstuniversitetet,10103,,43108,S/DocMus-tohtorikoulu,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,434010,S/Esitystekniikka,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,432011,S/Global music,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43300,S/Henkilƶstƶ- ja kehittƤmispalvelut,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,340,S/I Varadekaanin alaisuudessa olevat palvelut,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,350,S/II Varadekaanin alaisuudessa,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43200,S/Jazzmusiikki,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43000,S/Johdon tuki,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,300,S/Johto,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43100,S/Jouset ja kamarimusiikki,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,432010,S/Kamu,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43302,S/KansainvƤliset asiat,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43201,S/Kansanmusiikki,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,431090,S/Kimu Kuopio,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,431091,S/Kimu Kuopio hallinto,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43109,S/Kirkkomusiikki (Kuopio),, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43101,S/Kirkkomusiikki ja urut (Helsinki),, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,310,S/Klasu,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43110,S/Klasu johdon tuki,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,434004,S/Konserttivahtimestarit,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43402,S/Koulutuksen kehittƤminen,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,431020,S/Laulumusiikki,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43102,S/Laulumusiikki,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,434002,S/Lavatiimi,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,320,S/MJK,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43207,S/MJK johdon tuki,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43205,S/MuTri-tohtorikoulu,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,432050,S/MuTri-tohtorikoulu1,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,432051,S/MuTri-tohtorikoulu2,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43103,S/Musiikin johtaminen,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43202,S/Musiikkikasvatus,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43203,S/Musiikkiteknologia,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,431021,S/Ooppera,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43301,S/Opintopalvelut,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,434003,S/Orkesterisoitinten opintopalvelut,, 
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43104,"S/Piano, harmonikka, kitara ja kantele",, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,431040,S/Pimu,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,434001,S/Primo,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43105,"S/Puhaltimet, lyƶmƤsoittimset ja harppu",, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43206,S/SeinƤjoki,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,434011,S/Soitinhuolto,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,431041,S/SƤestys,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43106,S/SƤvellys ja musiikinteoria,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43401,S/Tekniset tuotantopalvelut,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43400,S/Tuottajapalvelut,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,434000,S/Tuottajat,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43500,S/Tutkimus- ja tohtorikoulutuspalvelut,, +Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,43107,S/Vanha musiikki,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3800,SibA/Dekaani,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32270,SibA/DocMus-tohtorikoulu,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3750,SibA/Esitystekniikkapalvelut,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3705,SibA/Henkilƶstƶ- ja johdon tukipalvelut,, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3111,31120,SibA/Jazzin aineryhmƤ,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32240,SibA/Jousten aineryhmƤ,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3817,SibA/KansainvƤliset asiat,, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3111,31130,SibA/Kansanmusiikin aineryhmƤ,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3111,31165,SibA/Taidehallinnon aineryhmƤ,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3111,31190,SibA/Musiikkiteknologian aineryhmƤ,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3111,31270,SibA/MuTri-tohtorikoulu,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3811,SibA/KehittƤmiskeskus,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3780,SibA/Keskitetyt opintopalvelut,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3600,SibA/Kirjasto,, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32140,"SibA/Kirkkomusiikin ja urkujen aineryhmƤ, Hki",, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32150,"SibA/Kirkkomusiikin ja urkujen aineryhmƤ, Kuopio",, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32170,SibA/Laulun ja korrepetition aineryhmƤ,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32180,SibA/SƤvellyksen ja musiikinteorian aineryhmƤ,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32210,SibA/Pianon aineryhmƤ (piano+kantele+kitara+harmonikka),, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,3222,SibA/Klassisen musiikin osasto,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32230,"SibA/Orkesteri-, kamarimusiikki- ja kapellimestarikoulutuksen aineryhmƤ",, -Taideyliopisto,Uniarts 
Helsinki,Konstuniversitetet,10103,3222,32240,SibA/Jousten aineryhmƤ,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32250,SibA/Puhaltimien aineryhmƤ (lyƶmƤsoittimet+harppu),, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32260,SibA/Vanhan musiikin aineryhmƤ,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32270,SibA/DocMus-tohtorikoulu,, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,3251,SibA/Kuopion hallinto,, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,3252,SibA/Kuopion kirjasto,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32170,SibA/Laulun ja korrepetition aineryhmƤ,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3111,31270,SibA/MuTri-tohtorikoulu,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3111,31160,SibA/Musiikkikasvatuksen aineryhmƤ,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3111,3111,"SibA/Musiikkikasvatuksen, jazzin ja kansanmusiikin osasto",, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3111,31190,SibA/Musiikkiteknologian aineryhmƤ,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32230,"SibA/Orkesteri-, kamarimusiikki- ja kapellimestarikoulutuksen aineryhmƤ",, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32210,SibA/Pianon aineryhmƤ (piano+kantele+kitara+harmonikka),, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3830,SibA/Primo ohjelmapalvelut,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32250,SibA/Puhaltimien aineryhmƤ (lyƶmƤsoittimet+harppu),, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3111,3400,SibA/SeinƤjoen toimipaikka,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3600,SibA/Kirjasto,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3705,SibA/Henkilƶstƶ- ja johdon tukipalvelut,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3710,SibA/ViestintƤpalvelut,, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3730,SibA/Soitinten huolto ja hankinnat,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32180,SibA/SƤvellyksen ja musiikinteorian aineryhmƤ,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3111,31165,SibA/Taidehallinnon aineryhmƤ,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3814,SibA/Tohtoriohjelma,, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3740,3740,SibA/Tuotantopalvelut,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3780,SibA/Keskitetyt opintopalvelut,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3111,37810,"Musiikkikasvatuksen, jazzin ja kansanmusiikin opintopalvelut",, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,37820,Klassisen musiikin opintopalvelut,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,37830,Orkesterikoulutuksen opintopalvelut,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3800,SibA/Dekaani,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,3222,32260,SibA/Vanhan musiikin aineryhmƤ,, +Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3801,SibA/Varadekaani I,, Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3802,SibA/Varadekaani II,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3811,SibA/KehittƤmiskeskus,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3814,SibA/Tohtoriohjelma,, -Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3817,SibA/KansainvƤliset asiat,, 
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3710,SibA/Viestintäpalvelut,,
 Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,3825,SibA/Yritysyhteistyö,,
-Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7000,Henkilöstö- ja johdon tukipalvelut,,
-Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7110,Varainhankinta,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,30,Sibelius-Akatemia,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42102,T/Dramaturgian koulutusohjelma,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,220,T/Esittävien taiteiden tutkimuskeskus,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42300,T/Johdon tuki,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,200,T/Johto,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42113,T/Kirjoittamisen maisteriohjelma,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42108,T/Koreografian maisteriohjelma,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42103,T/Live Art and Performance Studies,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42104,T/MA in Eco and Contemp.Per,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42100,T/Näyttelijäntaiteen koulutusohjelma,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,423040,T/Näyttämö ja tarpeisto,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42101,T/Ohjauksen koulutusohjelma,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42301,T/Opetuksen koordinointipalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42302,T/Opetuksen suunnittelupalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,210,T/Opetus,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,230,T/Palvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,423041,T/Puvusto,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42107,T/Tanssijantaiteen maisteriohjelma,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42106,T/Tanssin koulutusohjelma BA,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42111,T/Tanssinopettajan maisteriohjelma,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42112,T/Teatteriopettajan maisteriohjelma,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42200,T/Tohtorikoulutus,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42304,T/Tuotantotekniset palvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42303,T/Tuottajapalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42201,T/Tutkimus,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42105,T/Utbildningsprog. I skådespelarkonst,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,423042,T/Valo ja ääni,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42109,T/Valosuunnittelun koulutus- ja maisteriohjelmat,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42114,T/Yhteisen opetuksen keskus,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,42110,T/Äänisuunnittelun koulutus- ja maisteriohjelmat,,
 Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7200,Talouspalvelut,,
-Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7300,IT/Tietohallinto,,
-Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7310,IT/Sovellus- ja infrapalvelut,,
-Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7330,IT/Tukipalvelut,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2101,TeaK/Dekaani,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2202,TeaK/Dramaturgian koulutusohj,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2260,TeaK/Esittävien Taiteiden Tutk,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2831,TeaK/Esitystuotanto,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2811,TeaK/Hallintopalvelut,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2823,TeaK/Kirjasto,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2233,TeaK/Koreografin maisteriohj,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2203,TeaK/Live Art and Performance S,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2210,TeaK/MA in Ecology and Contemporary Performance,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2200,TeaK/Näyttelijätyön koulutusoh,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2832,TeaK/Näyttämö ja tarpeisto,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2201,TeaK/Ohjauksen koulutusohjelma,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2822,TeaK/Opetuksen suunnitt ja keh,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2830,TeaK/Opetusteatteri,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2821,TeaK/Opintohallinto,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2833,TeaK/Puvusto,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2232,TeaK/Tanssijan maisteriohjelma,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2231,TeaK/Tanssin koulutusohjelma BA,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2251,TeaK/Tanssiopettajan koulutus,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2252,TeaK/Teatteriopettajan koulutus,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2220,TeaK/Utbildingsprog. för skådespelarkonst,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2834,TeaK/Valo ja ääni,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2241,TeaK/Valosuunnittelun koulutus,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2812,TeaK/Viestintä,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2270,TeaK/Yhteisen opetuksen keskus,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,2242,TeaK/Äänisuunnittelun koulutus,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,20,Teatterikorkeakoulu,,
 Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7600,Toimitilapalvelut,,
-Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7610,Kallio-Kuninkala,,
-Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7690,N-talohanke,,
-Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7800,Kirjasto,,
-Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,8000,Rehtoraatti,,
-Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,8200,Opetus,,
-Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,8210,Kielten opetus,,
-Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,8300,Jatkokoulutus ja tutkimus,,
-Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,8400,Avoin kampus,,
 Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,8410,Täydennyskoulutus,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,,,,http://isni.org/isni/0000000103468395,36
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311257,Media-alan koulutus,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H2,311003,Henkilöstöpalvelut,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311004,Talous- ja projektipalvelut,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H2,311007,Matkapalvelut,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H6,311013,Laadunhallinta ja toiminnanohjaus,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7110,Varainhankinta,,
+Taideyliopisto,Uniarts Helsinki,Konstuniversitetet,10103,,7100,Viestintäpalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47203,Y/Avoimen kampuksen palvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,740,Y/Avoimen kampuksen yhteinen opetus,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47401,Y/Avoin kampus,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47403,Y/Erikoistumisopinnot,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,720,Y/HR-ja palvelujohtajan alaisuudessa olevat palvel,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47200,Y/Henkilöstö- ja kehittämispalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472000,Y/Henkilöstöpalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47300,Y/Historiafoorumi,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47211,Y/IT/Tietohallinto,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472123,Y/Iltavahtimestarit,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,71,Y/Johto,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472001,Y/Kehittämispalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47400,Y/Kielten opetus,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47204,Y/Kirjasto,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472040,Y/Kirjasto Siba,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472041,Y/Kirjasto TeaK/Kuva,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47221,Y/Kumppanuuspalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47230,Y/Lakipalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,723,Y/Lakipalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47201,Y/Opintopalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,72,Y/Palvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,730,Y/Research Hub,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472111,Y/Sovellus- ja infrapalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472102,Y/Sörnäisten kampus_ta,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472122,Y/Sörnäisten kampus_ti,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472202,Y/Sörnäisten kampus_vi,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47302,Y/Taidekasvatuksen tutkimuksen tk CERADA,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47301,Y/Taiteellisen tutkimuksen tk CfAR,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,721,Y/Talousjohtajan alaisuudessa olevat palvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47210,Y/Talouspalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472110,Y/Tietohallinto,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47212,Y/Tilapalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47310,Y/Tohtorikoulutus ja tutkimus,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,731,Y/Tohtorikoulutus ja tutkimus,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472112,Y/Tukipalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47202,Y/Tutkimuspalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472101,Y/Töölön kampus_ta,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472121,Y/Töölön kampus_ti,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472201,Y/Töölön kampus_vi,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,722,Y/Viestintä- ja kumppanuuspalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47220,Y/Viestintäpalvelut,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,47402,Y/Yhteinen opetus,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,74,Y/Yhteinen opetus,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,73,Y/Yhteinen tutkimus,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472100,Y/Yhteiset_ta,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472120,Y/Yhteiset_ti,,
+Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472200,Y/Yhteiset_vi,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,,,,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311402,Ajoneuvotekniikan ko,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311402,Ajoneuvotekniikan ko ;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104010,Ammatillinen opettajankoulutus,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4140000,Ammattikorkeakoulun johto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4140,Ammattikorkeakoulun johto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4146,Ammattikorkeakoulun yhteiset,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H6,311831,Ammattipedagoginen TKI,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4120010,Ammattipedagoginen TKI,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311831,Ammattipedagoginen TKI;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311021,Asiakirjahallinnon ja johdon tuki;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311414,Automaatioteknologia YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311414,Automaatioteknologia YAMK;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106030,Autotekniikan tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106030,Autotekniikka,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311110,Avoin amk,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4131000,Avoin amk,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105040,Bachelor's Degree Programme in Environmental Engin,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102030,Bachelor's Degree Programme in International Busin,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103030,Bachelor's Degree Programme in Media and Arts,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107030,Bachelor's Degree Programme in Nursing,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311505,Bioanalytiikan ko / Bioanalyytikko,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311505,Bioanalytiikan ko;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101030,Bioanalytiikko,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101040,Bioanalytiikko Seinäjoki,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101030,Bioanalyytikon tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311408,Biotuote- ja prosessitekniikan ko,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311408,Biotuote- ja prosessitekniikan ko ;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105030,Biotuotetekniikan tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105030,Biotuotetekniikka,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311410,DP in Energy and Environmental Engineering;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105040,DP in Environmental Engineering,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311301,DP in International Business ;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103030,DP in Media and Arts,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311255,DP in Media and Arts;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311511,DP in Nursing;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106100,Dataosaaminen ja tekoäly YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311410,Degree Programme in Energy and Environmental Engineering,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102040,Degree Programme in IBM/MEL (MBA),,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311301,Degree Programme in International Business,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102030,Degree Programme in International Business,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311255,Degree Programme in Media and Arts,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107030,Degree Programme in Nursing,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130000,EDU hallinto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4121030,EDUn projektit,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311254,Elokuvan ja television ko;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311524,"Ensihoitaja-, Kätilö- ja terveydenhoitajakoulutu",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107040,"Ensihoitaja-, kätilö- ja terveydenhoitaja",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311504,Ensihoitajakoulutus,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107040,"Ensihoitajan, kätilön ja terveydenhoitajan tutki",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4132000,Erikoistumiskoulutukset,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4132000,Erikoistumisopinnot,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107050,Fiiliskeskus,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H6,311014,Floworks,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H6,311017,Kehittämisyksikkö,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311445,Fysiikka,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104030,Fysiikka ja matematiikka,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311445,Fysiikka;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101050,Fysioterapeutin tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101050,Fysioterapeutti,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101060,Fysioterapiaklinikka,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311507,Fysioterapian ko / Fysioterapeutti,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311507,Fysioterapian ko;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4141030,Hallintopalvelut,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311021,Hallintopalvelut (ja tapahtumapalvelut),,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311022,Liikuntapalvelut,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311030,Hankintapalvelut,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311031,Viestintäpalvelut,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311040,Kiinteistöpalvelut,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311041,Tilapalvelut,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311050,Kirjasto- ja tietopalvelut,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311060,Tietohallinto,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311080,Opintopalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311030,Hankintapalvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4141,Henkilöstö- ja viestintäpalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000052,Henkilöstö- ja viestintäpalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H1,H2,Henkilöstöhallinto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H2,Henkilöstöhallinto;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4141020,Henkilöstön kehittäminen,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H2,311003,Henkilöstöpalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4141000,Henkilöstöpalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311003,Henkilöstöpalvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101,Hyvinvointi ja terveysteknologia,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000224,Hyvinvointi ja terveysteknologia,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101010,"Hyvinvointi ja terveysteknologia laboratoriot, opetustilat ja -tarvikkeet",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101020,Hyvinvointi ja terveysteknologia varaukset,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101000,Hyvinvointi ja terveyteknologia hallinto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101110,Hyvinvointiteknologia YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311514,Hyvinvointiteknologian ko / YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311514,Hyvinvointiteknologian ko YAMK;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101110,Hyvinvointiteknologian ylempi tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4142030,Impact areas,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4146000,Investoinnit,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102090,Kansainvälinen myynti ja myynnin johtaminen YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311302,Kansainvälisen myynnin ja myynnin johtamisen koul,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311090,Kansainväliset palvelut,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311110,Avoin amk,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144030,Kansainväliset palvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311090,Kansainväliset palvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H1,H6,Kehittäminen,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144000,Kehittäminen,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311014,Kehittäminen (ent. Floworks);;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H6,Kehittäminen;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H6,311017,Kehittämisyksikkö,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311017,Kehittämisyksikkö;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104040,Kielet,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311112,Kielipalvelut,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311250,"Musiikin ko, muusikko, Musiikkipedagogi",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311112,Kielipalvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311040,Kiinteistöpalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311040,Kiinteistöpalvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4145,Kirjasto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4145000,Kirjasto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311050,Kirjasto- ja tietopalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311050,Kirjasto- ja tietopalvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311529,Kliinisen asiantuntijan koulutus YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311529,Kliinisen asiantuntijan koulutus YAMK;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311401,Konetekniikan ko,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311401,Konetekniikan ko ;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106040,Konetekniikan tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106040,Konetekniikka,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4150000,Konsernikirjaukset,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H1,H4,Korkeakoulupalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H4,Korkeakoulupalvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144,Koulutuksen ja oppimisen palvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000051,Koulutuksen ja oppimisen palvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144010,Koulutuksen kehittämispalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144020,Koulutuksen tukipalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311100,Koulutuksen tukipalvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4108,Koulutuksen yhteiset,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H1,H5,Koulutus ja TKI-toiminta,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H5,Koulutus ja TKI-toiminta;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130050,Koulutusvienti,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311252,Kuvataiteen ko,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311255,Degree Programme in Media and Arts,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311256,Mediatuottamisen ko YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311252,Kuvataiteen ko ;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102050,Kykylaakso,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311599,Kätilökoulutus,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H6,311013,Laadunhallinta ja toiminnanohjaus,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143010,Laadunhallinta ja toiminnanohjaus,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143010,Laatu ja toiminnanohjaus,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311409,Laboratoriotekniikan ko,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311409,Laboratoriotekniikan ko ;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105050,Laboratoriotekniikan tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105050,Laboratoriotekniikka,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311300,Liiketalouden ko,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311300,Liiketalouden ko ;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311303,Liiketalouden ko maakunnat;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102060,Liiketalous,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130080,Liiketoiminnan TKI-projektit,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130070,Liiketoiminnan kehittäminen,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130010,Liiketoiminnan koulutukset ja osaamiset,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H1,H3,Liiketoiminta,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102,Liiketoiminta,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130,Liiketoiminta,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000221,Liiketoiminta,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H52,Liiketoiminta ja palvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102000,Liiketoiminta ja palvelut hallinto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102010,"Liiketoiminta ja palvelut laboratoriot, opetustila",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102010,"Liiketoiminta ja palvelut laboratoriot, opetustilat ja -tarvikkeet",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102020,Liiketoiminta ja palvelut varaukset,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H52,Liiketoiminta ja palvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H3,Liiketoiminta;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143,Liiketoimintapalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143000,Liiketoimintapalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000049,Liiketoimintapalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144040,Liikkuvuusprojektit,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311022,Liikuntapalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144090,Liikuntapalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311022,Liikuntapalvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103060,MA in Screenwriting YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311921,MD in Information Technology;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311558,MD in International Business Management / in Educa,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311456,MD in Management and Economy in the International,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311422,MD in Risk Management and Circular Economy;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311259,MD in Screenwriting YAMK;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311259,MDP in Screenwriting YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4142010,Maakuntakorkeakoulu,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311456,Master's Degree Programme in Management and Economy in the International Forest Sector,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311921,Master´s Degree in Information Technology,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311444,Matematiikka,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311444,Matematiikka;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H2,311007,Matkapalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4141010,Matkapalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311007,Matkapalvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103,"Media, musiikki ja taide",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000220,"Media, musiikki ja taide",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103000,"Media, musiikki ja taide hallinto",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103010,"Media, musiikki ja taide laboratoriot, opetustilat",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103010,"Media, musiikki ja taide laboratoriot, opetustilat ja -tarvikkeet",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103020,"Media, musiikki ja taide varaukset",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103040,Media-ala,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311257,Media-alan koulutus,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311257,Media-alan koulutus;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103070,"Mediatuottaminen, Emerging Media YAMK",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311256,Mediatuottamisen ko YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311256,Mediatuottamisen ko YAMK;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311450,Metsätalouden ko,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311450,Metsätalouden ko ;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105060,Metsätalouden tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105060,Metsätalous,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311260,"Musiikin ko, Musiikkipedagogi YAMK",,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311300,Liiketalouden ko,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311301,Degree Programme in International Business,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311302,Yrittäjyyden ko YAMK,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311309,Yrittäjyyden ja tiimijohtamisen ko / Proakatemia,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311350,Tietojenkäsittelyn ko,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311351,Tietojärjestelmäosaamisen ko YAMK,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311401,Konetekniikan ko,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311402,Ajoneuvotekniikan ko,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311260,"Musiikin ko, Musiikkipedagogi YAMK;;",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311251,"Musiikin ko, Musiikkipedagogi;;",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311250,"Musiikin ko, muusikko, Musiikkipedagogi",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311250,"Musiikin ko, muusikko;;",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103080,Musiikin ylempi tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103080,Musiikki YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103050,"Musiikkipedagogi, muusikko",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311080,Opintopalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144080,Opintopalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311080,Opintopalvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144060,Opiskelijarekrytointi,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000204,Oppimisen ja hyvinvoinnin palvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144070,Oppimisen ja hyvinvoinnin tuki,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144070,Oppimisen tuki,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130020,Osaamisen myynti,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4121020,PEDA koulutushankkeet,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311558,Palvelu- ja projektiliiketoiminnan ko YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130090,Palvelujen katteeton myynti,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311559,Palveluliiketoiminnan johtamisen koulutus YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311559,Palveluliiketoiminnan johtamisen koulutus YAMK;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311551,Palveluliiketoiminnan ko,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311551,Palveluliiketoiminnan ko;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104,Pedagogiset ratkaisut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000219,Pedagogiset ratkaisut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104000,Pedagogiset ratkaisut hallinto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104020,Pedagogiset ratkaisut varaukset,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4152000,Perusrahoituksen kirjaukset,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102070,Proakatemia,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311310,Proakatemia YAMK;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102100,Proakatemia/Yrittäjyyden YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4150030,Rahastojen poistot,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105,Rakennettu ympäristö ja biotalous,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000222,Rakennettu ympäristö ja biotalous,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105000,Rakennettu ympäristö ja biotalous hallinto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105010,"Rakennettu ympäristö ja biotalous laboratoriot,",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105010,"Rakennettu ympäristö ja biotalous laboratoriot, koulutustilat ja -tarvikkeet",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105020,Rakennettu ympäristö ja biotalous varaukset,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311403,Rakennus- ja yhdyskuntatekniikan ko,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311404,Sähkö- ja automaatiotekniikan ko,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311405,Tieto- ja viestintätekniikan ko,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311406,"Talotekniikan ko , sähköinen talotekniikka, LVI-tekniikka",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311403,Rakennus- ja yhdyskuntatekniikan ko ;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105080,Rakennus- ja yhdyskuntatekniikka,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311407,Rakennusalan työnjohdon ko,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311408,Biotuote- ja prosessitekniikan ko,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311409,Laboratoriotekniikan ko,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311410,Degree Programme in Energy and Environmental Engineering,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311411,Rakentamisen ja talotekniikan ko YAMK,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311413,Teknologiaosaamisen johtamisen ko YAMK,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311414,Automaatioteknologia YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311407,Rakennusalan työnjohdon ko ;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105090,Rakennusalan työnjohdon tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105090,Rakennusalan työnjohto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105070,Rakennusarkkitehdin tutkinto-ohjelma,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311423,Rakennusarkkitehti,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311444,Matematiikka,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311445,Fysiikka,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311450,Metsätalouden ko,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311456,Master's Degree Programme in Management and Economy in the International Forest Sector,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311502,Sairaanhoitajakoulutus,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311503,Sosionomikoulutus,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311504,Ensihoitajakoulutus,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311505,Bioanalytiikan ko / Bioanalyytikko,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311507,Fysioterapian ko / Fysioterapeutti,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105070,Rakennusarkkitehti,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311423,Rakennusarkkitehti;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105080,Rakennustekniikan tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105120,Rakentaminen YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311424,Rakentaminen YAMK;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H53,Rakentaminen ja ympäristöteknologia,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H53,Rakentaminen ja ympäristöteknologia;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311411,Rakentamisen ja talotekniikan ko YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H1,Rehtorin toimisto;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311000,Rehtorin toimisto;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102080,Restonomi,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102110,Restonomi YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105130,Risk Management & Circular Economy (YAMK),,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101080,Röntgenhoitaja,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101100,Röntgenhoitaja Seinäjoki,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311510,Röntgenhoitajakoulutus,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311514,Hyvinvointiteknologian ko / YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311510,Röntgenhoitajakoulutus;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107060,Sairaanhoitaja,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311502,Sairaanhoitajakoulutus,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311502,Sairaanhoitajakoulutus;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107060,Sairaanhoitajan tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311417,Software Engineering;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311516,Sosiaali- ja terveysalan johtamisen koulutus (YAMK),,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101120,Sosiaaliala YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101070,Sosionomi,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311503,Sosionomikoulutus,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311503,Sosionomikoulutus;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311517,Sosionomin koulutus YAMK,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311518,Terveyden edistämisen koulutus YAMK,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311524,Terveydenhoitajakoulutus,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311529,Kliinisen asiantuntijan koulutus YAMK,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311551,Palveluliiketoiminnan ko,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311558,Palvelu- ja projektiliiketoiminnan ko YAMK,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311559,Palveluliiketoiminnan johtamisen koulutus YAMK,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311599,Kätilökoulutus,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311517,Sosionomin koulutus YAMK;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101090,Sote monimuunto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4147,Strategiarahoitus,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4147000,Strategiarahoitus OKM,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4120020,Strateginen TK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144050,Summer School,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311404,Sähkö- ja automaatiotekniikan ko,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311404,Sähkö- ja automaatiotekniikan ko ;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106080,Sähkö- ja automaatiotekniikan tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106080,Sähkö- ja automaatiotekniikka,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H6,311600,TAMK Ammatillinen opettajankoulutus,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H6,311831,Ammattipedagoginen TKI,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311921,Master´s Degree in Information Technology,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311925,Työelämäpalvelut,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,,H1,Rehtorin toimisto,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H1,H2,Henkilöstöhallinto,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H1,H3,Liiketoiminta,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H1,H4,Korkeakoulupalvelut,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H1,H5,Koulutus ja TKI-toiminta,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H55,Terveys- ja sosiaalipalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311600,TAMK Ammatillinen opettajankoulutus;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311110,TAMK EDU;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104040,TAMK Kielet ja viestintä,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104030,TAMK Matematiikka ja fysiikka,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4142,TKI ja maksullinen palvelutoiminta,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000050,TKI ja maksullinen palvelutoiminta,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4142000,TKI ja maksullinen palvelutoiminta hallinto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4120000,TKI-palvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000063,TKI-palvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311800,TKI-palvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4121030,TREE TAMK projektit,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H51,"Taide, musiikki ja media",,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H52,Liiketoiminta ja palvelut,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H53,Rakentaminen ja ympäristöteknologia,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H51,"Taide, musiikki ja media;;",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311406,"Talotekniikan ko , sähköinen talotekniikka, LVI-tekniikka",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311411,Talotekniikan ko YAMK;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311406,"Talotekniikan ko, LVI-tekniikka / sähköinen talo",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105100,Talotekniikan tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105100,Talotekniikka,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105110,Talotekniikka YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311004,Talous- ja projektipalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311004,Talous- ja projektipalvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143020,"Talous-, projekti- ja hankintapalvelut",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4147010,Tampere3,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4100,Tampereen ammattikorkeakoulu,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000014,Tampereen ammattikorkeakoulu,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4150010,Tekniset kirjaukset,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4150,Tekniset kustannuspaikat,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106110,Teknologiajohtaminen koulutus YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311413,Teknologiaosaamisen johtamisen ko YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311413,Teknologiaosaamisen johtamisen ko YAMK;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106110,Teknologiaosaamisen johtamisen ylempi tutkinto-ohj,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H54,Teollisuusteknologia,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H1,H6,Kehittäminen,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106,Teollisuusteknologia,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000223,Teollisuusteknologia,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106000,Teollisuusteknologia hallinto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106010,"Teollisuusteknologia laboratoriot, opetustilat ja",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106010,"Teollisuusteknologia laboratoriot, opetustilat ja -tarvikkeet",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106020,Teollisuusteknologia varaukset,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H54,Teollisuusteknologia;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311518,Terveyden edistämisen koulutus YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311518,Terveyden edistämisen koulutus YAMK;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107070,Terveyden edistämisen ylempi tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311524,Terveydenhoitajakoulutus,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107,Terveys,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000225,Terveys,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107070,Terveys YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107000,Terveys hallinto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107010,"Terveys laboratoriot, opetustilat ja -tarvikkeet",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107020,Terveys varaukset,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H55,Terveys- ja sosiaalipalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311516,Terveys- ja sosiaalipalvelut YAMK;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H55,Terveys- ja sosiaalipalvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106070,TiTe Software Engineering,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311405,Tieto- ja viestintätekniikan ko,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311405,Tieto- ja viestintätekniikan ko ;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311060,Tietohallinto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143040,Tietohallinto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143050,Tietohallinto,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311060,Tietohallinto;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106050,Tietojenkäsittely,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311350,Tietojenkäsittely ko ;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311350,Tietojenkäsittelyn ko,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106050,Tietojenkäsittelyn tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106120,Tietojärjestelmäosaaminen YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311351,Tietojärjestelmäosaamisen ko YAMK,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311351,Tietojärjestelmäosaamisen ko YAMK;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106060,Tietotekniikan tutkinto-ohjelma,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106060,Tietotekniikka,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143030,Tila- ja kiinteistöpalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143060,Tila- ja kiinteistöpalvelut TAU,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311041,Tilapalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311041,Tilapalvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130030,Tuotteistetut palvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4120,"Tutkimus, kehitys ja innovaatiotoiminta",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4120000,"Tutkimus-, kehitys ja innovaatiot",,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4108010,Tutkintoon johtava myytävä koulutus,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311925,Työelämäpalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130040,Työvoimakoulutus,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4131,Täydennyskoulutus,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4108010,Täydennyskoulutus ja myytävät palvelut (TAMK ED,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4121000,Ulkoiset TKI-projektit,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4121010,Ulkoiset pedagogiset TKI-projektit,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4121,Ulkoiset projektit,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311031,Viestintäpalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4141040,Viestintäpalvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311031,Viestintäpalvelut;;,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130060,Vuokraus,,
+Tampereen ammattikorkeakoulu,Tampere
University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4142020,Y-Kampus,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311924,Y-Kampus;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4142020,Y-kampus,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4108000,Yhteiset opinnot,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311309,YrittƤjyyden ja tiimijohtamisen ko / Proakatemia,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311309,YrittƤjyyden ja tiimijohtamisen ko / Proakatemia;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311302,YrittƤjyyden ko YAMK,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106090,Ƅlyteollisuus YAMK,, Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,,,,http://isni.org/isni/0000000093279856,1972 -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,22,220,Yleispalvelujen yhteiset,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,22,221,Strateginen johtaminen,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,22,223,ViestintƤ,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,22,224,Kumppanuudet,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,22,225,Talous ja toiminnan ohjaus,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,91,911,Arkkitehtuurin laitos,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,81,811,Elektroniikan ja tietoliikennetekniikan laitos,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,61,611,Fysiikan laitos,, Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,22,226,Henkilƶstƶpalvelut,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,22,227,Tietohallinto,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,22,228,Tilapalvelut,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,23,231,Oppimisen tuki,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,23,232,Tutkintopalvelut,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,23,233,Opintopalvelut,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,24,241,Tutkimuksen kehittƤmispalvelut,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,24,242,Tutkimuspalvelut,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,71,711,Hydrauliikan ja automatiikan laitos,, Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska 
universitet,01915,24,243,Innovaatiopalvelut,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,61,612,Kemian ja biotekniikan laitos,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,91,913,Kielikeskus,, Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,24,244,Kirjasto,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,25,251,Tampere3,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,71,715,Kone- ja tuotantotekniikan laitos,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,22,224,Kumppanuudet,, Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,61,610,Luonnontieteiden tiedekunta,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,61,611,Fysiikan laitos,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,61,612,Kemian ja biotekniikan laitos,, Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,61,613,Matematiikan laitos,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,61,614,Optoelektroniikan tutkimuslaitos,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,71,710,Teknisten tieteiden tiedekunta,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,71,711,Hydrauliikan ja automatiikan laitos,, Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,71,712,Materiaaliopin laitos,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,71,713,Systeemitekniikan laitos,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,71,715,Kone- ja tuotantotekniikan laitos,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,81,810,Tieto- ja sƤhkƶtekniikan tiedekunta,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,81,811,Elektroniikan ja tietoliikennetekniikan laitos,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,23,233,Opintopalvelut,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,23,231,Oppimisen tuki,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,61,614,Optoelektroniikan tutkimuslaitos,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,91,914,Porin laitos,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,91,912,Rakennustekniikan laitos,, Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,81,812,SignaalinkƤsittelyn laitos,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,22,221,Strateginen johtaminen,, +Tampereen teknillinen yliopisto,Tampere University of 
Technology,Tammerfors tekniska universitet,01915,71,713,Systeemitekniikan laitos,, Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,81,813,SƤhkƶtekniikan laitos,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,81,814,Tietotekniikan laitos,, Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,91,910,Talouden ja rakentamisen tiedekunta,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,91,911,Arkkitehtuurin laitos,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,91,912,Rakennustekniikan laitos,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,91,913,Kielikeskus,, -Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,91,914,Porin laitos,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,22,225,Talous ja toiminnan ohjaus,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,25,251,Tampere3,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,71,710,Teknisten tieteiden tiedekunta,, Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,91,915,Teollisuustalouden laitos,, Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,91,916,Tiedonhallinnan ja logistiikan laitos,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,81,810,Tieto- ja sƤhkƶtekniikan tiedekunta,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,22,227,Tietohallinto,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,81,814,Tietotekniikan laitos,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,22,228,Tilapalvelut,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,24,241,Tutkimuksen kehittƤmispalvelut,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,24,242,Tutkimuspalvelut,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,23,232,Tutkintopalvelut,, Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,91,918,TƤydennyskoulutuskeskus Edutech,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,22,223,ViestintƤ,, +Tampereen teknillinen yliopisto,Tampere University of Technology,Tammerfors tekniska universitet,01915,22,220,Yleispalvelujen yhteiset,, Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,,,http://isni.org/isni/0000000123146254,37 +Tampereen yliopisto,Tampere University,,10122,,,,, Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,2501,BioMediTech,, +Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,8150,Digitalisaatio-kehittƤmishanke,, +Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,8220,Henkilƶstƶn kehittƤminen ja assistenttipalvelut,, 
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,8210,Henkilöstöpalvelut,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,1010,Informaatioteknologian ja viestinnän tiedekunta,,
 Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,2502,Informaatiotieteiden yksikkö,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,6160,Innovaatiokulttuuri-kehittämishanke,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,6150,Innovaatiopalvelut ja kumppanuudet,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,5160,Jatkuva oppiminen -kehittämishanke,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,2120,Johdon tuki ja hallinnon kehittäminen,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,1020,Johtamisen ja talouden tiedekunta,,
 Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,2503,Johtamiskorkeakoulu,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,8140,Kampuskehitys-kehittämishanke,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,1030,Kasvatustieteiden ja kulttuurin tiedekunta,,
 Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,02504,Kasvatustieteiden yksikkö,,
 Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,2505,"Kieli-, käännös- ja kirjallisuustieteiden yksikkö",,
+Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,2572,Kielikeskus,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,2010,Kielikeskus,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,5140,Kielikeskus,,
+Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,2573,Kirjasto,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,5030,Kirjasto,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,6140,Kirjasto,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,5020,Koulutus ja oppiminen,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,5120,Koulutus ja oppiminen,,
+Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,2580,Laboratoriopalvelut,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,6020,Laboratoriopalvelut,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,6120,Laboratoriopalvelut,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,1040,Lääketieteen ja terveysteknologian tiedekunta,,
 Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,2506,Lääketieteen yksikkö,,
-Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,02507,"Viestinnän, median ja teatterin yksikkö",,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,8010,Osaaminen ja kulttuuri,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,1050,Rakennetun ympäristön tiedekunta,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,8020,Talous- ja tilapalvelut,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,8120,Talouspalvelut,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,1060,Tekniikan ja luonnontieteiden tiedekunta,,
 Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,2508,Terveystieteiden yksikkö,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,6030,Tietoarkisto,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,6130,Tietoarkisto,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,5010,Tietohallinto,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,8130,Tietohallinto,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,8110,Toiminnanohjaus ja analytiikka,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,4010,Toiminnanohjaus ja suunnittelu,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,6190,Tutkijakollegium,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,6180,Tutkijakoulu,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,6010,Tutkimus ja innovaatiot,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,6110,Tutkimuspalvelut,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,5150,Työelämäyhteydet ja jatkuva oppiminen,,
+Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,02507,"Viestinnän, median ja teatterin yksikkö",,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,8230,Viestintä ja markkinointi,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,8030,"Viestintä, brändi ja markkinointi",,
 Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,02509,Yhteiskunta- ja kulttuuritieteiden yksikkö,,
-Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,2580,Laboratoriopalvelut,,
 Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,2571,Yhteiskuntatieteellinen tietoarkisto,,
-Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,2572,Kielikeskus,,
-Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,2573,Kirjasto,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,1070,Yhteiskuntatieteiden tiedekunta,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,3010,Yhteistyö ja kumppanuudet,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,6170,Yhteistyö ja kumppanuudet -kehittämishanke,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,2110,Yliopistokeskusten koordinaatio,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,1120,Yliopiston hallitus,,
+Tampereen yliopisto,Tampere University,Tammerfors universitet,10122,,1110,Yliopiston johto,,
 Tampereen yliopisto,University of Tampere,Tammerfors universitet,01905,,2592,Yliopistopalvelut,,
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,,,,http://isni.org/isni/0000000404747718,2131
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9611,961111,Yhteiset palvelut,,
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9620,962011,"Liiketalous, ICT ja kemiantekniikka, yht.",,
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9620,962012,Liiketalous,,
+Tampereen yliopistollisen sairaalan erityisvastuualue,Tampere University Hospital Catchment Area,,08265978,,,,,
+Teknologian tutkimuskeskus VTT Oy,VTT Technical Research Centre of Finland Ltd,,26473754,,,,,
+Terveyden ja hyvinvoinnin laitos,Finnish Institute for Health and Welfare,,5610017,,,,,
+Tieteellisten seurain valtuuskunta,Federation of Finnish Learned Societies,,05247045,,,,,
+Tilastokeskus,Statistics Finland,,02454911,,,,,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,,,,,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,965020,"Amk, Ensihoito, terveydenhoito ja kätilötyö",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,963013,"Amk, Esittävät taiteet",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961135,"Amk, HR-palvelut",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,962513,"Amk, ICT",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,962514,"Amk, Kemianteollisuus",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961142,"Amk, Kiinteistöpalvelut",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961133,"Amk, Kirjasto- ja tietopalvelut",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961121,"Amk, Koulutuksen kehittäminen",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961124,"Amk, Kumppanuus ja kehittäminen",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,965021,"Amk, Kuntoutus, suun terveydenhoito ja diagnostise",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,965021,"Amk, Kuntoutus, suun terveydenhoito ja diagnostiset palvelut",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,963014,"Amk, Kuvataide",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,962517,"Amk, Liiketoiminta ja palvelut",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,962517,"Amk, Logistiikka, palvelut ja tuotantotalous",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,963015,"Amk, Media-ala",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961132,"Amk, Opiskelijapalvelut",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961136,"Amk, Oppimisympäristöpalvelut",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961125,"Amk, Projektitoimisto",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,962518,"Amk, Rakennusteollisuus",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,965022,"Amk, Sairaanhoito",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,965023,"Amk, Sosiaali- ja kasvatusala",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961123,"Amk, TKI-toiminta",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,963011,"Amk, Taideakatemia, yht.",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,963016,"Amk, Taideakatemia/Master School",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961141,"Amk, Talous ja toiminnanohjaus",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,962511,"Amk, Tekniikka ja liiketoiminta, yht.",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,962516,"Amk, Teknologiateollisuus",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,962515,"Amk, Teli/Master School",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,965024,"Amk, Terhy/Master School",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,965011,"Amk, Terveys ja hyvinvointi, yht.",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961122,"Amk, Työelämäpalvelut",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961134,"Amk, Viestintäpalvelut",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961111,"Amk, Yleishallinto",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,962512,"Amk, Yrittäjyys ja myynti",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9630,963013,Esittävät taiteet,,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9640,964014,"Hankinnat, myynti ja logistiikka",,
 Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9620,962013,ICT,,
 Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9620,962014,Kemiantekniikka,,
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9620,962015,LIKe monimuotokoulutukset ja YAMK,,
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9630,963011,"Taideakatemia, yht.",,
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9630,963012,"Taideakatemia, projektit",,
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9630,963013,Esittävät taiteet,,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9640,964012,"Konetekniikka, meritekniikka ja muotoilu",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9650,965013,Kuntoutus ja terveysalan erityisalueet,,
 Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9630,963014,Kuvataide,,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9620,962015,LIKe monimuotokoulutukset ja YAMK,,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9620,962012,Liiketalous,,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9620,962011,"Liiketalous, ICT ja kemiantekniikka, yht.",,
 Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9630,963015,Media-ala,,
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9630,963016,Taideakatemia monimuotokoulutukset ja YAMK,,
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9640,964011,"Tekniikka, ympäristö ja talous, yht.",,
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9640,964012,"Konetekniikka, meritekniikka ja muotoilu",,
 Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9640,964013,"Rakentaminen, ympäristö ja energia",,
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9640,964014,"Hankinnat, myynti ja logistiikka",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9650,965014,Sosiaaliala,,
 Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9640,964015,TYT monimuotokoulutukset ja YAMK,,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9630,963016,Taideakatemia monimuotokoulutukset ja YAMK,,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9630,963012,"Taideakatemia, projektit",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9630,963011,"Taideakatemia, yht.",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9640,964011,"Tekniikka, ympäristö ja talous, yht.",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9650,965015,Terhy monimuotokoulutukset ja YAMK,,
 Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9650,965011,"Terveys ja hyvinvointi, yht.",,
 Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9650,965012,Terveysala,,
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9650,965013,Kuntoutus ja terveysalan erityisalueet,,
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9650,965014,Sosiaaliala,,
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9650,965015,Terhy monimuotokoulutukset ja YAMK,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,,,http://isni.org/isni/0000000121981512,41
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601000,Johto ja yliopiston yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601004,Muutostuki,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601011,Kiinanmylly,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601012,Seili,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601100,Talouspalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601105,Reskontrapalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601106,Matkapalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601107,Projektipalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601200,UTUGS,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601201,Tutkijatohtoriohjelma,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601210,LLK:n yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601211,LLK Matematiikan ja tilastotieteen lts,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601212,LLK Biokemian laitos,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601213,LLK Kemian lts,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601214,LLK Biologian lts,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601215,LLK Fysiikan ja tähtitieteen lts,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601216,LLK Informaatioteknologian lts,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601217,LLK Maantieteen ja geologian lts,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601218,LLK Biolääketieteen lts,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601219,LLK Kliininen laitos,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601220,LLK Hoitotieteen lts,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601221,LLK Hammaslääketieteen lts,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601230,TIAS-tutkijakollegiumin yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601231,TIAS Humanistinen tdk,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601232,TIAS Yhteiskuntatieteellinen tdk,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601233,TIAS Kasvatustieteiden tdk,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601234,TIAS Oikeustieteellinen tdk,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601235,TIAS Kauppakorkeakoulu,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601240,Tutkimuksen toimialan yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601241,Tutkimuksen kehittäminen,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601242,Tutkimuspalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601250,SKY yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601251,Innovaatiopalvelut ja koulutusvienti,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601252,Kehittäminen ja suunnittelu,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601260,TCSI yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601261,TCSI International,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601300,Viestintä,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601400,Henkilöstöpalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601401,IT-palvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601405,Yleispalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601406,Palvelujohtajan yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601410,Koulutuksen toimialan yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601411,Kansainväliset palvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601412,Ohjauksen ja koulutuksen tukipalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601413,Opiskelija- ja hakijapalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601414,Liikuntapalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601415,Koulutuksen toimiala tiedekunnissa,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601416,Avoin yliopisto 1.8. alkaen,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601420,Kirjaston yhteiset palvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601421,Oppimisen palvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601422,Tutkimuksen palvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601423,Tietoaineistojen saatavuuspalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601424,Hankintapalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601430,Toimitilapalvelujen yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601431,Tilasuunnittelu,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601432,Tila- ja kuljetuspalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601433,Majoituspalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601434,Protopaja,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601435,Yliopiston yhteiset tilat,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601436,Turvallisuus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601510,Vyörytettävät kustannukset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601611,Drug Development and Diagnostics,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601621,Learning and Education,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601631,Bioimaging,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601641,Digital Futures,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601651,New Bioresources,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601900,Rahastot,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2601930,Tiedekuntien stipendirahasto,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602000,Humanistisen tiedekunnan yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602001,Humanistisen tiedekunnan hallintopalvelu,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602002,Baltic Sea Regions Studies,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602100,Kieli- ja käännöstieteiden laitoksen yht,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602101,Englannin kieli,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602102,Espanja,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602103,Fonetiikka,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602104,Italia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602105,Klassiset kielet ja antiikin kulttuuri,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602106,Pohjoismaiset kielet,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602107,Ranska,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602108,Saksan kieli,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602109,Volgalaiskielten tutkimusyksikkö,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602110,Suomen kieli ja suom-ugrilainen kielent,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602111,Venäjän kieli,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602114,Lauseopin arkisto,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602200,"Historian, kultt ja tait tutk lts:n yht",,
+Turun ammattikorkeakoulu,Turku University of Applied Sciences,,02509,9611,961111,Yhteiset palvelut,,
+Turun yliopisto,University of Turku,,10089,,,,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2609023,ARD kehittäminen,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2609021,ARD koulutus,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2609022,ARD tutkimus,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2609020,ARD yhteiset,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609011,AVO Humanistisen tdk opinnot,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609012,AVO Kasvatustieteiden tdk opinnot,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609017,AVO Kauppakorkeakoulun opinnot,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609015,AVO Lääketieteellisen tdk opinnot,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609013,AVO Matemaattis-luonnon tdk opinnot,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609018,AVO Muut koulutukset ja hankkeet,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609016,AVO Oikeustieteellisen tdk opinnot,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609014,AVO Yhteiskuntatieteellisen tdk opinnot,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2604304,Alakoulu,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609022,Aluekehitys,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2607301,Anestesiologia,,
 Turun yliopisto,University of Turku,Åbo universitet,10089,,2602201,Arkeologia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602202,Folkloristiikka,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602203,Kansatiede,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602204,Kotimainen kirjallisuus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602205,Kulttuurihistoria,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602206,Mediatutkimus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602207,Musiikkitiede,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602208,Sukupuolentutkimus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602209,Suomen historia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602210,Taidehistoria,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602211,Uskontotiede,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602212,Yleinen historia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602213,Yleinen kirjallisuustiede,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602214,Kulttuurituotannon ja maisemantutkim ko,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602215,Kulttuurientutkimuksen arkisto,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602216,Kalevala instituutti,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602217,Luova kirjoittaminen,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2602218,Museologia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603000,Yhteisk tdk yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603001,Yhteisk tdk hallintopalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603100,Psykologian ja logopedian laitoksen yht,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603101,Filosofia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603102,Logopedia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603103,Psykologia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603104,PSYKONET -verkosto,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603105,Kognitiivisen neurotieteen tutk yks (KNT,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603106,Oppimistutkimuksen keskus (OTUK),,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603107,KiVa Koulu,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603111,KiVa Koulu elinkeinotoiminta,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603200,"Filosofian, poliittisen historian ja valtio-opin lts yht",,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603201,Poliittinen historia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603202,Valtio-oppi,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603203,Eduskuntatutkimuksen keskus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603204,Itä-Aasian tutkimus- ja koulutusk (CEAS),,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603205,Yliopistojen Aasia-verkosto,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603206,The Public Choice Research Centre (PCRC),,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603207,John Morton -keskus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603208,Filosofia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603300,Sosiaalitieteiden laitoksen yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603301,Sosiaalipolitiikka,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603302,Sosiaalityö,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603303,Sosiologia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603304,Taloussosiologia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603305,Sosiaalivak huippuas jatkok ohj (TOPSOS),,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2603306,SOSNET-verkosto (sosiaalityö),,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2604000,Kasv tdk yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2604001,Kasv tdk tiedekuntapalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2604002,Oppimistutkimuksen keskus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2604021,Et Koulutusvienti,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2604100,Kasvatustieteiden laitos,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2604200,Opettajankoulutuslaitoksen yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2604201,OKL Turku,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2604202,OKL Rauma,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2604300,Turun normaalikoulun yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2604301,Lukiokoulutus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2604302,Perusopetus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2604303,Opettajankoulutus ja kehitystehtävät,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2604400,Rauman normaalikoulun yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2604401,Perusopetus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2604402,Opettajankoulutus ja kehitystehtävät,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2605000,Oikeustiede,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2605100,Oikeustieteellisen liiketoiminta,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606000,Mat luonn tdk yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606001,Mat luonn tdk hallintopalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606010,TYYK:n yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606011,Lapin tutkimuslaitos Kevo,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606012,Saaristomeren tutkimuslaitos,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606013,Aerobiologian yksikkö,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606100,Matematiikan ja tilastotieteen lait yht,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606101,Matematiikka,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606102,Sovellettu matematiikka,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606103,Tilastotiede,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606200,Biokemian laitoksen yhteiset,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2602219,Arkeologia ja Suomen historia,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609021,Asiantuntijakehitys,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2606702,Avaruustutkimuslaboratorio,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609010,Avoimen yliopiston yhteiset,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2601416,Avoin yliopisto,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2601225,Avoin yliopisto,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2602002,Baltic Sea Regions Studies,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609260,BioCity Turku,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2609260,BioCity Turku,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2606017,Biodiversiteetti ja ympäristönäytepankki,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2606401,Biodiversiteettitutkimus,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2606010,Biodiversiteettiyksikön yhteiset,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2601631,Bioimaging,,
 Turun yliopisto,University of Turku,Åbo universitet,10089,,2606201,Biokemia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606202,Biotekniikka / FM,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2606200,Biokemian laitoksen yhteiset,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2606400,Biologian laitoksen yhteiset,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2607100,Biolääketieteen laitos,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609063,Biomedical and Environmental ICT,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2607005,Biopankki,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2607302,Biostatistiikka,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609200,Biotekniikan keskuksen yhteiset,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609201,Biotekniikan keskus,,
 Turun yliopisto,University of Turku,Åbo universitet,10089,,2606203,Biotekniikka / DI,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2606202,Biotekniikka / FM,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609030,Brahea kehittämispalvelujen elinkein yht,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609020,Brahea kehittämispalvelujen yhteiset,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609000,Brahea-keskuksen yhteiset,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2609000,Brahea-keskuksen yhteiset,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609830,COE huippuyksikkö,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2609830,COE huippuyksikkö,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609250,Cell Imaging,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2609250,Cell Imaging,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2608710,Centre for Collaborative Research (CCR),,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2601109,Controller-palvelut,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2606001,Dekaani,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2607303,Diagnostinen radiologia,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2601641,Digital Futures,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2601611,Drug Development and Diagnostics Platfor,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2603203,Eduskuntatutkimuksen keskus,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2606402,Ekologia ja evoluutiobiologia,,
 Turun yliopisto,University of Turku,Åbo universitet,10089,,2606204,Elintarvikekemia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606205,Molekulaarinen kasvibiologia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606206,IFDRC,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606300,Kemian laitoksen yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606301,Kemian perusopetus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606302,Materiaalikemia ja kemiallinen analyysi,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606303,Orgaaninen kemia ja kemiallinen biologia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606304,Laitekeskus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606305,JBL-laboratorio,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606400,Biologian laitoksen yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606401,Biodiversiteettitutkimus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606402,Ekologia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606403,Ympäristötiede,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606404,Genetiikka,,
 Turun yliopisto,University of Turku,Åbo universitet,10089,,2606405,Eläinfysiologia,,
 Turun yliopisto,University of Turku,Åbo universitet,10089,,2606406,Eläinmuseo,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606407,Kasvimuseo,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606408,Kasvitieteellinen puutarha,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606409,Evoluutiobiologian sovelluskeskus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606700,Fysiikan ja tähtitieteen laitoksen yhtei,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606701,Wihurin fysiikantutkimuslaboratorio,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606702,Avaruustutkimuslaboratorio,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606703,Teoreettinen fysiikka,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606704,Teollisuusfysiikka,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606705,Tuorlan observatorio,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606706,Materiaalitutkimuksen laboratorio,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606707,Kvanttioptiikan laboratorio,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606800,Informaatioteknologian laitoksen yhteise,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606801,Tietoliikennetekniikka,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606802,Sulautettu elektroniikka,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606803,Tietojenkäsittelytiede,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606804,Ohjelmistotekniikka,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606805,Bioinformatiikka,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606806,Vuorovaikutussuunnittelu,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606900,Maantieteen ja geologian laitoksen yht.,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606901,Maantiede,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2606902,Geologia,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2607000,Lääket tdk yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2607001,Lääket tdk hallintopalvelut,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2607002,Lääket tdk täydennyskoulutus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2607003,MediCity,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2607004,Sydäntutkimuskeskus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2607005,Biopankki,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2607010,Turun lapsi- ja nuorisotutkimuskeskus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2607020,Funktionaalisten elint. kehittämiskeskus,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2607030,Koe-eläinkeskus,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609062,Embedded and Mixed-Reality Systems,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2602101,Englannin kieli,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2608311,Entre,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2608310,Entren yhteiset,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2602102,Espanja,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2609034,Et ARD kehittäminen,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2609032,Et ARD koulutus,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2609033,Et ARD tutkimus,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2609030,Et ARD yhteiset,,
 Turun yliopisto,University of Turku,Åbo universitet,10089,,2607040,Et Aistila,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2607100,Biolääketieteen laitoksen yhteiset,,
-Turun yliopisto,University of Turku,Åbo universitet,10089,,2607101,Solubiologia ja anatomia,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609033,Et Aluekehitys,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609032,Et Asiantuntijakehitys,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609031,Et Avoin yliopisto,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2603111,Et KiVa Koulu elinkeinotoiminta,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609035,Et Kongressitoimisto,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2601270,Et Koulutusviennin yhteiset,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2604021,Et Koulutusvienti,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2601278,Et Koulutusvienti Erilliset laitokset,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2601271,Et Koulutusvienti Hum. tdk,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2601273,Et Koulutusvienti Kasv. tdk,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2601275,Et Koulutusvienti Luonn. ja tekn. tdk,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2601276,Et Koulutusvienti Lääk. tdk,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2601274,Et Koulutusvienti Oik. tdk,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2601277,Et Koulutusvienti TuKKK,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2601272,Et Koulutusvienti Yht. tdk,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609053,Et Mkk Kotka,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609055,Et Mkk Merifoorumi,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2609055,Et Mkk Merifoorumi,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609054,Et Mkk Meriklusteriohjelma,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609052,Et Mkk T&K-hankkeet,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2609052,Et Mkk T&K-hankkeet,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609051,Et Mkk koulutukset,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609050,Et Mkk yhteiset,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2609050,Et Mkk yhteiset,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2605100,Et Oikeustieteellisen liiketoiminta,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2608610,Et TSE Exe,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609034,Et Yliopistokehitys,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609036,Et innovaatio- ja yrityskehitys,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2606409,Evoluutiobiologian sovelluskeskus,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,02609211,FMSC Bioinformatiikka,,
 Turun yliopisto,University of Turku,Åbo universitet,10089,,2607102,"Farmakologia, lääkekehitys ja lääkehoito",,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2603200,"Filosof, pol hist ja valtio-opin lts yht",,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2603101,Filosofia,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2603208,Filosofia,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2602221,Fokaus,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2602202,Folkloristiikka,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2602103,Fonetiikka,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2609210,Funktionaalisen genomiikan keskus,,
+Turun yliopisto,University of Turku,Åbo universitet,10089,,2607020,Funktionaalisten elint.
kehittƤmiskeskus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607320,Fysiatria,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606700,Fysiikan ja tƤhtitieteen laitoksen yhtei,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607103,Fysiologia,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607104,LƤƤketieteellinen biokemia ja genetiikka,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607105,LƤƤketietee mikrobiologia ja immunologia,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607107,Patologia ja oikeuslƤƤketiede,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607108,Virusoppi,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607160,OikeuslƤƤketieteen maksupalveluyksikkƶ,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607300,Kliinisen laitoksen yhteiset,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607301,Anestesiologia,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607302,Biostatistiikka,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607303,Diagnostinen radiologia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606404,Fysiologia ja genetiikka,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606902,Geologia,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607304,Geriatria,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602200,HKT-laitoksen yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607500,HammaslƤƤketieteen laitos yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601424,Hankintapalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601108,Hankintapalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601400,Henkilƶstƶpalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602200,"Historian, kultt ja tait tutk lts:n yht",, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607400,Hoitotieteen laitos,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602001,Humanistisen tiedekunnan hallintopalvelu,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602000,Humanistisen tiedekunnan yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601407,Hyvinvointipalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606206,IFDRC,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603023,INVEST Lippulaiva Lastenpsykiatria,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603022,INVEST Lippulaiva Psykologia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603021,INVEST Lippulaiva Sosiologia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601401,IT-palvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601288,IT-palvelut,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607305,Iho- ja sukupuolitautioppi,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607051,InFLAMES Lippulaiva tutkimus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607050,InFLAMES Lippulaiva yhteiset,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607306,Infektiotautioppi,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609025,Innovaatio- ja yrityskehitys,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601251,Innovaatiopalvelut ja koulutusvienti,, +Turun yliopisto,University of 
Turku,ƅbo universitet,10089,,2601287,"Innovaatiot,yrittƤjyys ja koulutusvienti",, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602104,Italia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603204,ItƤ-Aasian tutkimus- ja koulutusk (CEAS),, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606305,JBL-laboratorio,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603207,John Morton -keskus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608301,Johtaminen ja organisointi,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608300,Johtaminen ja yrittƤjyys yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601000,Johto,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601000,Johto ja yliopiston yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602216,Kalevala instituutti,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601411,KansainvƤliset palvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601224,KansainvƤliset palvelut,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607307,Kansanterveystiede,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602203,Kansatiede,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604001,Kasv tdk tiedekuntapalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604000,Kasv tdk yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604100,Kasvatustieteiden laitos,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606407,Kasvimuseo,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606408,Kasvitieteellinen puutarha,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601252,KehittƤminen ja suunnittelu,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601280,KehittƤmispalvelut yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606300,Kemian laitoksen yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606301,Kemian perusopetus,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607308,Keuhkosairausoppi ja kliin. 
allergologia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603107,KiVa Koulu,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602100,Kieli- ja kƤƤnnƶstieteiden laitoksen yht,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606805,Kieli- ja puheteknologia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2609400,Kieli- ja viestintƤopintojen keskus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609400,Kielikeskus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608220,Kielten ja liikeviestinnƤn yksikkƶ,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601011,Kiinanmylly,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602222,Kirjallisuustieteet ja kirjoittaminen,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601420,Kirjasto,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601700,Kirjasto,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607309,Kirurgia,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607310,Ortopedia ja traumatologia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602105,Klassiset kielet ja antiikin kulttuuri,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607322,Kliininen fysiologia,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607311,Kliininen kemia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607321,Kliininen neurofysiologia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607315,Kliininen syƶpƤtautioppi,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607300,Kliinisen laitoksen yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607314,Kliiniset neurotieteet,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607030,Koe-elƤinkeskus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603105,Kognitiivisen neurotieteen tutk yks (KNT,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606022,Konetekniikka,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609024,Kongressitoimisto,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607312,"Korva-, nenƤ-, ja kurkkutautioppi",, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602204,Kotimainen kirjallisuus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601418,Koto/Koulutuksen tuki,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601227,Koto/Koulutuksen tuki,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601419,Koto/Opiskelijavalinta ja hakijapalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601228,Koto/Opiskelijavalinta ja hakijapalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601417,Koto/Opiskelun tuki,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601226,Koto/Opiskelun tuki,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601415,Koulutuksen toimiala tiedekunnissa,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601410,Koulutuksen toimialan yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601223,Koulutuksen toimialan yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609900,Koulutussosiologian tutkimuskeskus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603309,Koulutussosiologian tutkimuskeskus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602221,Kulttuurien tutkimus,, 
+Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602215,Kulttuurientutkimuksen arkisto,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602205,Kulttuurihistoria,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602214,Kulttuurituotannon ja maisemantutkim ko,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602214,Kulttuurituotanto ja maisemantutkimus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608202,Kv. liiketoiminta,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606707,Kvanttioptiikan laboratorio,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601212,LLK Biokemian laitos,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601214,LLK Biologian lts,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601218,LLK BiolƤƤketieteen lts,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601215,LLK Fysiikan ja tƤhtitieteen lts,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601221,LLK HammaslƤƤketieteen lts,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601220,LLK Hoitotieteen lts,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601216,LLK Informaatioteknologian lts,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601213,LLK Kemian lts,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601219,LLK Kliininen laitos,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601217,LLK Maantieteen ja geologian lts,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601211,LLK Matematiikan ja tilastotieteen lts,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601222,LLK-tutkijatohtoriohjelma,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601210,LLK:n yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606304,Laitekeskus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601285,Lakiasiat,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606011,Lapin tutkimuslaitos Kevo,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608100,Laskentatoimen ja rahoituksen laitos yht,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608101,Laskentatoimi ja rahoitus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607325,Lastenneurologia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607326,Lastenpsykiatria,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604203,Lastentarhanopettajakoulutus,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607313,Lastentautioppi,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602114,Lauseopin arkisto,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601621,Learning and Education,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601414,Liikuntapalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603102,Logopedia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604301,Lukiokoulutus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606000,Luonnontieteiden ja tekniikan tdk yht,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602217,Luova kirjoittaminen,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601405,LƤhipalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607001,LƤƤket tdk hallintopalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607002,LƤƤket tdk 
tƤydennyskoulutus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607000,LƤƤket tdk yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607105,LƤƤketietee mikrobiologia ja immunologia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607104,LƤƤketieteellinen biokemia ja genetiikka,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607323,LƤƤketieteen etiikka,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606901,Maantiede,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606900,Maantieteen ja geologian laitoksen yht.,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601433,Majoituspalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601133,Majoituspalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608200,Markk ja kv liiketoim laitos yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608201,Markkinointi,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606100,Matematiikan ja tilastotieteen lait yht,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606101,Matematiikka,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606302,Materiaalikemia ja kemiallinen analyysi,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606021,Materiaalitekniikka,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606706,Materiaalitutkimuksen laboratorio,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601106,Matkapalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607003,MediCity,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602223,"Median, musiikin ja taiteen tutkimus",, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602206,Mediatutkimus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602223,Memuta,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609061,Microelectronics,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606807,Mikroelektroniikka,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609210,Mikrosirukeskus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609043,Mkk Kotka,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609045,Mkk Merifoorumi,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2609045,Mkk Merifoorumi,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609044,Mkk Meriklusteriohjelma,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609042,Mkk Yhteisrahoitteiset T&K-hankkeet,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2609042,Mkk Yhteisrahoitteiset T&K-hankkeet,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609041,Mkk koulutustoiminta,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609040,Mkk yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2609040,Mkk yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606205,Molekulaarinen kasvibiologia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602218,Museologia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602207,Musiikkitiede,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601004,Muutostuki,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607314,Neurologia,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607315,Kliininen syƶpƤtautioppi,, +Turun yliopisto,University of Turku,ƅbo 
universitet,10089,,2601651,New Bioresources,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604202,OKL Rauma,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604201,OKL Turku,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601412,Ohjauksen ja koulutuksen tukipalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606804,Ohjelmistotekniikka,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607160,OikeuslƤƤketieteen maksupalveluyksikkƶ,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2605000,Oikeustiede,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604303,Opettajankoulutus ja kehitystehtƤvƤt,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604402,Opettajankoulutus ja kehitystehtƤvƤt,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604200,Opettajankoulutuslaitoksen yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601413,Opiskelija- ja hakijapalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601421,Oppimisen palvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604002,Oppimistutkimuksen keskus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603106,Oppimistutkimuksen keskus (OTUK),, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606303,Orgaaninen kemia ja kemiallinen biologia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607310,Ortopedia ja traumatologia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609810,PET Perustoiminta,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2609810,PET Perustoiminta,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609820,PET Tutkimus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2609820,PET Tutkimus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603104,PSYKONET -verkosto,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607324,Palliatiivinen lƤƤketiede,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601406,Palvelujohtajan yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608210,Pan-Eurooppa Instituutti,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607107,Patologia ja oikeuslƤƤketiede,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604302,Perusopetus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604401,Perusopetus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602106,Pohjoismaiset kielet,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603201,Poliittinen historia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608802,Porin hankerahoitus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608800,Porin tutkintokoulutus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608804,Porin yksikƶn yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601671,Profilaatio 4,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601681,Profilaatio 5,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601107,Projektipalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609240,Proteomics,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2609240,Proteomics,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601434,Protopaja,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601134,Protopaja,, Turun 
yliopisto,University of Turku,ƅbo universitet,10089,,2607316,Psykiatria,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603103,Psykologia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603100,Psykologian ja logopedian laitoksen yht,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601900,Rahastot,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602107,Ranska,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604400,Rauman normaalikoulun yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601105,Reskontrapalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609901,Rosa-laboratorio,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601012,Ruissalo,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601250,SKY yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603306,SOSNET-verkosto (sosiaalityƶ),, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606012,Saaristomeren tutkimuslaitos,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602108,Saksan kieli,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601661,Sea and Maritime Studies,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607317,SilmƤtautioppi,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609001,SisƤiset palvelut,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607318,SisƤtautioppi,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607101,Solubiologia ja anatomia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603301,Sosiaalipolitiikka,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603300,Sosiaalitieteiden laitoksen yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603302,Sosiaalityƶ,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603305,Sosiaalivak huippuas jatkok ohj (TOPSOS),, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603303,Sosiologia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607007,Sote-akatemia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606102,Sovellettu matematiikka,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606013,Soveltavan ympƤristƶtutkimuksen laborato,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601284,Strateginen ohjaus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602208,Sukupuolentutkimus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606802,Sulautettu elektroniikka,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609700,Suomen ESO-keskus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2609700,Suomen ESO-keskus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602209,Suomen historia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602110,Suomen kieli ja suom-ugrilainen kielent,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607004,SydƤntutkimuskeskus,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607319,Synnytys- ja naistentautien oppi,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607320,Fysiatria,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607321,Kliininen neurofysiologia,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607322,Kliininen fysiologia,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607323,LƤƤketieteen etiikka,, -Turun 
yliopisto,University of Turku,ƅbo universitet,10089,,2607324,Palliatiivinen lƤƤketiede,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607325,Lastenneurologia,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607326,Lastenpsykiatria,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607327,Tyƶterveyshuolto,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607328,YleislƤƤketiede,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607330,Turun lapsi- ja nuorisotutkimuskeskus,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607400,Hoitotieteen laitos,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607500,HammaslƤƤketieteen laitos yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607006,SyƶpƤtutkimuslaboratorio,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607511,TCBC,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608000,TuKKK:n yhteiset,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608001,TuKKK:n hallintopalvelut,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608002,TuKKK:n viestintƤ,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608100,Laskentatoimen ja rahoituksen laitos yht,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608101,Laskentatoimi ja rahoitus,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608102,Yritysjuridiikka,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608103,Taloustieteen kvantitatiiviset menetelmƤ,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608200,Markk ja kv liiketoim laitos yhteiset,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608201,Markkinointi,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608202,Kv. 
liiketoiminta,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608203,Toimitusketjujen johtaminen,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601261,TCSI International,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601260,TCSI yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601231,TIAS Humanistinen tdk,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601233,TIAS Kasvatustieteiden tdk,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601235,TIAS Kauppakorkeakoulu,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601234,TIAS Oikeustieteellinen tdk,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601232,TIAS Yhteiskuntatieteellinen tdk,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601230,TIAS-tutkijakollegiumin yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601236,TIAS-tutkijatohtoriohjelma,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609060,TRC yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608805,TSE Porin yksikƶn kehittƤminen,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602210,Taidehistoria,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601109,Talouden suunnittelu ja seuranta,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608204,Talousmaantiede,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608210,Pan-Eurooppa Instituutti,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608220,Kielten ja liikeviestinnƤn yksikkƶ,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608300,Johtamisen ja yrittƤjyyden laitos yhteis,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608301,Johtaminen ja organisointi,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608303,TietojƤrjestelmƤtiede,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608310,Entren yhteiset,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608311,Entre,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608312,YrittƤjyys,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608320,"Turun tyƶtieteiden keskus, TCLS",, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601100,Talouspalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603304,Taloussosiologia,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608400,Taloustiede,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608610,TSE Exe,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608710,Centre for Collaborative Research (CCR),, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608800,Porin tutkintokoulutus,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608802,Porin hankerahoitus,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608804,Porin yksikƶn yhteiset,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608805,Porin tƤydennyskoulutus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608103,Taloustieteen kvantitatiiviset menetelmƤ,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606020,Tekniikan laajennuksen yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606704,Teollisuusfysiikka,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606703,Teoreettinen fysiikka,, +Turun yliopisto,University of Turku,ƅbo
universitet,10089,,2606808,Terveysteknologia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603206,The Public Choice Research Centre (PCRC),, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601930,Tiedekuntien stipendirahasto,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601423,Tietoaineistojen saatavuuspalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606803,TietojenkƤsittelytiede,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608303,TietojƤrjestelmƤtiede,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606801,Tietoliikennetekniikka,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601432,Tila- ja kuljetuspalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601132,Tila- ja kuljetuspalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606103,Tilastotiede,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601431,Tilasuunnittelu,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601131,Tilasuunnittelu,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601430,Toimitilapalvelujen yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601130,Toimitilapalvelujen yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608203,Toimitusketjujen johtaminen,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608001,TuKKK:n hallintopalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608002,TuKKK:n viestintƤ,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608000,TuKKK:n yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606800,Tulevaisuuden teknologioiden laitoks yht,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608900,Tulevaisuuden tutkimuskeskus,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609000,Brahea-keskuksen yhteiset,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609001,SisƤiset palvelut,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609010,Avoimen yliopiston yhteiset,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609011,AVO Humanistisen tdk opinnot,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609012,AVO Kasvatustieteiden tdk opinnot,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609013,AVO Matemaattis-luonnon tdk opinnot,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609014,AVO Yhteiskuntatieteellisen tdk opinnot,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609015,AVO LƤƤketieteellisen tdk opinnot,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609016,AVO Oikeustieteellisen tdk opinnot,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609017,AVO Kauppakorkeakoulun opinnot,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609018,AVO Muut koulutukset ja hankkeet,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609020,Brahea kehittƤmispalvelujen yhteiset,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609021,Asiantuntijakehitys,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609022,Aluekehitys,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609023,Yliopistokehitys,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609024,Kongressitoimisto,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609025,Innovaatio- ja yrityskehitys,, -Turun 
yliopisto,University of Turku,ƅbo universitet,10089,,02609030,Brahea kehittƤmispalvelujen elinkein yht,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609031,Et Avoin yliopisto,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609032,Et Asiantuntijakehitys,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609033,Et Aluekehitys,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609034,Et Yliopistokehitys,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609035,Et Kongressitoimisto,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609036,Et innovaatio- ja yrityskehitys,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609040,Mkk yhteiset,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609041,Mkk koulutustoiminta,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609042,Mkk Yhteisrahoitteiset T&K-hankkeet,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609043,Mkk Kotka,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609044,Mkk Meriklusteriohjelma,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609045,Mkk Merifoorumi,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609050,Et Mkk yhteiset,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609051,Et Mkk koulutukset,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609052,Et Mkk T&K-hankkeet,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609053,Et Mkk Kotka,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609054,Et Mkk Meriklusteriohjelma,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609055,Et Mkk Merifoorumi,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609060,TRC yhteiset,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609061,Microelectronics,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609062,Embedded and Mixed-Reality Systems,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609063,Biomedical and Environmental ICT,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609200,Biotekniikan keskuksen yhteiset,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609201,Biotekniikan keskus,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609210,Mikrosirukeskus,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609211,FMSC Bioinformatiikka,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609240,Proteomics,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609250,Cell Imaging,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609260,BioCity Turku,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609400,Kielikeskus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606019,Tuorla ja Luma-keskus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606705,Tuorlan observatorio,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606104,Turku Complex Systems Institute Cern,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2609201,Turun biotiedekeskus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2609200,Turun biotiedekeskus yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607010,Turun lapsi- ja nuorisotutkimuskeskus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607330,Turun lapsi- ja nuorisotutkimuskeskus,, +Turun 
yliopisto,University of Turku,ƅbo universitet,10089,,2604300,Turun normaalikoulun yhteiset,, Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609500,Turun tietotekniikan tutkimuskeskus TUCS,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609700,Suomen ESO-keskus,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609810,PET Perustoiminta,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609820,PET Tutkimus,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609830,COE huippuyksikkƶ,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609900,Koulutussosiologian tutkimuskeskus,, -Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609901,Rosa-laboratorio,, -Vaasan ammattikorkeakoulu,Vaasa University of Applied Sciences,,02627,,,,http://isni.org/isni/0000000406476659,2355 -Vaasan ammattikorkeakoulu,Vaasa University of Applied Sciences,,02627,,10,Tekniikan yksikkƶ,, -Vaasan ammattikorkeakoulu,Vaasa University of Applied Sciences,,02627,,20,Liiketalouden yksikkƶ,, -Vaasan ammattikorkeakoulu,Vaasa University of Applied Sciences,,02627,,30,Sosiaali- ja terveysalan yksikkƶ,, -Vaasan ammattikorkeakoulu,Vaasa University of Applied Sciences,,02627,,40,MUOVA,, -Vaasan ammattikorkeakoulu,Vaasa University of Applied Sciences,,02627,,50,Hallinto ja tukipalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608320,"Turun tyƶtieteiden keskus, TCLS",, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601436,Turvallisuus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601283,Tutkijanura,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601200,Tutkijanurapalvelut yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601201,Tutkijatohtoriohjelma,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601241,Tutkimuksen kehittƤminen,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601422,Tutkimuksen palvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601240,Tutkimuksen toimialan yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601282,Tutkimusedellytykset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601242,Tutkimuspalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601281,Tutkimusrahoitus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601435,TyhjƤt tilat,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607327,Tyƶterveyshuolto,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601202,UTUGS,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602211,Uskontotiede,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602224,Uudet avaukset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603202,Valtio-oppi,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602111,VenƤjƤn kieli,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601300,ViestintƤ,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607108,Virusoppi,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602109,Volgalaiskielten tutkimusyksikkƶ,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606806,Vuorovaikutusmuotoilu,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601510,VyƶrytettƤvƤt kustannukset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607008,VƤestƶtutkimuskeskus,, +Turun yliopisto,University of Turku,ƅbo 
universitet,10089,,2606701,Wihurin fysiikantutkimuslaboratorio,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601013,Yhteiset tilat ml. tyhjƤt ja opetustilat,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603001,Yhteisk tdk hallintopalvelut,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603000,Yhteisk tdk yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603020,Yhteisk tdk yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602212,Yleinen historia,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602220,Yleinen historia ja kulttuurihistoria,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602213,Yleinen kirjallisuustiede,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602220,Yleinen kulttuurihistoria,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607328,YleislƤƤketiede,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603205,Yliopistojen Aasia-verkosto,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609023,Yliopistokehitys,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601004,Yliopiston yhteiset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601286,Yliopiston vaikuttavuus,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604305,YlƤkoulu,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606018,YmpƤristƶmuutokset,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606403,YmpƤristƶtiede,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608312,YrittƤjyys,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608305,YrittƤjyys,, +Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608102,Yritysjuridiikka,, +Turun yliopistollisen keskussairaalan erityisvastuualue,Turku University Hospital Catchment Area,,08282559,,,,, +Tyƶterveyslaitos,Finnish Institute of Occupational Health,,02202669,,,,, +Ulkopoliittinen instituutti,The Finnish Institute of International Affairs,,1120017,,,,, +Vaasan ammattikorkeakoulu,Vaasa University of Applied Sciences,,02627,,,,, +Vaasan ammattikorkeakoulu,Vaasa University of Applied Sciences,Vaasan ammattikorkeakoulu,02627,,50,Hallinto ja tukipalvelut,, +Vaasan ammattikorkeakoulu,Vaasa University of Applied Sciences,Vaasan ammattikorkeakoulu,02627,,20,Liiketalouden yksikkƶ,, +Vaasan ammattikorkeakoulu,Vaasa University of Applied Sciences,Vaasan ammattikorkeakoulu,02627,,40,MUOVA,, +Vaasan ammattikorkeakoulu,Vaasa University of Applied Sciences,Vaasan ammattikorkeakoulu,02627,,30,Sosiaali- ja terveysalan yksikkƶ,, Vaasan ammattikorkeakoulu,Vaasa University of Applied Sciences,,02627,,60,TKI-yksikkƶ,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,,,http://isni.org/isni/0000000106722619,2363 -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710050,Nykysuomi ja kƤƤntƤminen,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710100,Pohjoismaiset kielet,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710150,Englannin kieli,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710200,Ranskan kieli,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710250,VenƤjƤn kieli,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710300,Saksan kieli ja kirjallisuus,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710350,ViestintƤtieteet,, +Vaasan ammattikorkeakoulu,Vaasa University of Applied Sciences,Vaasan
ammattikorkeakoulu,02627,,10,Tekniikan yksikkƶ,, +Vaasan yliopisto,University of Vaasa,,01913,,,,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2701400,Aluetiede,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710400,Aluetiede,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710450,Sosiologia,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710500,Hallintotiede/julkisjohtaminen,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2708330,"Aulapalvelut, tilat ja turvallisuus",, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760550,"Aulapalvelut, tilat ja turvallisuus",, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2704820,Automaatiotekniikka,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730050,Automaatiotekniikka,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2740050,Avoin yliopisto,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,270500,Digital Economy,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2705000,Digital Economy,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2740150,Energia ja aluekehittƤminen,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2704810,Energiatekniikka,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730100,Energiatekniikka,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710150,Englannin kieli,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2701410,Filosofia,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710550,Filosofia,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710600,Julkisoikeus,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710650,Sosiaali- ja terveyshallintotiede,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710700,Kielipalvelut,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710900,Filosofinen tdk yht.,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730150,Fysiikka,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710500,Hallintotiede/julkisjohtaminen,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2708110,Hallitus,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760050,Hallitus,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2701500,Henkilƶstƶjohtaminen,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2708310,Henkilƶstƶn ja ylimmƤn johdon palvelut,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760300,Henkilƶstƶn palvelut ja ylimmƤn johdon palvelut,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,270600,Innolab,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2706000,Innolab,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2708130,Johdon asiantuntijatuki,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760150,Johdon asiantuntijatuki,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2740100,Johtaminen ja organisaatioiden kehittƤminen,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2720050,Johtaminen ja organisaatiot,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2701420,Julkisjohtaminen,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2701430,Julkisoikeus,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710600,Julkisoikeus,, +Vaasan yliopisto,University of Vaasa,Vasa 
universitet,01913,,2760500,KansainvƤliset asiat,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2702500,KansainvƤlinen liiketoiminta,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2720900,Kauppatieteell. tdk yht.,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,270700,Kielikeskus,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2707000,Kielikeskus,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710700,Kielipalvelut,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2703500,Laskentatoimi,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2720100,Laskentatoimi ja rahoitus,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,274000,LevĆ³n-instituutti,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2740900,LevĆ³n-instituutti yhteiset,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2708350,"LƤhipalvelut ja johdon palvelut, Fabriikki",, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760750,"LƤhipalvelut ja johdon palvelut, Fabriikki",, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2708360,"LƤhipalvelut ja johdon palvelut, Tervahovi",, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760800,"LƤhipalvelut ja johdon palvelut, Tervahovi",, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2702510,Markkinointi,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2720150,Markkinointi,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2720200,Taloustiede,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2720250,Talousoikeus,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2720900,Kauppatieteell. tdk yht.,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730050,Automaatiotekniikka,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730100,Energiatekniikka,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730150,Fysiikka,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730200,SƤhkƶtekniikka,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2704830,Matemaattiset tieteet,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730250,Matematiikka,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710050,Nykysuomi ja kƤƤntƤminen,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2708210,Opiskelun ja opetuksen palvelut,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760450,Opiskelun ja opetuksen palvelut,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710100,Pohjoismaiset kielet,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2703510,Rahoitus,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710200,Ranskan kieli,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2708120,Rehtori,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760100,Rehtori,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2704850,SC-Research,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2735050,SC-Research,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710300,Saksan kieli ja kirjallisuus,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2703000,School of Accounting & Finance,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,270300,School of Accounting and Finance,, +Vaasan yliopisto,University of Vaasa,Vasa
universitet,01913,,270100,School of Management,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2701000,School of Management,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,270200,School of Marketing and Communication,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2702000,School of Marketing and Communication,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,270400,School of Technology and Innovations,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2704000,School of Technology and Innovations,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2701440,Sosiaali- ja terveyshallintotiede,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710650,Sosiaali- ja terveyshallintotiede,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710450,Sosiologia,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2701510,Strateginen johtaminen,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2704800,SƤhkƶtekniikka,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730200,SƤhkƶtekniikka,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730700,TB Laboratoriot,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2708320,Talouden ja hankehallinnon palvelut,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760350,Talouden ja hankehallinnon palvelut,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730300,Talousmatematiikka,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730350,Tilastotiede,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2703520,Talousoikeus,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2720250,Talousoikeus,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2703400,Taloustiede,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2720200,Taloustiede,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2704860,Technobotnia,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730900,Teknillinen tdk. 
yhteiset,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2708340,Tietohallintopalvelut,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760650,Tietohallintopalvelut,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2704720,Tietoliikennetekniikka,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730400,Tietoliikennetekniikka,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2704700,Tietotekniikka kaupp,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730450,Tietotekniikka kauppat.,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2704710,Tietotekniikka tekn,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730500,Tietotekniikka tekn.,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760700,Tietotekniikkapalvelut,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730350,Tilastotiede,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760400,Toiminta- taloussuunnittelu,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,275000,Tritonia,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2750050,Tritonia,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730600,Tuotantotalous,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2704500,Tuotantotalous kaupp,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2704840,Tuotantotalous tekn,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730660,Tuotantotalous tekn,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730700,TB Laboratoriot,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2730900,Teknillinen tdk. yhteiset,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2735050,SC-Research,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2708220,Tutkimuksen palvelut ja tutkijakoulu,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760200,Tutkimuksen palvelut ja tutkijakoulu,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2790100,Vaasan korkeakoulukonsortio,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2780000,Vaasan yliopiston rahasto,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,278000,Vaasan yliopiston rahastot,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,273900,Vebic,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2739150,Vebic julkinen rahoitus,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2739050,Vebic omarahoitus,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2739100,Vebic taloudellinen toiminta,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2739150,Vebic julkinen rahoitus,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2740050,Avoin yliopisto,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2740100,Johtaminen ja organisaatioiden kehittƤminen,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2740150,Energia ja ympƤristƶ,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2740900,LevĆ³n-instituutin yhteiset,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2750050,Tritonia,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760050,Hallitus,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760100,Rehtori,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760150,Johdon asiantuntijatuki,, -Vaasan 
yliopisto,University of Vaasa,Vasa universitet,01913,,2760200,Tutkimuksen palvelut ja tutkijakoulu,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,270900,Vebic-alusta,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2709000,Vebic-alusta,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,273900,Vebic-infra,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2739150,Vebic-infra julkinen rahoitus,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2739050,Vebic-infra omarahoitus,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2739100,Vebic-infra taloudellinen toiminta,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710250,VenƤjƤn kieli,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2708140,ViestintƤ ja kumppanuudet,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760250,ViestintƤ ja kumppanuudet,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760300,Henkilƶstƶn palvelut ja ylimmƤn johdon palvelut,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760350,Talouden ja hankehallinnon palvelut,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760400,Toiminta- taloussuunnittelu,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760450,Opiskelun ja opetuksen palvelut,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760500,KansainvƤliset asiat,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760550,"Aulapalvelut, tilat ja turvallisuus",, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760650,Tietohallintopalvelut,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760700,Tietotekniikkapalvelut,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760750,"LƤhipalvelut ja johdon palvelut, Fabriikki",, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760800,"LƤhipalvelut ja johdon palvelut, Tervahovi",, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2780000,Vaasan yo sijoitustoiminta,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2702400,ViestintƤtieteet,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710350,ViestintƤtieteet,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2780050,Viljo Syreniuksen rahasto,, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,279000,Yliopiston yhteiset,, Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2790050,Yliopiston yhteiset,, -Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2790100,Vaasan korkeakoulukonsortio,, -Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,,,http://isni.org/isni/0000000404001027,163 -Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,11,"HV, Pedagogik",, -Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,111,"HV, HƤlsa och vƤlfƤrd (utom idrott)",, -Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,112,"HV, Idrott",, -Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,212,"EA, Turism",, -Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,25,"EA, Tradenom",, -Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,27,"EA, IT",, -Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,38,"EM, Teknik",, -Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,42,"KK, Media och kultur",, -Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,51,"Ɩvriga, Pedagogik",, +Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,270800,Yliopistopalvelut,, +Valtion taloudellinen 
tutkimuskeskus,VATT Institute for Economic Research,,3060016,,,,, +VƤestƶrekisterikeskus,Population Register Center,,02454372,,,,, +VƤylƤvirasto,Finnish Transport Infrastructure Agency,,10105471,,,,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,,02535,,,,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,111,Alla utbildningar pĆ„ hƤlsa och vƤlfƤrd fƶruto,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,111,Alla utbildningar pĆ„ hƤlsa och vƤlfƤrd fƶrutom idrott,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,611,Alla utbildningar pĆ„ institutionen fƶr vĆ„rd,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,27,IT-utbildningen,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,112,Idrottsutbildningen,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,2,Institutionen fƶr ekonomi och affƤrsanalys,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,3,Institutionen fƶr energi- och materialteknologi,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,1,Institutionen fƶr hƤlsa och vƤlfƤrd,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,4,Institutionen fƶr kultur och media,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,6,Institutionen fƶr vĆ„rd,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,42,Medie- och kulturutbildningen,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,21,Pedagogik pĆ„ institutionen fƶr ekonomi och affƤ,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,31,Pedagogik pĆ„ institutionen fƶr energi- och mater,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,11,Pedagogik pĆ„ institutionen fƶr hƤlsa och vƤlfƃ,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,11,Pedagogik pĆ„ institutionen fƶr hƤlsa och vƤlfƤrd,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,41,Pedagogik pĆ„ institutionen fƶr kultur och media,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,61,Pedagogik pĆ„ institutionen fƶr vĆ„rd,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,38,Teknikutbildningen,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,25,Tradenomutbildningen,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,212,Turismutbildningen,, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,9,Ɩvriga enheter,, +Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,55,"Ɩvriga, Ekonomi, administration och juridik",, Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,511,"Ɩvriga, HV (utom idrott)",, -Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,512,"Ɩvriga, Service och tjƤnster",, -Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,52,"Ɩvriga, Konst- och kultur",, Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,53,"Ɩvriga, Humaniora",, -Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,55,"Ɩvriga, Ekonomi, administration och juridik",, Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,57,"Ɩvriga, IT",, +Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,52,"Ɩvriga, Konst- och kultur",, +Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,51,"Ɩvriga, Pedagogik",, +Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,512,"Ɩvriga, Service och tjƤnster",, Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,58,"Ɩvriga, Teknik",, -Yrkeshƶgskolan Novia,,Yrkeshƶgskolan Novia,10066,,,,http://isni.org/isni/0000000406476587,2865 +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,95,"Ɩvriga, ekonomi, administration och juridik",, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,93,"Ɩvriga, humanistiska omrĆ„det",, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,911,"Ɩvriga, hƤlsa och vƤlfƤrdsomrĆ„det",, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,97,"Ɩvriga, informations- och kommunikationsteknik",, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,92,"Ɩvriga, konst- och kulturomrĆ„det",, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,91,"Ɩvriga, pedagogiska omrĆ„det",, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,912,"Ɩvriga, service och tjƤnster",, +Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,98,"Ɩvriga, teknikomrĆ„det",, +Yrkeshƶgskolan Novia,Novia University of Applied Sciences,,10066,,,,, +Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,AD,Administration,, +Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,AD,Administration;Administration;,, +Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,JA,Enheten Jakobstad;Enheten Jakobstad;,, +Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,VA,Enheten Vasa;Enheten Vasa;,, +Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,ƅR,Enheten ƅboRaseborg;Enheten ƅboRaseborg;,, Yrkeshƶgskolan Novia,,Yrkeshƶgskolan Novia,10066,,FoU,Forskning och utveckling,, -Yrkeshƶgskolan
Novia,,Yrkeshƶgskolan Novia,10066,,TS,Turism och SamhƤlle,, -Yrkeshƶgskolan Novia,,Yrkeshƶgskolan Novia,10066,,AD,Administration,, -Yrkeshƶgskolan Novia,,Yrkeshƶgskolan Novia,10066,,FE,Fƶretagsekonomi,, +Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,BE,Institutionen fƶr bioekonomi,, +Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,FE,Institutionen fƶr fƶretagsekonomi,, +Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,HV,Institutionen fƶr hƤlsa och vƤlfƤrd,, +Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,KK,Institutionen fƶr konst och kultur,, +Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,TS,Institutionen fƶr teknik och sjƶfart,, Yrkeshƶgskolan Novia,,Yrkeshƶgskolan Novia,10066,,KU,Kultur,, Yrkeshƶgskolan Novia,,Yrkeshƶgskolan Novia,10066,,NB,Naturbruk,, Yrkeshƶgskolan Novia,,Yrkeshƶgskolan Novia,10066,,NV,Naturvetenskap,, Yrkeshƶgskolan Novia,,Yrkeshƶgskolan Novia,10066,,SF,Sjƶfart,, Yrkeshƶgskolan Novia,,Yrkeshƶgskolan Novia,10066,,TE,Teknik,, Yrkeshƶgskolan Novia,,Yrkeshƶgskolan Novia,10066,,VS,VĆ„rd och sociala,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,,,http://isni.org/isni/0000000121652639,w4VibyBBa2FkZW1p -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801000,FHPT gemensamma kostnader,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801010,FHPT universitetsservice,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801100,"Kultur, historia och filosofi",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801110,SprĆ„k,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801111,Logopedi,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801120,Teologi,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801130,Psykologi,, +ƅbo Akademi,ƅbo Akademi University,,01903,,,,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,22,AllmƤn pedagogik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,111,Analytisk kemi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,118,AnlƤggningsteknik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,23,Barnpedagogik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,90,Biokemi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804300,Biokemi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,98,Bioteknikcentrum,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,99,Bioteknikcentrum,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804400,Bioteknikcentrum,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280440,Bioteknikcentrum,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280440,Bioteknikcentrum (BTC),, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804360,Biovet gemensamma,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804360,Biovet.
gemensamma,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,95,Biovetenskap,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280430,Biovetenskaper,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,92,Cellbiologi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804311,Cellbiologi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2805000,Centret fƶr LivslĆ„ngt LƤrande,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280500,Centret fƶr LivslĆ„ngt LƤrande,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2805010,Centret fƶr SprĆ„k och Kommunikation,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,124,Centret fƶr livslĆ„ngt lƤrande,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2805000,Centret fƶr livslĆ„ngt lƤrande,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280500,Centret fƶr livslĆ„ngt lƤrande,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,125,Centret fƶr livslĆ„ngt lƤrande (CLL),, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,128,Centret fƶr sprĆ„k och kommunikation,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2805010,Centret fƶr sprĆ„k och kommunikation,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,129,Centret fƶr sprĆ„k och kommunikation (CSK),, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,130,Datacentralen,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,131,Datacentralen,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,102,Datateknik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804500,Datateknik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,103,Datavetenskap,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804510,Datavetenskap,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,26,De matematisk-naturvetenskapliga Ƥmnenas och idrottens didaktik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,65,Demografi och landsbygdsforskning,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,19,DnƤtverk Art History,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,123,DnƤtverk Chemical Engineering,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804343,DnƤtverk Functional Marine Biodiversity,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,106,DnƤtverk IT and Mathematics,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,96,DnƤtverk Informational and Structural Biology,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,85,DnƤtverk Material Research,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,97,DnƤtverk Molecular Biosciences,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,43,DnƤtverk Old Testament studies,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,48,DnƤtverk Psychology and Logopedics,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,70,DnƤtverk Realizing Human Rights,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,57,DnƤtverk School of Business and Economics,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,42,Dogmatik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801142,DoktorandnƤtverk AAPL,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801140,DoktorandnƤtverk Art History,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804141,DoktorandnƤtverk Chemical Engineering,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804070,DoktorandnƤtverk Chemical Engineering,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803143,DoktorandnƤtverk Citizens and Democracy,, 
+ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801146,DoktorandnƤtverk FHPT 2018-2021,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801147,DoktorandnƤtverk FHPT 2020-2023,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802141,DoktorandnƤtverk FPV 2018-2021,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802142,DoktorandnƤtverk FPV 2020-2023,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803144,DoktorandnƤtverk FSE 2018-2021,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803145,DoktorandnƤtverk FSE 2020-2023,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804343,DoktorandnƤtverk Functional Marine Biodiversity,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804540,DoktorandnƤtverk IT & Mathematics,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804340,DoktorandnƤtverk Inform. and Struct. Bio,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804340,DoktorandnƤtverk Informational and Structural Bio,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804140,DoktorandnƤtverk Material Research,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804060,DoktorandnƤtverk Material Research,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801144,DoktorandnƤtverk Minority Research,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804341,DoktorandnƤtverk Molecular Biosciences,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801141,DoktorandnƤtverk Old Testament Studies,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801142,DoktorandnƤtverk AAPL (Abo Akademi Psychology and Logopedics),, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801143,"Gamla forskarskolan, FHPT",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801144,DoktorandnƤtverket Minority Research,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801145,DoktorandnƤtverket The Age of Sweden,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803140,DoktorandnƤtverk Realizing Human R.,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803140,DoktorandnƤtverk Realizing Human Rights,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803141,DoktorandnƤtverk School of Business Economics,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801145,DoktorandnƤtverk The Age of Sweden,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,142,Ekonomi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806131,"Ekonomiservice, FHPT",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806134,"Ekonomiservice, FNT",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806132,"Ekonomiservice, FPV",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806133,"Ekonomiservice, FSE",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806130,"Ekonomiservice, central",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806130,"Ekonomiservice,Central",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806131,"Ekonomiservice,FHPT",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806134,"Ekonomiservice,FNT",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806132,"Ekonomiservice,FPV",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806133,"Ekonomiservice,FSE",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804121,Energi- och miljƶteknik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804720,Energiteknik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,121,Energiteknik Vasa,, +ƅbo 
Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,3,Engelska,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,10,Etnologi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802130,Experience Lab,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802130,Experience lab,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280100,FHPT StƶdtjƤnster,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801010,FHPT Uniservice,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801000,FHPT gemensamma kostnader,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801010,FHPT universitetsservice,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801150,FHPT utbildning,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280400,FNT StƶdtjƤnster,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804000,FNT gemensamma kostnader,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280400,FNT stƶdtjƤnster,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804010,FNT universitetsservice,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802000,FPV Gemensamma kostnader,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280200,FPV StƶdtjƤnster,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280220,FPV Vasa Ɩvningsskola (VƖS),, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802000,FPV gemensamma kostnader,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280200,FPV stƶdtjƤnster,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802010,FPV universitetsservice,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802100,Pedagogik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802110,HƤlsovetenskaper,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802120,Socialvetenskaper,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802130,MediaCity,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802140,"Gamla forskarskolan, FPV",, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802150,FPV utbildning,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802200,"Vasa ƶvningsskola, gemensamma kostnader",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802210,"Vasa ƶvningsskola, grundutbildning",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802220,"Vasa ƶvningsskola, gymnasiet",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803000,FSE Gemensamma kostnader,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280300,FSE StƶdtjƤnster,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803000,FSE gemensamma kostnader,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280300,FSE stƶdtjƤnster,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803010,FSE universitetsservice,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803100,Fƶretagsekonomi,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803110,Nationalekonomi,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803111,Informationsvetenskap,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803112,Informationssystem,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803113,IAMSR,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803120,SamhƤllsvetenskaper,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803121,RƤttsvetenskap,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803122,Institutet fƶr samhƤllsforskning,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803123,Institutet fƶr
mƤnskliga rƤttigheter,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803140,DoktorandnƤtverk Realizing Human Rights,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803141,DoktorandnƤtverk School of Business and Economics,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803142,"Gamla forskarskolan, FSE",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803143,DoktorandnƤtverk Citizens and Demogracy,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803150,FSE utbildning,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804000,FNT gemensamma kostnader,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804010,FNT universitetsservice,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804050,Tekniska serviceenheten,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804100,Naturvetenskaper,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804110,Kemi (teknisktvetenskapliga),, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804111,Kemi (naturvetenskapliga),, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804120,Process- och systemteknik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804121,Energi- och miljƶteknik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804130,Naturmaterialteknik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804140,DoktorandnƤtverk Material Research,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804141,DoktorandnƤtverk Chemical Engineering,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804142,"Gamla forskarskolan, NatVet & teknik",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804150,"Utbildning, naturvetenskaper och teknik",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804160,Naturvetenskap och teknik gemensamma,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804200,Nationalla PET-centret,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804300,Biokemi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,91,Farmaci,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804310,Farmaci,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804311,Cellbiologi,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804312,Husƶ biologiska station,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804313,Miljƶ- och marinbiologi,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804340,DoktorandnƤtverk Informational and Structural Biology,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804341,DoktorandnƤtverk Molecular Biosciences,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804342,"Gamla forskarskolan, BioVet",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804343,DoktorandnƤtverket Functional Marine Biodiversity,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804350,"Utbildning, biovetenskaper",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804360,Biovetenskap gemensamma,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804400,Bioteknikcentrum,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804410,"Gamla forskarskolan, BTC",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804500,Datateknik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804510,Datavetenskap,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804540,DoktorandnƤtverk Information Technologies and Mathematics,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804541,"Gamla forskarskolan, IT",, 
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804550,"Utbildning, informationsteknologi",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804560,IT gemensamma,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804600,Turku Centre for Computer Science,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804610,"Gamla forskarskolan, TUCS",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2805000,Centret fƶr livslĆ„ngt lƤrande,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2805010,Centret fƶr sprĆ„k och kommunikation,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806000,ƅbo Akademis bibliotek,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806010,Tritonia,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806020,Sibeliusmuseum,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806030,Sjƶhistoriska institutet,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806100,"AllmƤn universitetsservice, central",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806101,"AllmƤn universitetsservice, FHPT",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806102,"AllmƤn universitetsservice, FPV",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806103,"AllmƤn universitetsservice, FSE",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806104,"AllmƤn universitetsservice, FNT",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806110,Ledning,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806111,Kommunikation,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806120,Forskningsservice,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806121,"Utbildningsservice, central",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806122,"Utbildningsservice, FHPT",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806123,"Utbildningsservice, FPV",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806124,"Utbildningsservice, FSE",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806125,"Utbildningsservice, FNT",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806130,"Ekonomiservice, central",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806131,"Ekonomiservice, FHPT",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806132,"Ekonomiservice, FPV",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806133,"Ekonomiservice, FSE",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806134,"Ekonomiservice, FNT",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806140,Personalservice,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806150,Fastighetsservice och upphandling,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806151,Planering,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,138,Fastigheter,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806152,Fastigheter och hyror,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806160,ICT-service,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806150,Fastighetsservice och upphandling,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,116,Fiber- och cellulosateknologi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,9,Filosofi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,4,Finska,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,11,Folkloristik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,143,Forskning och utbildning,, +ƅbo Akademi,ƅbo Akademi University,ƅbo 
Akademi,01903,,2806120,Forskningsservice,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,5,Franska,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,81,Fysik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804700,Fysik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,83,Fysikalisk kemi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,54,Fƶretagets organisation och ledning,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803100,Fƶretagsekonomi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,51,Fƶretagsekonomiska Ƥmnena,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,141,Fƶrvaltningen gemensamma,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804410,"Gamla forskarskolan, BTC",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804342,"Gamla forskarskolan, BioVet",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801143,"Gamla forskarskolan, FHPT",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802140,"Gamla forskarskolan, FPV",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803142,"Gamla forskarskolan, FSE",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804541,"Gamla forskarskolan, IT",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804142,"Gamla forskarskolan, NatVet & teknik",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804610,"Gamla forskarskolan, TUCS",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,37,Gammaltestamentlig exegetik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,84,Geologi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804702,Geologi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,49,Handelshƶgskolan,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,50,Handelshƶgskolan gemensamma,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,12,Historia,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280110,"Humaniora, psykologi och teologi",, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,1,Humanistiska fakulteten,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2,Humanistiska fakulteten gemensamma,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,3,Engelska,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,4,Finska,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,5,Franska,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,6,Ryska,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,7,Svenska,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,8,Tyska,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,9,Filosofi,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,10,Etnologi,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,11,Folkloristik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,12,Historia,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,13,Religionsvetenskap,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,93,Husƶ biologiska station,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804312,Husƶ biologiska station,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802110,HƤlsovetenskaper,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803113,IAMSR,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806160,ICT service,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806160,ICT-service,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804721,Industriell Ekonomi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,122,Industriell 
ekonomi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,55,Informationsfƶrvaltining,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,104,Informationssystem,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803112,Informationssystem,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804560,Informationsteknologi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280450,Informationsteknologi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803111,Informationsvetenskap,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803123,Insitutet fƶr mƤnskliga rƤttigheter,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,110,Insitutionen fƶr kemiteknik gemensamma,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,101,Instit. fƶr informationsteknologi gemensamma,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,105,Institute for Advanced Management System Research (IAMSR),, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803123,Institutet fƶr mƤnskliga rƤttigheter,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,69,Institutet fƶr mƤnskliga rƤttiheter,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803122,Institutet fƶr samhƤllsforskning,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,88,Institutionen fƶr biovetenskaper,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,89,Institutionen fƶr biovetenskaper gemensamma,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,100,Institutionen fƶr informationsteknologi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,109,Institutionen fƶr kemiteknik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,76,Institutionen fƶr naturvetenskap gemensamma,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,44,Institutionen fƶr psykologi och logopedi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,53,Internationell marknadsfƶring,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,140,Kansler,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804111,Kemi (naturvetenskaplig),, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804110,Kemi (tekn.vetenskaplig),, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280471,Kemi och kemiteknik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804713,Kemier gemensamma,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,144,Kommunikation,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806111,Kommunikation,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,14,Konstvetenskap,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801100,"Kultur, historia och filosofi",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,61,Kvinnovetenskap,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,38,Kyrkohistoria,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804050,Laboratorieservice,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806110,Ledning,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806101,"Lednings- och fakultetsstƶd, FHPT",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806104,"Lednings- och fakultetsstƶd, FNT",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806102,"Lednings- och fakultetsstƶd, FPV",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806103,"Lednings- och fakultetsstƶd, FSE",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806100,"Lednings- och fakultetsstƶd, central",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806101,"Lednings- och 
fakultetsstƶd,FHPT",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806104,"Lednings- och fakultetsstƶd,FNT",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806102,"Lednings- och fakultetsstƶd,FPV",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806103,"Lednings- och fakultetsstƶd,FSE",, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806100,"Lednings- och fakultetsstƶd,central",, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,15,Litteraturvetenskap,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,47,Logopedi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801111,Logopedi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,78,Matematik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804701,Matematik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,126,Mediacity,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,127,Mediacity,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,94,Miljƶ- och marinbiologi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804313,Miljƶ- och marinbiologi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804710,MolekylƤrvetenskap och teknik,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,16,Musikvetenskap,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,17,Sibeliusmuseum,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,18,Sjƶhistoriska institutet,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,19,DnƤtverk Art History,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,20,Pedagogiska fakulteten,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,56,Nationalekonomi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803110,Nationalekonomi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,86,Nationella PET centret,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,87,Nationella PET-Centret,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804200,Nationella PET-centret,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280420,Nationella PET-centret,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804130,Naturmaterialteknik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804711,Naturmaterialteknik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804100,Naturvetenskaper,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280470,Naturvetenskaper,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804704,Naturvetenskaper gemensamma,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280410,Naturvetenskaper och teknik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,77,Naturvetenskapliga institutionen,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804160,Natvet&Tek gemensamma,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,41,Nytestamentlig exegetik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,74,Offentlig fƶrvaltning,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,112,Oorganisk kemi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,82,Organisk kemi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804200,PET-centret,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280420,PET-centret,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,117,PappersfƶrƤdling,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802100,Pedagogik,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,21,Pedagogik gemensamma,, -ƅbo Akademi,ƅbo Akademi University,ƅbo 
Akademi,01903,,22,AllmƤn pedagogik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,23,Barnpedagogik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,24,Specialpedagogik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,25,Vuxenpedagogik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,26,De matematisk-naturvetenskapliga Ƥmnenas och idrottens didaktik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280210,Pedagogik och vƤlfƤrd,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,27,"Pedagogik, tillƤmpad pedagogik och yrkesundervisningens didaktik",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,28,Slƶjdpedagogik och husliga ekonomins didaktik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,29,SprĆ„k- och kulturvetenskapernas didaktik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,30,Vasa ƶvningsskola,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,31,VƖS gemensamma,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,32,"VƖS, Ć„k 1-6",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,33,"VƖS, Ć„k 7-9",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,34,"VƖS, gymnasiet",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,35,Teologiska fakulteten,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,36,Teologiska fakulteten gemensamma,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,37,Gammaltestamentlig exegetik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,38,Kyrkohistoria,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,39,Teologisk etik och religionsfilosofi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,20,Pedagogiska fakulteten,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,145,Personal,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806140,Personalservice,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806151,Planering,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,40,Praktisk teologi,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,41,Nytestamentlig exegetik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,42,Dogmatik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,43,DnƤtverk Old Testament studies,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,44,Institutionen fƶr psykologi och logopedi,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,45,Psykologi och logopedi gemensamma,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804722,Process och Systemteknik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804725,Process och energiteknik gemensamma,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280472,Process- och energiteknik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804120,Process- och systemteknik,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,46,Psykologi,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,47,Logopedi,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,48,DnƤtverk Psychology and Logopedics,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,49,Handelshƶgskolan,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,50,Handelshƶgskolan gemensamma,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,51,Fƶretagsekonomiska Ƥmnena,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801130,Psykologi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,45,Psykologi och logopedi gemensamma,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,52,Redovisning,, -ƅbo 
Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,53,Internationell marknadsfƶring,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,54,Fƶretagets organisation och ledning,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,55,Informationsfƶrvaltining,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,56,Nationalekonomi,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,57,DnƤtverk School of Business and Economics,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,147,Reform 2015,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,119,Reglerteknik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,139,Rektor,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280620,Rektors strategiska och centrala medel,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806200,Rektors strategiska och centrala medel,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,13,Religionsvetenskap,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,6,Ryska,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803121,RƤttsvetenskap,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803121,RƤttsvetenskaper,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,66,RƤttsvetenskapliga institutionen,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,68,RƤttvetenskaper,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,67,RƤttvetenskaper gemensamma,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803122,Samforsk,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,75,Samforsk,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803120,SamhƤllsvetenskaper,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280310,SamhƤllsvetenskaper och ekonomi,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,17,Sibeliusmuseum,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806020,Sibeliusmuseum,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806030,Sjƶhistoriska insitutet,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,18,Sjƶhistoriska institutet,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806030,Sjƶhistoriska institutet,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,28,Slƶjdpedagogik och husliga ekonomins didaktik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,63,Socialpolitik,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802120,Socialvetenskaper,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,58,Socialvetenskapliga institutionen,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,59,Socialvetenskapliga institutionen gemensamma,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,60,Sociologi,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,61,Kvinnovetenskap,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,62,VĆ„rdvetenskap,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,63,Socialpolitik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,64,Utvecklingspsykologi,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,65,Demografi och landsbygdsforskning,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,66,RƤttsvetenskapliga institutionen,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,67,RƤttvetenskaper gemensamma,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,68,RƤttvetenskaper,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,69,Institutet fƶr mƤnskliga rƤttiheter,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,70,DnƤtverk Realizing Human Rights,, +ƅbo 
Akademi,Åbo Akademi University,Åbo Akademi,01903,,24,Specialpedagogik,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2801110,Språk,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,29,Språk- och kulturvetenskapernas didaktik,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,80,Statistik,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,73,Statskunskap,,
 Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,71,Statsvetenskapliga institutionen,,
 Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,72,Statsvetenskapliga institutionen gemensamma,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,73,Statskunskap,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,74,Offentlig förvaltning,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,75,Samforsk,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,76,Institutionen för naturvetenskap gemensamma,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,77,Naturvetenskapliga institutionen,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,78,Matematik,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,79,Verkstaden,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,80,Statistik,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,81,Fysik,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,82,Organisk kemi,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,83,Fysikalisk kemi,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,84,Geologi,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,85,Dnätverk Material Research,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,86,Nationella PET centret,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,87,Nationella PET-Centret,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,88,Institutionen för biovetenskaper,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,89,Institutionen för biovetenskaper gemensamma,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,90,Biokemi,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,91,Farmaci,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,92,Cellbiologi,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,93,Husö biologiska station,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,94,Miljö- och marinbiologi,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,95,Biovetenskap,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,96,Dnätverk Informational and Structural Biology,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,97,Dnätverk Molecular Biosciences,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,98,Bioteknikcentrum,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,99,Bioteknikcentrum,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,100,Institutionen för informationsteknologi,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,101,Instit. för informationsteknologi gemensamma,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,102,Datateknik,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,103,Datavetenskap,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,104,Informationssystem,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,105,Institute for Advanced Management System Research (IAMSR),,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,106,Dnätverk IT and Mathematics,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,7,Svenska,,
 Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,107,TUCS,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,108,Åbo datatekniska forsknings- och utbildningscentrum (TUCS),,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,109,Institutionen för kemiteknik,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,110,Insitutionen för kemiteknik gemensamma,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,111,Analytisk kemi,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,112,Oorganisk kemi,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,113,Teknisk polymerkemi,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,280460,TUCS,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2804723,Teknisk Kemi och Reaktionsteknik,,
 Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,114,Teknisk kemi och reaktionsteknik,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,115,Trä- och papperskemi,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,116,Fiber- och cellulosateknologi,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,117,Pappersförädling,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,118,Anläggningsteknik,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,119,Reglerteknik,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,120,Värmeteknik,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,121,Energiteknik Vasa,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,122,Industriell ekonomi,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,123,Dnätverk Chemical Engineering,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,124,Centret för livslångt lärande,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,125,Centret för livslångt lärande (CLL),,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,126,Mediacity,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,127,Mediacity,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,128,Centret för språk och kommunikation,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,129,Centret för språk och kommunikation (CSK),,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,130,Datacentralen,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,131,Datacentralen,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,132,Åbo Akademis bibliotek,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,133,ÅAB,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,113,Teknisk polymerkemi,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,146,Teknisk service,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2804050,Tekniska serviceenheten,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2801120,Teologi,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,39,Teologisk etik och religionsfilosofi,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,35,Teologiska fakulteten,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,36,Teologiska fakulteten gemensamma,,
 Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,134,Tritonia,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2806010,Tritonia,,
 Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,135,Tritonia 2011,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,115,Trä- och papperskemi,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2804600,Turku Centre for Computer Science,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,8,Tyska,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,280610,Universitetsservice,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2804712,Utbildning - Kemier,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2804703,Utbildning - Naturvetenskaper,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2804724,Utbildning - Process och energiteknik,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2804350,Utbildning-Biovetenskaper,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2804550,Utbildning-Informationsteknologi,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2804150,Utbildning-Naturvetenskaper och teknik,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2806122,"Utbildningsservice, FHPT",,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2806125,"Utbildningsservice, FNT",,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2806123,"Utbildningsservice, FPV",,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2806124,"Utbildningsservice, FSE",,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2806121,"Utbildningsservice, central",,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2806121,"Utbildningsservice,Central",,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2806122,"Utbildningsservice,FHPT",,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2806125,"Utbildningsservice,FNT",,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2806123,"Utbildningsservice,FPV",,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2806124,"Utbildningsservice,FSE",,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,64,Utvecklingspsykologi,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,30,Vasa övningsskola,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,280220,Vasa övningsskola,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,79,Verkstaden,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,25,Vuxenpedagogik,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2802200,VÃ–S Gemensamma kostnader,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2802210,VÃ–S Grundutbildning,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2802220,VÃ–S Gymnasiet,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2802200,VÃ–S gemensamma kostnader,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2802210,VÃ–S grundutbildning,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2802220,VÃ–S gymnasiet,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,31,VÖS gemensamma,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,34,"VÖS, gymnasiet",,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,32,"VÖS, åk 1-6",,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,33,"VÖS, åk 7-9",,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,120,Värmeteknik,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,62,Vårdvetenskap,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2806000,Ã…bo Akademis Bibliotek,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,280600,Ã…bo Akademis Bibliotek,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,2806000,Ã…bo Akademis bibliotek,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,280600,Ã…bo Akademis bibliotek,,
 Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,136,ÅA gemensamma,,
 Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,137,ÅA gemensamma,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,138,Fastigheter,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,139,Rektor,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,140,Kansler,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,141,Förvaltningen gemensamma,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,142,Ekonomi,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,143,Forskning och utbildning,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,144,Kommunikation,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,145,Personal,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,146,Teknisk service,,
-Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,147,Reform 2015,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,,,http://isni.org/isni/0000000123753425,7
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,5040300,Tulosyksiköt,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030001,Geoenergia GNR,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030002,Yhdyskunnat ja rakentaminen YRA,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030003,Kalliorakentaminen ja sijoituspaikat KAS,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030004,Ympäristögeologia YMP,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030005,Merigeologia MRG,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030006,Mineraalitalous ja malmigeologia MIM,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030007,Mineraalivarannot MIV,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030008,Teollisuusmineraalit TMI,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030009,Mineraalitekniikka ja materiaalit MMA,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030010,Turvevarannot TUR,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030011,Pohjavesi PVI,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030012,Geofysiikan sovellukset GSO,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030013,Tuotantoympäristöt ja kierrätys TUK,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030014,Alueellinen geotieto ALG,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030015,Geotietovarannon hallinta GEH,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040300,504030016,Digitaaliset tuotteet ja palvelut DIP,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040200,5040200,Toiminnan johtaminen,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,5040200,50402003,Strateginen johto,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,50402003,5040200310,"Henkilöstö, osaaminen ja työympäristöt",,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,50402003,5040200320,Viestintä ja markkinointi,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,50402003,5040200330,Strategia ja suunnittelu,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,50402003,5040200340,Tieteellinen tutkimus,,
-Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,50402003,5040200350,Digitaalisuus ja tietovarannot,,
-Maanmittauslaitos,National Land Survey of Finland,Lantmäteriverket,4020217,,,,http://isni.org/isni/000000040494894X,1229
-Metsäntutkimuslaitos,Finnish Forest Research Institute,Skogsforskningsinstitutet,404001,,,,http://isni.org/isni/0000000122651136,TWV0c8OkbnR1dGtpbXVzbGFpdG9z
-Suomen Pankki,Suomen Pankki,Finlands bank,02022481,,,,http://isni.org/isni/0000000404104982,1888
-Suomen ympäristökeskus,Finnish Environment Institute,Finlands miljöcentral,7020017,,,,http://isni.org/isni/0000000110191419,1903
-Säteilyturvakeskus,Radiation and Nuclear Safety Authority,Strålsäkerhetscentralen,5550012,,,,http://isni.org/isni/000000011534674X,U8OkdGVpbHl0dXJ2YWtlc2t1cw==
-Teknologiantutkimuskeskus VTT Oy,VTT Technical Research Centre of Finland Ltd,Teknologiska forskningscentralen VTT AB,26473754,,,,http://isni.org/isni/0000000121061548,2045
-Terveyden ja hyvinvoinninlaitos,National Institute for Health and Welfare,Institutet för hälsa och välfärd,5610017,,,,http://isni.org/isni/0000000110130499,2060
-Työterveyslaitos,Finnish Institute of Occupational Health,Arbetshälsoinstitutet,02202669,,,,http://isni.org/isni/0000000404105926,VhnDtnRlcnZleXNsYWl0b3M=
-Ulkopoliittinen instituutti,Finnish Institute of International Affairs,Utrikespolitiska institutet,1120017,,,,http://isni.org/isni/0000000406205576,2215
-Valtion taloudellinen tutkimuskeskus,VATT Institute for Economic Research,Statens ekonomiska forskningscentral,3060016,,,,http://isni.org/isni/0000000121621381,2396
-Tilastokeskus,Statistics Finland,Statistikcentralen,02454911,,,,http://isni.org/isni/0000000404105635,
-Kotimaisten kielten keskus,Institute for the Languages of Finland,Institutet för de inhemska språken,02458728,,,,http://isni.org/isni/0000000109457291,
-Maaseutuvirasto,Agency for Rural Affairs in Finland,Landsbygdsverket,24053596,,,,,
-Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,02446292,,,,http://isni.org/isni/0000000446686757,
-Ilmatieteen laitos,Finnish Meteorological Insitute,Meterologiska institutet,02446647,,,,http://isni.org/isni/0000000122538678,
-Väestörekisterikeskus,Population Register Center,Befolkningsregistercentralen,02454372,,,,,
-CSC – Tieteen tietotekniikan keskus Oy,CSC - IT Center for Science Ltd,,09206320,,,,,
-Väylävirasto,Finnish Transport Infrastructure Agency,Trafikledsverket,10105471,,,,http://isni.org/isni/0000000405091319,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,133,ÅAB,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,132,Åbo Akademis bibliotek,,
+Åbo Akademi,Åbo Akademi University,Åbo Akademi,01903,,108,Åbo datatekniska forsknings- och utbildningscentrum (TUCS),,

From 06eb058c5c4b80c14df9f2343afed2b760eee61e Mon Sep 17 00:00:00 2001
From: Toni
Date: Thu, 27 May 2021 17:34:41 +0300
Subject: [PATCH 008/160] fix duplicates in org.csv, comparison bug

---
 .../management/commands/update_orgs.py        | 62 +-
 .../resources/organizations/organizations.csv | 397 +-----------------
 2 files changed, 43 insertions(+), 416 deletions(-)

diff --git a/src/metax_api/management/commands/update_orgs.py b/src/metax_api/management/commands/update_orgs.py
index 5e9485fa..fa498b1b 100644
--- a/src/metax_api/management/commands/update_orgs.py
+++ b/src/metax_api/management/commands/update_orgs.py
@@ -23,31 +23,23 @@
 ]
 
 
-@dataclass()
+@dataclass(order=True)
 class Organization:
-    org_name_fi: str = field()
-    org_name_en: str = field()
-    org_code: str = field()
-    org_name_sv: str = field(default="")
-    unit_sub_code: str = field(default="")
-    unit_name: str = field(default="")
-    org_isni: str = field(default="")
-    org_csc: str = field(default="")
-    unit_main_code: str = field(default="")
-
-    def compare(self, other):
-        """Not overriding default __cmp__ for clarity"""
-        if (
-            self.org_code == other.org_code
-            and self.unit_sub_code == other.unit_sub_code
-            and self.unit_main_code == other.unit_main_code
-        ):
-            return True
-        return False
+    org_name_fi: str = field(compare=False)
+    org_name_en: str = field(compare=False)
+    org_code: str = field(compare=True)
+    org_name_sv: str = field(default="", compare=False)
+    unit_sub_code: str = field(default="", compare=True)
+    unit_name: str = field(default="", compare=False)
+    org_isni: str = field(default="", compare=False)
+    org_csc: str = field(default="", compare=False)
+    unit_main_code: str = field(default="", compare=True)
+
     def compare_and_update(self, other):
         changes = 0
-        match = self.compare(other)
+        match = self == other
         if match:
             if self.org_name_fi != other.org_name_fi:
                 self.org_name_fi = other.org_name_fi
@@ -61,12 +53,29 @@ def compare_and_update(self, other):
             if self.unit_name != other.unit_name:
                 self.unit_name = other.unit_name
                 changes += 1
+            if self.org_isni is None and other.org_isni is not None:
+                self.org_isni = other.org_isni
+                changes += 1
+            elif self.org_isni is not None and other.org_isni is None:
+                other.org_isni = self.org_isni
+                changes += 1
+
+            if self.org_csc is None and other.org_csc is not None:
+                self.org_csc = other.org_csc
+                changes += 1
+            elif self.org_csc is not None and other.org_csc is None:
+                other.org_csc = self.org_csc
+                changes += 1
         return match, changes
 
     def __str__(self):
         return f"{self.org_code}-{self.unit_sub_code}-{self.unit_name}"
 
+    def __post_init__(self):
+        if self.unit_name.endswith(";;"):
+            self.unit_name = self.unit_name[:-2]
+
 
 def get_orgs_from_api() -> List[Organization]:
     res = requests.get(
@@ -136,9 +145,9 @@ def handle(self, *args, **options):
         for i in api_orgs:
             match = False
             for a in loc_orgs:
-                match = i.compare(a)
+                match = i == a
             if not match:
-                logger.info(f"adding missing org {i.org_name_fi}, {i.unit_name}")
+                # logger.info(f"adding missing org {i.org_name_fi}, {i.unit_name}")
                 union.append(i)
                 added += 1
         logger.info(f"Added {added} organisations from research.fi to local org list")
@@ -146,11 +155,12 @@ def handle(self, *args, **options):
         s = sorted(union, key=lambda i: (i.org_name_fi, i.unit_name))
         with open(settings.ORG_FILE_PATH, "w") as f:
             logger.info("writing updated csv")
-            no_duplicates = OrderedDict()
+            no_duplicates = []
            for c in s:
-                no_duplicates[str(c)] = c
+                if c not in no_duplicates:
+                    no_duplicates.append(c)
 
-            csv_serialized = [asdict(v) for k, v in no_duplicates.items()]
+            csv_serialized = [asdict(v) for v in no_duplicates]
             writer = csv.DictWriter(
                 f,
                 fieldnames=CSV_HEADERS,
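A note on the semantics the two fixes above rely on: with @dataclass(order=True) and per-field compare= flags, the generated __eq__ (and the ordering methods) consider only the fields marked compare=True, so "self == other" matches organizations by their code columns while ignoring display names, and "if c not in no_duplicates" deduplicates on that same key. A minimal, self-contained sketch of this behaviour, using a toy Org stand-in rather than the patch's actual class:

    # Sketch only: illustrates dataclass comparison semantics, not code from this repo.
    from dataclasses import dataclass, field

    @dataclass(order=True)
    class Org:
        name: str = field(compare=False)  # ignored by __eq__ and by <, <=, >, >=
        code: str = field(compare=True)   # the only field that participates

    a = Org(name="Statistik", code="80")
    b = Org(name="Statistics", code="80")
    assert a == b          # names differ, but the compare field matches
    assert not (a < b)     # order=True derives ordering from the same field tuple

    # list.__contains__ uses the generated __eq__, so rows that differ only in
    # non-compare fields collapse to a single entry, as in handle() above:
    no_duplicates = []
    for org in (a, b):
        if org not in no_duplicates:
            no_duplicates.append(org)
    assert len(no_duplicates) == 1

One trade-off worth noting: "c not in no_duplicates" is a linear scan, so the deduplication pass is quadratic in the row count; that is fine at this data size, but a set of the compare-field tuples would make it linear.
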
diff --git a/src/metax_api/tasks/refdata/refdata_indexer/resources/organizations/organizations.csv b/src/metax_api/tasks/refdata/refdata_indexer/resources/organizations/organizations.csv
index bd748565..c3216311 100755
--- a/src/metax_api/tasks/refdata/refdata_indexer/resources/organizations/organizations.csv
+++ b/src/metax_api/tasks/refdata/refdata_indexer/resources/organizations/organizations.csv
@@ -81,7 +81,6 @@ Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,T199,"Shool common,
 Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U200,U JSI,,
 Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U100,University level,,
 Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U,University level and joint,,
-Aalto-yliopisto,Aalto University,Aalto-universitetet,10076,,U,"University level, Joint units & Endowment",,
 Alkoholitutkimussäätiö,The Finnish Foundation for Alcohol Studies,,02009295,,,,,
 CSC - Tieteen tietotekniikan keskus Oy,CSC – IT Center for Science,,09206320,,,,,
 Centria-ammattikorkeakoulu,Centria University of Applied Sciences,,02536,,,,,
@@ -118,45 +117,26 @@ Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,51,5
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,75,75 Kansainvälisyyspalvelut,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,,02623,,23,Alusta DIGI-USER,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,65,Digipalvelut,,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,65,Digipalvelut;;,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,87,Hanketoiminta;;,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,63,Henkilostopalvelut,,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,63,Henkilostopalvelut;;,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,68,Infrapalvelut,,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,68,Infrapalvelut;;,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,84,Innovaatiotoiminta;;,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,67,Kampuspalvelut,,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,67,Kampuspalvelut;;,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,88,Kansainvalinen toiminta,,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,88,Kansainvalisyys;;,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,66,Kirjasto ja tietopalvelut,,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,66,Kirjasto ja tietopalvelut;;,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,96,Kirkolliset opinnot,,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,96,Kirkolliset opinnot;;,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,91,Koulutus,,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,91,Koulutus;;,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,85,Liiketoiminta ja innovaatiot;;,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,74,Opiskelijapalvelut,,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,74,Opiskelijapalvelut;;,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,86,Pedagogiikka ja kehittaminen;;,,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,86,Pedagogiikka ja tutkimus,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,61,Rehtoraatti,,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,61,Rehtoraatti;;,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,92,Sairaanhoitaja- ja terveydenhoitajatutkinnot (temk,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,93,Sairaanhoitaja- ja terveydenhoitajatutkinnot (temk,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,94,Sosionomitutkinnot (temkok 1-3),,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,94,Sosionomitutkinnot (temkok 1-3);;,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,95,"Sosionomitutkinnot (temkok 4-7, DSS)",,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,95,"Sosionomitutkinnot (temkok 4-7, DSS);;",,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,87,TKI-palvelut,,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,84,TKI-toiminta,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,64,Talouspalvelut,,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,64,Talouspalvelut;;,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,97,Tulkkaustutkinnot ja kielet,,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,97,Tulkkaustutkinnot ja kielet;;,,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,85,Tyoelamapalvelut,,
 Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,62,"Viestinta, tiedotus ja markkinointi",,
-Diakonia-ammattikorkeakoulu,Diaconia University of Applied Sciences,Diakonia-ammattikorkeakoulu,02623,,62,"Viestinta, tiedotus ja markkinointi;;",,
 Geologian tutkimuskeskus,Geological Survey of Finland,,5040011,,,,,
 Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,504030014,Alueellinen geotieto ALG,,
 Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,504030016,Digitaaliset tuotteet ja palvelut DIP,,
@@ -203,7 +183,6 @@ Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentr
 Geologian tutkimuskeskus,Geological Survey of Finland,Geologiska forskningscentralen,5040011,,5040300331,"Ympäristöratkaisut, Ympäristö",,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,,,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,AOKK,AOKK Ammatillinen opettajakorkeakoulu,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,AOKK,AOKK Ammatillinen opettajakorkeakoulu;;,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2400,Analytiikka ja kehittäminen,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,MUBBA,DP for Multilingual Management Assistants,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,ABBA,DP in Aviation Business,,
@@ -216,35 +195,15 @@ Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,SPORT,DP in Sports and Leisure Management,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,TOBBA,DP in Tourism,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,DT,"DT-HTP, TKI, Kovat",,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,DT,DT-Kovat ja muut (ei opetus);;,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,DT,DT-Medianomi-opetus Pasilassa,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,DT,DT-Medianomi-opetus Pasilassa;;,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,DT,DT-Tradenomi IT -opetus Pasilassa,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,DT,DT-Tradenomi IT-opetus Pasilassa;;,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,DT,DT-Tradenomi-opetus Pasilassa,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,DT,DT-Tradenomi-opetus Pasilassa;;,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,5500,Digipalvelut ja tietohallinto,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2410,Digitaaliset palvelut,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTH,"EHTH-HTP, TKI ja Kovat Haagassa",,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTH,EHTH-Kovat ja muut (ei opetus) Haagassa;;,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTH,EHTH-Restonomi-opetus Haagassa,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTH,EHTH-Restonomi-opetus Haagassa;;,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTP,"EHTP-HTP, TKI ja Kovat Porvoossa",,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTP,EHTP-Kovat ja muut (ei opetus) Porvoossa;;,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTP,EHTP-Restonomi-opetus Porvoossa,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTP,EHTP-Restonomi-opetus Porvoossa;;,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTP,EHTP-Tradenomi-opetus Porvoossa,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTP,EHTP-Tradenomi-opetus Porvoossa;;,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTV,"EHTV-HTP, TKI ja Kovat Vierumäellä",,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTV,EHTV-Kovat ja muut (ei opetus) Vierumaella;;,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTV,EHTV-Liikunnanohjaaja-koulutus Vierumäellä,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,EHTV,EHTV-Liikunnanohjaaja-opetus Vierumaella;;,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,5510,Elinkaari- ja käyttäjäpalvelut,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,FINA,Finanssi- ja talousasiantuntijan koulutusohjelma,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2100,HR ja johtaminen,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,HTP,"HTP-Hallinto- ja tukipalvelut (KOPA, HR, Tieto, Ta",,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,HTP,HTP-Kaupalliset palvelut,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,HTP,HTP-Kaupalliset palvelut;;,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,5200,Henkilöstö ja kulttuuri,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,RUOKA,Hotelli- ja ravintola-alan ko.,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,HOTRA,Hotelli- ja ravintola-alan liikkeenjohdon ko.,,
@@ -264,11 +223,8 @@ Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,4200,Koulutusvienti,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,2000,Kp 2000,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,LT,LT-Kovat ja muut (ei opetus);;,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,LT,"LT-TKI, HTP, Kovat",,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,LTM,LT-Tradenomi-opetus Malmilla,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,LTM,LT-Tradenomi-opetus Malmilla;;,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,LTP,LT-Tradenomi-opetus Pasilassa,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,LTP,LT-Tradenomi-opetus Pasilassa;;,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,HELI,Liiketalouden ko,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,LIIPO,Liiketalouden ko,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,LOT,Liikunnan ja vapaa-ajan ko,,
@@ -298,11 +254,6 @@ Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,1200,Restonomi,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,RUOKAT,Ruokatuotannon johtamisen ko,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,TKI,TKI-Master-tiimi,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,TKI,TKI-Start-Up School,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,TKI,TKI-Start-Up School;;,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,TKI,TKI-Tukipalvelut,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,TKI,TKI-Tukipalvelutiimi;;,,
-Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,TKI,TKI-Ylempi AMK-opetus;;,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,5300,Talous,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,,10056,,HETI,Tietojenkäsittelyn ko,,
 Haaga-Helia ammattikorkeakoulu,Haaga-Helia University of Applied Sciences,Haaga-Helia ammattikorkeakoulu,10056,,3650,Tietopalvelut,,
@@ -360,111 +311,59 @@ Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,Humanistine
 Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,Humanistinen ammattikorkeakoulu,02631,,100,Kulttuuri,,
 Humanistinen ammattikorkeakoulu,HUMAK University of Applied Sciences,,02631,,10,Taiteet ja kulttuurialat,,
 Humanistinen ammattikorkeakoulu,HUMAK University of Applied Sciences,,02631,,20,Terveys- ja hyvinvointialat,,
-Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,Humanistinen ammattikorkeakoulu,02631,,300,Tulkkaus,,
 Humanistinen ammattikorkeakoulu,HUMAK University of Applied Sciences,,02631,,50,Yhteiset,,
-Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,Humanistinen ammattikorkeakoulu,02631,,400,Yhteiset palvelut,,
 Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,Humanistinen ammattikorkeakoulu,02631,,500,Yhteiset palvelut,,
-Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,Humanistinen ammattikorkeakoulu,02631,,100,Yhteisöjen kehittäminen,,
-Humanistinen ammattikorkeakoulu,Humak University of Applied Sciences,Humanistinen ammattikorkeakoulu,02631,,200,Yhteisöpedagogi,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,,02467,,,,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,132,AOKK tukipalvelut,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,132,AOKK tukipalvelut;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,131,Ammatillinen erityisopettaja- ja opinto-ohjaajakou,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,4,Ammatillinen opettajakorkeakoulu,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,4,Ammatillinen opettajakorkeakoulu;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,2,Ammatillinen opettajakoulutus,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,2,Ammatillinen opettajakoulutus;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,60,Ammatillisen opettajakorkeakoulun tyoelamapalvelut,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,101,BIOS tukipalvelut,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,101,BIOS tukipalvelut;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,5,Bio- ja elintarviketekniikka,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,5,Bio- ja elintarviketekniikka;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,61,Biotalouden tyoelamapalvelut,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,61,Biotalouden tyoelamapalvelut;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,8,Biotalouden yksikko,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,8,Biotalouden yksikko;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,102,Biotalousinsinoori,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,102,Biotalousinsinoori;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,48,HAMK Bio,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,48,HAMK Bio;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,89,HAMK Design Factory,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,89,HAMK Design Factory;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,3,HAMK Edu,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,3,HAMK Edu;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,43,HAMK Smart,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,43,HAMK Smart;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,27,HAMK Tech,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,27,HAMK Tech;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,13,HYOS tukipalvelut,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,13,HYOS tukipalvelut;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,10,Hallinto-ja talouspalvelut,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,10,Hallinto-ja talouspalvelut;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,12,Henkilostopalvelut,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,12,Henkilostopalvelut;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,46,Hoitotyö,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,62,Hyvinvointiosaamisen tyoelamapalvelut,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,62,Hyvinvointiosaamisen tyoelamapalvelut;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,6,Hyvinvointiosaamisen yksikko,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,6,Hyvinvointiosaamisen yksikko;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,14,International Business,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,14,International Business;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,67,Kansainvaliset palvelut,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,67,Kansainvaliset palvelut;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,15,Kestava kehitys,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,15,Kestava kehitys;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,16,Kiinteistopalvelut,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,16,Kiinteistopalvelut;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,17,Kirjasto ja tietopalvelut,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,17,Kirjasto ja tietopalvelut;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,18,Konetekniikka,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,18,Konetekniikka;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,44,Koulutuksen tukipalvelut,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,44,Koulutuksen tukipalvelut;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,37,Liikenneala,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,37,Liikenneala;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,20,Liiketalous,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,20,Liiketalous;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,21,Maaseutuelinkeinot ja hevostalous,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,21,Maaseutuelinkeinot ja hevostalous;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,25,Metsatalous,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,25,Metsatalous;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,26,Muotoilu,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,26,Muotoilu;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,29,Puutarhatalous,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,29,Puutarhatalous;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,30,Rakennettu ymparisto,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,30,Rakennettu ymparisto;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,31,Rakennus- ja yhdyskuntatekniikka,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,31,Rakennus- ja yhdyskuntatekniikka;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,33,Sahko- ja automaatiotekniikka,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,33,Sahko- ja automaatiotekniikka;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,32,Sosiaaliala,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,32,Sosiaaliala;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,40,Strateginen viestinta,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,40,Strateginen viestinta;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,34,TEOS tukipalvelut,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,34,TEOS tukipalvelut;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,65,Teknologiaosaamisen tyoelamapalvelut,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,65,Teknologiaosaamisen tyoelamapalvelut;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,7,Teknologiaosaamisen yksikko,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,7,Teknologiaosaamisen yksikko;;,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,46,Terveys Hml ja Frs;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,35,Tieto- ja viestintatekniikka,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,35,Tieto- ja viestintatekniikka;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,36,Tietohallinto,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,36,Tietohallinto;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,38,Tietojenkasittely,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,38,Tietojenkasittely;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,42,YRLI tukipalvelut,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,42,YRLI tukipalvelut;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,1,Yhteiset palvelut,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,1,Yhteiset palvelut;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,41,Yleishallinto,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,41,Yleishallinto;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,11,Yrittajyyden ja liiketoimintaosaamisen yksikko,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,11,Yrittajyyden ja liiketoimintaosaamisen yksikko;;,,
 Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,66,Yrittajyyden tyoelamapalvelut,,
-Hämeen ammattikorkeakoulu,Häme University of Applied Sciences,Hämeen ammattikorkeakoulu,02467,,66,Yrittajyyden tyoelamapalvelut;;,,
 Ilmatieteen laitos,Finnish Meteorological Insitute,Meterologiska institutet,02446647,,,,http://isni.org/isni/0000000122538678,
 Ilmatieteen laitos,Finnish Meteorological Institute,,4940015,,,,,
 Innovaatiorahoituskeskus Business Finland,Innovaatiorahoituskeskus Business Finland,,05126964,,,,,
@@ -486,15 +385,12 @@ Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,200000,201030,Humanistinen osasto / Vieraat kielet ja käännöstiede,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,200000,201010,Humanistinen osasto / Yhteiset,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,201020,"Humanistinen osasto, suomen kieli ja kulttuuritiet",,
-Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,201020,"Humanistinen osasto, suomen kieli ja kulttuuritieteet",,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,201030,"Humanistinen osasto, vieraat kielet ja käännöst",,
-Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,201030,"Humanistinen osasto, vieraat kielet ja käännöstiede",,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,201010,"Humanistinen osasto, yhteiset",,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,600000,600000,Itä-Suomen yliopiston apteekki,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,600010,Itä-Suomen yliopiston apteekki,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,2050,Itä-Suomen yliopiston harjoittelukoulu,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,205010,Itä-Suomen yliopiston harjoittelukoulu,,
-Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,205010,"Itä-Suomen yliopiston harjoittelukoulu, Joensuu",,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,205020,"Itä-Suomen yliopiston harjoittelukoulu, Savonlinna",,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,700000,Jatkuvan oppimisen keskus,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,506010,Karjalan tutkimuslaitos,,
@@ -504,20 +400,15 @@ Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,200000,202040,Kasvatustieteiden ja psykologian osasto / Psykologia,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,200000,202010,Kasvatustieteiden ja psykologian osasto / Yhteiset,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,202040,"Kasvatustieteiden ja psykologian osasto, Psykologi",,
-Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,202040,"Kasvatustieteiden ja psykologian osasto, Psykologia",,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,202020,"Kasvatustieteiden ja psykologian osasto, erityispe",,
-Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,202020,"Kasvatustieteiden ja psykologian osasto, erityispedagogiikka",,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,202030,"Kasvatustieteiden ja psykologian osasto, kasvatust",,
-Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,202030,"Kasvatustieteiden ja psykologian osasto, kasvatustiede, aikuiskasvatus ja ohjaus",,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,202010,"Kasvatustieteiden ja psykologian osasto, yhteiset",,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,502010,Kauppatieteiden laitos,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,304010,Kemian laitos,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,800010,Kielikeskus,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,800020,Kirjasto,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,400000,405010,Koe-eläinkeskus,,
-Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,700000,Koulutus- ja kehittämispalvelu Aducate,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,300010,Luonnontieteiden ja metsätieteiden tiedekunnan ha,,
-Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,300010,Luonnontieteiden ja metsätieteiden tiedekunnan hallinto,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,300000,300010,Luonnontieteiden ja metsätieteiden tiedekunnan hallinto / Yhteiset,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,4040,Lääketieteen laitos,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,400000,404020,Lääketieteen laitos / Biolääketiede,,
@@ -526,19 +417,15 @@ Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,400000,404040,Lääketieteen laitos / Kliininen lääketiede,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,400000,404010,Lääketieteen laitos / Yhteiset,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,404020,"Lääketieteen laitos, biolääketiede",,
-Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,404020,"Lääketieteen laitos, biolääketieteen yksikkö",,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,404050,"Lääketieteen laitos, hammaslääketiede",,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,404030,"Lääketieteen laitos, kansanterveystiede ja kliininen ravitsemustiede",,
-Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,404030,"Lääketieteen laitos, kansanterveystieteen ja kli",,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,404040,"Lääketieteen laitos, kliininen lääketiede",,
-Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,404040,"Lääketieteen laitos, kliinisen lääketieteen yk",,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,404010,"Lääketieteen laitos, yhteiset",,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,500000,507010,Matkailualan opetus- ja tutkimuslaitos,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,300000,309010,Mekrijärven tutkimusasema,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,305010,Metsätieteiden osasto,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,503010,Oikeustieteiden laitos,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,308010,SIB-labs -infrastruktuuriyksikkö,,
-Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,308010,SIB-labs infrastruktuuriyksikkö,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,504010,Sosiaali- ja terveysjohtamisen laitos,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,303510,Sovelletun fysiikan laitos,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,203010,Soveltavan kasvatustieteen ja opettajankoulutuksen,,
@@ -548,7 +435,6 @@ Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,200000,203030,Soveltavan kasvatustieteen ja opettajankoulutuksen osasto / Savonlinna,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,200000,203010,Soveltavan kasvatustieteen ja opettajankoulutuksen osasto / Yhteiset,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,203030,"Soveltavan kasvatustieteen ja opettajankoulutuksen osasto, Savonlinna",,
-Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,203010,"Soveltavan kasvatustieteen ja opettajankoulutuksen osasto, yhteiset",,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,2040,Teologian osasto,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,200000,204020,Teologian osasto / Läntinen teologia,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,200000,204030,Teologian osasto / Ortodoksinen teologia,,
@@ -560,7 +446,6 @@ Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,400000,400010,Terveystieteiden tiedekunnan hallinto / Yhteiset,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,306020,Tietojenkäsittelytieteen laitos,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,500010,Yhteiskuntatieteiden ja kauppatieteiden tiedekunna,,
-Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,500010,Yhteiskuntatieteiden ja kauppatieteiden tiedekunnan hallinto,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,500000,500010,Yhteiskuntatieteiden ja kauppatieteiden tiedekunnan hallinto / Yhteiset,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,505010,Yhteiskuntatieteiden laitos,,
 Itä-Suomen yliopisto,University of Eastern Finland,Östra Finlands universitet,10088,,100000,"Yliopiston johto, yhteiset ja yliopistopalvelut",,
@@ -613,7 +498,6 @@ Jyväskylän yliopisto,University of Jyväskylä,Jyväskylä universitet,01906,2
 Jyväskylän yliopisto,University of Jyväskylä,Jyväskylä universitet,01906,,212000,Yhteiskuntatieteellinen tiedekunta,,
 Jyväskylän yliopisto,University of Jyväskylä,Jyväskylä universitet,01906,212000,212010,Yhteiskuntatieteiden ja filosofian laitos,,
 Jyväskylän yliopisto,University of Jyväskylä,Jyväskylä universitet,01906,,213070,Yhteiskuntatieteiden ja filosofian laitos,,
-Jyväskylän yliopisto,University of Jyväskylä,Jyväskylä universitet,01906,,219220,Yliopiston kielikeskus,,
 Jyväskylän yliopisto,University of Jyväskylä,Jyväskylä universitet,01906,,219500,Yliopistopalvelut,,
 KAUTE-säätiö,,,02014465,,,,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,,10118,,,,,
@@ -631,22 +515,17 @@ Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied S
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,66600,Kymilabs,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,11000,"Liiketalouden koulutusyksikkö, Kouvola",,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,12000,"Liiketalouden koulutusyksikkö, Mikkeli",,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,31000,Liikunnan ja kuntoutuksen koulutusyksikkö,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,31000,Liikunta- ja kuntoutusalan koulutusyksikkö,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,25000,Logistiikan ja merenkulun koulutusyksikkö,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,62300,Logistiikka ja merenkulku,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,60000,62500,Luovat alat,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,66800,Luovien alojen tutkimusyksikkö,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,35000,"Matkailu, ravitsemis- ja nuorisoalan koulutusyksik",,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,35000,"Matkailu, ravitsemis- ja nuorisoalan koulutusyksikkö",,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,30000,35000,"Matkailu-,ravitsemis- ja nuorisoalan koulutusyksikkö",,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,62200,"Metsä, ympäristö ja energia",,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,24000,Metsätalouden ja ympäristöteknologian koulutusy,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,24000,Metsätalouden ja ympäristöteknologian koulutusyksikkö,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,66300,Mikpolis,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,60000,66200,Nuorisoalan tutkimus- ja kehittämiskeskus Juvenia,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,66200,Nuorisoalan tutkimus- ja kehittämisyksikkö Juven,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,66200,Nuorisoalan tutkimus- ja kehittämisyksikkö Juvenia,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,40000,Opetuksen hallinto,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,51000,Opetuksen palvelut,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,66900,Pienyrityskeskus,,
@@ -654,16 +533,11 @@ Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied S
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,21000,Rakennus- ja energiatekniikan koulutusyksikkö,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,33000,Sosiaali- ja terveysalan Mikkelin koulutusyksikkö,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,36000,Sosiaali- ja terveysalan Savonlinnan koulutusyksik,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,36000,Sosiaali- ja terveysalan Savonlinnan koulutusyksikkö,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,33000,Sosiaali- ja terveysalan koulutusyksikkö,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,34000,Sosiaalialan ja toimintakyvyn edistämisen koulutu,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,34000,Sosiaalialan ja toimintakyvyn edistämisen koulutusyksikkö,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,20000,23000,"Sähkö-, talo- ja materiaalitekniikan koulutusyksikkö",,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,23000,"Sähkö-, talo- materiaalitekniikan koulutusyksikk",,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,23000,"Sähkö-, talo- materiaalitekniikan koulutusyksikkö",,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,60000,TKI ja palvelut,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,61000,TKI-palvelut,,
-Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,61000,TKI-palvelut -yksikkö,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences (Xamk),,10118,10000,10000,Talous ja kulttuuri yhteiset,,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,70000,"Talous, henkilöstö- ja hallintopalvelut",,
 Kaakkois-Suomen ammattikorkeakoulu,South-Eastern Finland University of Applied Sciences,Kaakkois-Suomen ammattikorkeakoulu,10118,,10000,Talous- ja kulttuuri yhteiset,,
@@ -831,12 +705,9 @@ Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2250400,Pohjo
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,224100,Pohjoisen yhteiskunnan tutkimusinstituutti,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,224100,2241100,Pohjoisen yhteiskunnan tutkimusinstituutti,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2241100,Pohjoisen yhteiskunnan tutkimusinstituutti (LAPPEA,,
-Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2241100,Pohjoisen yhteiskunnan tutkimusinstituutti (LAPPEA),,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2250300,Pohjoisen ymparisto- ja vahemmistooikeuden institu,,
-Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2250300,Pohjoisen ymparisto- ja vahemmistooikeuden instituutti,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,225000,2250300,Pohjoisen ympäristö- ja vähemmistöoikeuden instituutti,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2298802,Pohjoiset kulttuurit ja kestava luonnonvarapolitii,,
-Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2298802,Pohjoiset kulttuurit ja kestava luonnonvarapolitiikka,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229800,2298802,Pohjoiset kulttuurit ja kestävä luonnonvarapolitiikka,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,20026300,Pääkirjasto,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,20026300,200263100,Pääkirjasto,,
@@ -865,7 +736,6 @@ Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2291300,Tieto
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2291500,Tilahallinto,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2295800,Tutkijakoulu,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2291800,Tutkimuspalvelut,,
-Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2293100,Tutkimusvararehtori,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2290600,Täydentävän rahoituksen palvelupiste,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2297600,Uarctic perustoiminta,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2297500,Uarctic-verkosto,,
@@ -876,7 +746,6 @@ Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229000,2290500
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2240000,YTK hallinto,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2240100,YTK perustoiminta,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2299040,Yhteiskuntatieteiden opetuksen ja tutkimuksen raha,,
-Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2299040,Yhteiskuntatieteiden opetuksen ja tutkimuksen rahasto,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,224000,Yhteiskuntatieteiden tiedekunta,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,,2298803,Yhteisot ja muuttuva tyo,,
 Lapin yliopisto,University of Lapland,Lapplands universitet,01918,229800,2298803,Yhteisöt ja muuttuva työ,,
@@ -1131,7 +1000,6 @@ Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–
 Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B237A,Aurinkotalousprofessuuri,,
 Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B220G,Bioenergian laboratorio/Mikkeli,,
 Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E122A,Bus yhteiset,,
-Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23E122A,Business studies,,
 Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B233A,CDMC-laboratorio,,
 Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B301A,CST,,
 Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B24BE,Circular Economy Lahti,,
@@ -1165,8 +1033,6 @@ Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–
 Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B25AE,Industrial Desing Engineering,,
 Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3B2A,Innovation Management,,
 Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto LUT,01914,,23B3E1E,Innovation management/LAHTI,,
-Lappeenrannan–Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannan–Lahden teknillinen yliopisto
LUT,01914,,23E123A,International Business and Entrepreneurship (IBE),, -Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E121G,International Business and Entrepreneurship-Mikkel,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E12DE,International Marketing (IM) Lahti,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E125A,International Marketing(IM),, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E100A,Kanslia,, @@ -1188,7 +1054,6 @@ Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“ Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K666A,LABin kirjastopalvelut,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K667A,LAMKin kirjastopalvelut,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E124A,LAMO,, -Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E100A,LBM Office,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E105A,LBM/Management Research lab,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B303A,LENS tekniset palvelut,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23J400A,LUT Investointiohjelma,, @@ -1203,8 +1068,6 @@ Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“ Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B218A,Laskennallinen virtausmekaniikka,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B30BA,Laskennallisen tekniikan koulutusohjelma,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B211A,LƤmpƶ- ja virtaustekniikan laboratorio,, -Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E125A,MIMM,, -Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E129A,MSF&MBAN,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E126A,MSIS,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E127A,MSM,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23KE30A,Matkustus- ja kv-tyƶskentelyn palvelut,, @@ -1222,7 +1085,6 @@ Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“ Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E1ADA,Platform LBM DIGI-USER,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto 
LUT,01914,,23E1ACA,Platform LBM RE-SOURCE,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E1AFA,Platform LBM RED,, -Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E1AAA,Platform LBM Reflex,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E1AEA,Platform LBM SAWE,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E1ABA,Platform LBM SIM,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B3AAA,Platform LENS Reflex,, @@ -1250,7 +1112,6 @@ Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“ Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B309A,School of Engineering Science yhteiset,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E102A,Schoolin yhteiset,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B350A,Separation Science,, -Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B350A,Separation and Purification Technology,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23KD30A,Sihteeripalvelut,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B3B1A,Software Engineering,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B3C0A,Software Engineering,, @@ -1259,15 +1120,12 @@ Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“ Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23KD10A,Strateginen suunnittelu ja toiminnan ohjaus,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E12CA,Strategy and accounting (SA),, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B3B5A,Supply Chain Management,, -Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E127A,Supply Management(SM),, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B3B4A,Systems Engineering,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B231A,SƤhkƶmarkkinalaboratorio,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B232A,SƤhkƶnkƤyttƶtekniikan laboratorio,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B230A,SƤhkƶtekniikan laboratorio,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B234A,SƤƤtƶ- ja digitaalitekniikan laboratorio,, Lappeenrannanā€“Lahden teknillinen 
yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23KD42A,TES- ja tyƶnantajapalvelut,, -Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E12AA,TIJO-LPR,, -Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23E12BE,TIJO-Lahti,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23KE41A,Talouden suunnittelu ja seuranta,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B212A,Teknillinen termodynamiikka,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B256A,TerƤsrakenteiden laboratorio,, @@ -1281,9 +1139,7 @@ Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“ Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23J110A,Toiminta,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B30DA,Tuotantotalouden koulutusohjelma,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B254A,Tuotantotekniikan laboratorio,, -Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K300A,Tutkimuksen palvelut,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K340A,Tutkimuspolitiikka,, -Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23KE21A,Tyƶterveys- tyƶhyvinvointi- ja tyƶsuhdepalvelut,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K880A,TƤydennyskoulutus liiketoiminta,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23K881A,TƤydennyskoulutus yhteisrahoitteinen,, Lappeenrannanā€“Lahden teknillinen yliopisto LUT,LUT University,Lappeenrannanā€“Lahden teknillinen yliopisto LUT,01914,,23B214A,Uusiutuvien energiajƤrjestelmien laboratorio,, @@ -1320,9 +1176,7 @@ Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100 Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100411410,"Aineistopalvelut Jokioinen, kotielƤintuotanto ja uudet tuotantomuodot",, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410410,"Aineistopalvelut Kokkola, SeinƤjoki",, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410910,Aineistopalvelut Maaninka,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410910,"Aineistopalvelut Maaninka, Kainuu",, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100411010,"Aineistopalvelut Oulu, Siikajoki",, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410410,"Aineistopalvelut Parkano, Kokkola, SeinƤjoki",, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410710,"Aineistopalvelut Rovaniemi, Inari, Utsjoki",, Luonnonvarakeskus,Natural 
Resources Institute Finland,Naturresursinstitutet,4100010,,4100411510,Aineistopalvelut Suonenjoki,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110910,Akvaattisten populaatioiden dynamiikka,, @@ -1331,7 +1185,6 @@ Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100 Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211110,Bioraaka-aineiden rakenne ja ominaisuudet,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100510310,Biotalouden kannattavuus,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100510210,Biotalouden tilastot,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310110,Biotalous ja ympƤristƶ / BITA Esikunta (4100310110),, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310610,Biotalous ja ympƤristƶ / Hiilen kierron hallinta (4100310610),, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310810,Biotalous ja ympƤristƶ / KestƤvyystutkimus ja indikaattorit (4100310810),, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310210,Biotalous ja ympƤristƶ / Luonnonvarapolitiikat ja -markkinat (4100310210),, @@ -1344,21 +1197,14 @@ Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100 Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210310,ElƤingenetiikka,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211510,ElƤinravitsemus,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610110,Esikunta/PƤƤjohtaja,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210310,Genomiikka ja jalostus,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610410,Henkilƶstƶpalvelut,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310610,Hiilen kierron hallinta,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410110,INFRA Esikunta,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610710,IT ja digitaaliset ratkaisut,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100111110,Kalastus ja kalavarat,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210210,Kasvigenetiikka,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110610,Kasvinterveys,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310810,KestƤvyystutkimus ja indikaattorit,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110110,LUVA Esikunta,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211010,Liha- ja non-food elƤintuotanto,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310210,Luonnonvarapolitiikat ja -markkinat,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110910,Luonnonvarat / Akvaattisten populaatioiden dynamiikka (4100110910),, -Luonnonvarakeskus,Natural Resources Institute 
Finland,Naturresursinstitutet,4100010,,4100110610,Luonnonvarat / Kasvinterveys (4100110610),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110110,Luonnonvarat / LUVA Esikunta (4100110110),, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110510,Luonnonvarat / MaaperƤekosysteemit (4100110510),, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110710,Luonnonvarat / Metsien terveys ja biodiversiteetti (4100110710),, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110310,Luonnonvarat / MetsƤnhoito (4100110310),, @@ -1366,71 +1212,25 @@ Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100 Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110810,Luonnonvarat / Riistapopulaatioiden dynamiikka (4100110810),, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100111010,Luonnonvarat / Soveltava tilastotiede (4100111010),, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110410,Luonnonvarat / Vesistƶkuormitus (4100110410),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310710,MaankƤyttƶ ja aluesuunnittelu,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110510,MaaperƤekosysteemit,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310710,"Maaseutu, maankƤyttƶ ja luonnonvarojen hallinta",, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210710,Maatalouden teknologiat,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210910,Maidontuotanto,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110710,Metsien terveys ja biodiversiteetti,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110310,MetsƤnhoito,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210410,MetsƤnjalostus,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210610,MetsƤteknologia ja logistiikka,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310510,MetsƤvarojen inventointi ja metsƤsuunnittelu,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211410,Nurmet ja kestƤvƤ maatalous,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610110,PalveluryhmƤt / Esikunta/PƤƤjohtaja (4100610110),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610410,PalveluryhmƤt / Henkilƶstƶpalvelut (4100610410),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610710,PalveluryhmƤt / IT ja digitaaliset ratkaisut (4100610710),, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610610,PalveluryhmƤt / Talous- ja toimitilat (4100610610),, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610210,PalveluryhmƤt / Tutkimus ja asiakkuudet (4100610210),, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610510,PalveluryhmƤt / ViestintƤ ja markkinointi (4100610510),, 
Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610310,PalveluryhmƤt / Yhteiskuntasuhteet ja hallinto (4100610310),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110210,Peltokasvien tuotanto,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210510,Puutarhateknologiat,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110810,Riistapopulaatioiden dynamiikka,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100111010,Soveltava tilastotiede,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100510110,TIPA Esikunta,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210110,TUJA Esikunta,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610610,Talous- ja toimitilat,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100510310,Tilastopalvelut / Biotalouden kannattavuus (4100510310),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100510210,Tilastopalvelut / Biotalouden tilastot (4100510210),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100510110,Tilastopalvelut / TIPA Esikunta (4100510110),, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100510410,Tilastopalvelut / Tilastotuotannon menetelmƤt (4100510410),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100510410,Tilastotuotannon menetelmƤt,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211610,TuotantoelƤinten hyvinvointi,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211210,TuotantojƤrjestelmƤt / Biojalostusteknologiat ja tuotteet (4100211210),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211110,TuotantojƤrjestelmƤt / Bioraaka-aineiden rakenne ja ominaisuudet (4100211110),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211310,TuotantojƤrjestelmƤt / Elintarvikkeiden prosessointi ja laatu (4100211310),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210310,TuotantojƤrjestelmƤt / ElƤingenetiikka (4100210310),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210210,TuotantojƤrjestelmƤt / Kasvigenetiikka (4100210210),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100211010,TuotantojƤrjestelmƤt / Liha- ja non-food elƤintuotanto (4100211010),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210710,TuotantojƤrjestelmƤt / Maatalouden teknologiat (4100210710),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210910,TuotantojƤrjestelmƤt / Maidontuotanto (4100210910),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210410,TuotantojƤrjestelmƤt / MetsƤnjalostus (4100210410),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210610,TuotantojƤrjestelmƤt / MetsƤteknologia ja logistiikka (4100210610),, -Luonnonvarakeskus,Natural Resources Institute 
Finland,Naturresursinstitutet,4100010,,4100210510,TuotantojƤrjestelmƤt / Puutarhateknologiat (4100210510),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210110,TuotantojƤrjestelmƤt / TUJA Esikunta (4100210110),, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210810,TuotantojƤrjestelmƤt / Vesiviljelyratkaisut (4100210810),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610210,Tutkimus ja asiakkuudet,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410510,"Tutkimusinfrastruktuuripalvelut / Aineistopalvelut HaapastensyrjƤ, Suonenjoki (4100410510)",, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100411110,Tutkimusinfrastruktuuripalvelut / Aineistopalvelut Helsinki (4100411110),, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410610,"Tutkimusinfrastruktuuripalvelut / Aineistopalvelut Joensuu, Savonlinna (4100410610)",, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100411210,"Tutkimusinfrastruktuuripalvelut / Aineistopalvelut Jokioinen, Piikkiƶ (4100411210)",, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410910,"Tutkimusinfrastruktuuripalvelut / Aineistopalvelut Maaninka, Kainuu (4100410910)",, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100411010,"Tutkimusinfrastruktuuripalvelut / Aineistopalvelut Oulu, Siikajoki (4100411010)",, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410410,"Tutkimusinfrastruktuuripalvelut / Aineistopalvelut Parkano, Kokkola, SeinƤjoki (4100410410)",, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410710,"Tutkimusinfrastruktuuripalvelut / Aineistopalvelut Rovaniemi, Inari, Utsjoki (4100410710)",, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410110,Tutkimusinfrastruktuuripalvelut / INFRA Esikunta (4100410110),, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410810,"Tutkimusinfrastruktuuripalvelut / Vesiviljely Enonkoski, Laukaa, Taivalkoski, Keminmaa (4100410810)",, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100111210,Vaelluskalat ja rakennetut joet,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100110410,Vesistƶkuormitus,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410810,"Vesiviljely Enonkoski, Inari, Keminmaa, Laukaa, Paltamo, Taivalkoski",, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100410810,"Vesiviljely Enonkoski, Laukaa, Taivalkoski, Keminmaa",, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100210810,Vesiviljelyratkaisut,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610510,ViestintƤ ja markkinointi,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310410,Virkistys ja luontoarvot,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100610310,Yhteiskuntasuhteet ja hallinto,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100311010,YmpƤristƶ- ja 
luonnonvaratalous,, -Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310310,Yritys- ja ympƤristƶtalous,, Luonnonvarakeskus,Natural Resources Institute Finland,Naturresursinstitutet,4100010,,4100310910,Yritystalous ja liiketoimintamallit,, LƤƤkealan turvallisuus- ja kehittƤmiskeskus,The Finnish Medicines Agency,,558005,,,,, Maanmittauslaitos,National Land Survey of Finland,,4020217,,,,, @@ -1450,13 +1250,11 @@ Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,423,EsittƤvƤ taide,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,329,Hakijapalvelut,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,478,Hankintapalvelut,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,478,Hankintapalvelut;;,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,190,Henkilƶstƶpalvelut,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,447,Hyvinvointi,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,451,ICT ja tuotantotalous,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,436,"Informaatiotekniikka ja pelisovellukset, tiimi",, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,477,Jatkuvan oppimisen kehittƤminen,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,477,Jatkuvan oppimisen kehittƤminen;;Development of L,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,405,Jatkuvan oppimisen palvelut,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,219,JƤrjestelmƤyllƤpito,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,418,KansainvƤlinen liiketoiminta,, @@ -1468,11 +1266,8 @@ Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropol Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,207,Kirjasto- ja tietopalvelut,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,343,"Kirjasto- ja tietopalvelut, Arabia-Myllypuro",, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,344,"Kirjasto- ja tietopalvelut, Karamalmi-MyyrmƤki",, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,344,"Kirjasto- ja tietopalvelut, Lite-tiimi",, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,343,"Kirjasto- ja tietopalvelut, Sote-tiimi",, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,341,"Kirjasto- ja tietopalvelut, TeKu-tiimi",, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,471,Kliinisen hoidon tiimi,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,471,Kliinisen hoidon 
tiimi;;,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,411,Kliinisen hoitotyƶn ja ensihoidon palvelut,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,424,Konservointi,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,465,"Konservointi, 3D, XR -tiimi",, @@ -1482,7 +1277,6 @@ Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropol Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,460,Kulttuurituotannon ja vaatetuksen tiimi,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,425,Kulttuurituotanto,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,474,Kuntoutuksen tiimi,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,474,Kuntoutuksen tiimi;;,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,446,Kuntoutus ja tutkiminen,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,429,"Kv-liiketoiminta, YAMK ja TKI",, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,428,"Kv-liiketoiminta, hallinto",, @@ -1490,10 +1284,8 @@ Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropol Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,220,KƤyttƤjƤtuki,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,442,Laki- ja arkistointipalvelut,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,468,Liiketalouden AMK-tiimi,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,468,Liiketalouden AMK-tiimi;;,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,469,Liiketalouden YAMK- ja KV-tiimi,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,452,Liiketalous,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,405,Liiketoiminta,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,409,Liikkuminen ja toimintakyky,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,457,Me3 -tiimi,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,419,Media,, @@ -1502,7 +1294,6 @@ Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,426,Muotoilu,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,464,"Muotoilu, teknot ja yleisaineet -tiimi",, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,470,Musiikin tiimi,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,470,Musiikin tiimi;;,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,414,Musiikki,, Metropolia ammattikorkeakoulu,Metropolia University of 
Applied Sciences,Metropolia ammattikorkeakoulu,10065,,438,NƤyttƶkeskus,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,209,Opintoasiainpalvelut,, @@ -1515,7 +1306,6 @@ Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropol Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,448,Puhtaat ja ƤlykkƤƤt ratkaisut,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,408,Puhtaat teknologiat,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,475,PƤƤasia -tiimi,, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,475,PƤƤasia -tiimi;;,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,407,Rakentaminen ja arkkitehtuuri,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,455,Rakentaminen ja arkkitehtuuri -tiimi,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,180,Rehtorin yksikkƶ,, @@ -1546,47 +1336,31 @@ Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropol Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,479,TƤydennyskoulutus- ja yrityspalvelut,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,,10065,,427,Vaatetus,, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,466,"Vanhustyƶ, kuntoutus, toimintaterapia -tiimi",, -Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,466,"Vanhustyƶ, kuntoutus, toimintaterapia -tiimi;;",, Metropolia ammattikorkeakoulu,Metropolia University of Applied Sciences,Metropolia ammattikorkeakoulu,10065,,340,ViestintƤyksikkƶ,, MetsƤntutkimuslaitos,Finnish Forest Research Institute,Skogsforskningsinstitutet,404001,,,,http://isni.org/isni/0000000122651136,TWV0c8OkbnR1dGtpbXVzbGFpdG9z Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,,,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102200,Ammatillinen opettajakoulutus,, -Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102200,Ammatillinen opettajakoulutus;;Vocational Teacher,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,104400,Ammatillisen opettajankoulutuksen yksikkƶ,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102600,Energia ja automaatio,, -Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102600,Energia ja automaatio;;Energy and Automation,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102120,Hallintopalvelut,, -Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102120,Hallintopalvelut;;Administration (Services),, -Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102000,Hallintoyksikkƶ,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,104000,Hallintoyksikkƶ,, -Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun 
ammattikorkeakoulu,02471,,102000,Hallintoyksikkƶ;;Administrative Unit,, +Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102000,Hallintoyksikkƶ,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102300,Hoitoalat (Oulu ja Oulainen),, -Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102300,Hoitoalat (Oulu ja Oulainen);;Health Care and Nurs,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102750,Informaatioteknologia,, -Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102750,Informaatioteknologia;;Information Technology,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,104800,Informaatioteknologian yksikkƶ,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102100,KehittƤminen,, -Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102100,KehittƤminen;;Development,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102700,Konetekniikka,, -Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102700,Konetekniikka;;Mechanical Engineering,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102900,Kulttuuri,, -Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102900,Kulttuuri;;Media and Performing Arts,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,104900,Kulttuurialan yksikkƶ,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,104600,Liiketalouden yksikkƶ,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102500,Liiketalous,, -Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102500,Liiketalous;;Business,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102800,Luonnonvara-ala,, -Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102800,Luonnonvara-ala;;Natural Resources,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102650,Rakentamistekniikka,, -Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102650,Rakentamistekniikka;;Civil EngineeringƤ,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,104500,Sosiaali- ja terveysalan yksikkƶ,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102400,Sosiaaliala ja kuntoutus,, -Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102400,Sosiaaliala ja kuntoutus;;Social Services and Reha,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,104700,Tekniikan ja Luonnonvara-alan yksikkƶ,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102350,Terveydenhuollon erityisalat,, -Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102350,Terveydenhuollon erityisalat;;Special Fields in He,, Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun 
ammattikorkeakoulu,02471,,102110,"ViestintƤ, markkinointi ja aluevaikuttavuus",, -Oulun ammattikorkeakoulu,Oulu University of Applied Sciences,Oulun ammattikorkeakoulu,02471,,102110,"ViestintƤ, markkinointi ja aluevaikuttavuus;;Comm",, Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10220,Ammatillinen opettajakoulutus,, Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10260,Energia ja automaatio,, Oulun seudun ammattikorkeakoulu,Oulu University of Applied Sciences,,02471,,10212,Hallintopalvelut,, @@ -1684,12 +1458,8 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240720,Cen Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240720,2407200,Center for Health and Technology,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409690,Controller lƤhipalvelutiimi,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240969,Controller lƤhipalvelutiimi,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240969,Controlling,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409690,Controlling,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240273,Disease Networks,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2402730,Disease Networks,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240271,ECM and Hypoxia,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2402710,ECM and Hypoxia,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240334,ELITE,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2403340,ELITE,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24031,240260,Eklund research group,, @@ -1770,21 +1540,18 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2400332,Informa Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24068,Infotech,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240680,Infotech,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2406800,Infotech,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409380,Infrapalvelut,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2445,Infrastruktuuri,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240949,Innovaatio- ja yrittƤjyyspalvelut,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240949,2409245,Innovaatio- ja yrittƤjyyspalvelut,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24290,Innovaatiokeskus,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2402741,Integrin functions,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409492,International Avenue,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240225,IonosfƤƤrifysiikka,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2402250,IonosfƤƤrifysiikka,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24040,240402,Johtamisen ja kansainvƤlisen liiketoiminnan yksikkƶ,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240402,2404020,Johtamisen ja kansainvƤlisen liiketoiminnan yksikkƶ,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2460,Johto ja palvelut,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24092,Johto ja palvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240921,Johto ja palvelut - 
yhteiset,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2460,Johto ja palvelut,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409210,Johto ja palvelut - yhteiset,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240921,Johto ja palvelut - yhteiset,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240611,Jokapaikan tietotekniikka,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2406110,Jokapaikan tietotekniikka,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24031,240262,Juffer and Ohlmeier research group,, @@ -1792,7 +1559,6 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240262,2402620,J Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2410,24025,KAIVANNAISALAN TIEDEKUNTA,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24010,KASVATUSTIETEIDEN TIEDEKUNTA,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24011,KASVATUSTIETEIDEN TIEDEKUNTA,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24011,KASVATUSTIETEIDEN TIEDEKUNTA ilman harjoittelukoul,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24010,24011,KASVATUSTIETEIDEN TIEDEKUNTA ilman harjoittelukouluja,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24030,240335,KNK- ja silmƤtautien tutkimusyksikkƶ 31.1.2016 saakka,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240335,2403350,KNK- ja silmƤtautien tutkimusyksikkƶ 31.1.2016 saakka,, @@ -1878,8 +1644,8 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240543,Konetekn Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405430,Konetekniikan ala,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24193,Koulutus,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240810,2408104,Koulutus- ja tietopalvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240923,Koulutuspalvelut,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409230,Koulutuspalvelut,, +Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240923,Koulutuspalvelut,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240942,2409412,Koulutuspalvelut HKT 31.7.2016 saakka,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240942,2409432,Koulutuspalvelut Kontinkangas 31.7.2016 saakka,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240942,2409422,Koulutuspalvelut LUTK 31.7.2016 saakka,, @@ -1894,10 +1660,6 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409680,Koulutu Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240968,Koulutuspalvelut lƤhipalvelutiimi TSTK,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240965,Koulutuspalvelut lƤhipalvelutiimi TTK ja LuTK,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409650,Koulutuspalvelut lƤhipalvelutiimi TTK ja LuTK,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409650,"Koulutuspalvelut lƤhipalvelutiimi TTK, LuTK, KaTK",, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240965,"Koulutuspalvelut lƤhipalvelutiimi TTK, LuTK, KaTK",, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240923,Koulutuspalvelut yhteiset palvelut,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409230,Koulutuspalvelut yhteiset palvelut,, Oulun yliopisto,University of 
Oulu,UleĆ„borgs universitet,01904,,240842,Koulutusvienti,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2408420,Koulutusvienti,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240557,Kuitu- ja partikkelitekniikka,,
@@ -1920,8 +1682,6 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240778,LEAF,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2407780,LEAF,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24020,LUONNONTIETEELLINEN TIEDEKUNTA,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240810,2408103,Lainaus- ja neuvontapalvelut,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240921,Laki ja sopimus,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409210,Laki ja sopimus,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24191,Laki- ja sopimuspalvelut,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24040,240404,Laskentatoimen yksikkƶ,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240404,2404040,Laskentatoimen yksikkƶ,,
@@ -2017,8 +1777,6 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2401030,Oppimin
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2408063,Optinen spektroskopia,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240582,Optoelektroniikka ja mittaustekniikka,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405820,Optoelektroniikka ja mittaustekniikka,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240273,Organogenesis,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2402730,Organogenesis,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2402341,OuLUMA,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24085,Oulangan tutkimusasema,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240771,Oulangan tutkimusasema,,
@@ -2051,7 +1809,6 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405380,Paja,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240941,Palvelupisteet henkilƶstƶhallinto,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240942,Palvelupisteet koulutuspalvelut 31.7.2016 saakka,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240943,Palvelupisteet talouspalvelut,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2460,Palvelut,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24031,240271,Pihlajaniemi and Heljasvaara research group,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240271,2402710,Pihlajaniemi and Heljasvaara research group,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240332,Pohjois-Suomen syntymƤkohortti,,
@@ -2066,16 +1823,14 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240952,Professo
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409520,Professori Pentti Kaiteran rahasto,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240964,Projektitalous,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409640,Projektitalous,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240544,Prosessi- ja ympƤristƶtekniikan ala,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405440,Prosessi- ja ympƤristƶtekniikan ala,,
+Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240544,Prosessi- ja ympƤristƶtekniikan ala,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240550,2405500,Prosessi- ja ympƤristƶtekniikan os. yht. (ei kƤytƶssƤ),,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24050,240550,Prosessi- ja ympƤristƶtekniikan osasto (ei kƤytƶssƤ),,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240560,Prosessimetallurgia,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405600,Prosessimetallurgia,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240272,Protein and Structural Biology,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2402720,Protein and Structural Biology,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240272,Protein folding,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2402720,Protein folding,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2470,Rahastot,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24040,240406,Rahoituksen yksikkƶ,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240406,2404060,Rahoituksen yksikkƶ,,
@@ -2146,8 +1901,6 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240593,2405941,T
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405941,TST Tyƶpaja henkilƶstƶkulut,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405944,TST tyƶpaja infra,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405934,TST yhteisten muu henkilƶstƶ,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240544,TTK koulutus,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405440,TTK koulutus,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405460,TTK tutkimuksen strateginen rahoitus,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240546,TTK tutkimuksen strateginen rahoitus,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409240,Talouden suunnittelu ja kehitys,,
@@ -2156,9 +1909,6 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240943,2409433,T
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240943,2409473,Taloushallinto keskitetyt talouspalvelut palvelupiste,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240924,Talouspalvelut,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240924,2409240,Talouspalvelut,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409240,Talouspalvelut keskitetyt palvelut,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409640,Talouspalvelut lƤhipalvelutiimi,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240964,Talouspalvelut lƤhipalvelutiimi,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409241,Talouspalvelut yhteiset palvelut,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24040,240403,Taloustieteen yksikkƶ,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240403,2404030,Taloustieteen yksikkƶ,,
@@ -2189,8 +1939,6 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409300,Tietoha
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405953,TietojenkƤsittelytietieteiden ala,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409360,TietojƤrjestelmƤpalvelut,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405951,Tietotekniikan ala,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240925,Tila- ja turvallisuuspalvelut,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409250,Tila- ja turvallisuuspalvelut,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409115,Tilanvaraustilat,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24050,240546,Tohtorikoulutettavat,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240546,2405460,Tohtorikoulutettavat,,
@@ -2205,8 +1953,6 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24092,240928,Tut
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240928,2409280,Tutkimuksen strategiset palvelut,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240948,Tutkimuksen tukipalvelut,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409480,Tutkimuksen tukipalvelut,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240963,Tutkimuksen tukipalvelut lƤhipalvelutiimi,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409630,Tutkimuksen tukipalvelut lƤhipalvelutiimi,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24195,Tutkimus- ja projektipalvelut,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24050,240538,Tyƶpaja,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240538,2405380,Tyƶpaja,,
@@ -2214,7 +1960,6 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240224,TƤhtiti
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2402240,TƤhtitiede,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24086,TƤydentƤvien opintojen keskus,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2407702,Uarctic toimisto,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409241,Ulkoinen laskenta,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24065,UniOGS,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240650,University of Oulu Graduate School,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2406500,University of Oulu Graduate School,,
@@ -2241,8 +1986,6 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409620,Viestin
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24196,ViestintƤ,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240927,"ViestintƤ, markkinointi ja yhteiskuntasuhteet",,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409270,"ViestintƤ, markkinointi ja yhteiskuntasuhteet",,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409620,ViestintƤpalvelut lƤhipalvelutiimi,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240962,ViestintƤpalvelut lƤhipalvelutiimi,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409251,Virastomestaripalvelut,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,24031,240274,Wei research group,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,240274,2402740,Wei research group,,
@@ -2257,9 +2000,6 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409180,YO Lise
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240918,YO Tietohallinto,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409182,YO TietojƤrjestelmƤt,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2450,Yhteiset,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24092,Yhteiset palvelut,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240948,Yhteiset tk-palvelut,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409480,Yhteiset tk-palvelut,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240990,Yliopiston innovaatiokeskus,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409900,Yliopiston innovaatiokeskus,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24091,Yliopiston yhteiset,,
@@ -2269,7 +2009,6 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2409910,Yliopis
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240991,Yliopiston yleishallinto,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240555,YmpƤristƶ- ja kemiantekniikka,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405550,YmpƤristƶ- ja kemiantekniikka,,
-Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240949,YrittƤjyyspalvelut,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240532,ƃā€žlykkƤƤt koneet ja jƤrjestelmƤt,,
 Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2405320,ƃā€žlykkƤƤt koneet ja jƤrjestelmƤt,,
 Oulun yliopistollisen sairaalan erityisvastuualue,Oulu University Hospital Catchment Area,,06794809,,,,,
@@ -2315,7 +2054,6 @@ Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammatt
 Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,410,Sosiaali- ja terveysala,,
 Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,830,TK-toiminta,,
 Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,110,Teknologia ja ympƤristƶala,,
-Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,110,Teknologia ja ympƤristƶals,,
 Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,111,Tutkintokoulutus,,
 Savonia-ammattikorkeakoulu,Savonia University of Applied Sciences,Savonia-ammattikorkeakoulu,02537,,700,Yrityspalvelut,,
 SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,,,,,
@@ -2330,7 +2068,6 @@ SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,SeinƤjo
 SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,ElMa,ElMa,SeAMK Elintarvike ja maatalous,,
 SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,LiKu,LiKu,SeAMK Liiketoiminta ja kulttuuri,,
 SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,SeinƤjoen ammattikorkeakoulu,02472,,SosTer,SeAMK Sosiaali- ja terveysala (terveys & hyvinvoin,,
-SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,SeinƤjoen ammattikorkeakoulu,02472,,SosTer,SeAMK Sosiaali- ja terveysala (terveys & hyvinvointi),,
 SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,,02472,Tekn,Tekn,SeAMK Tekniikka,,
 SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,SeinƤjoen ammattikorkeakoulu,02472,,Teka,SeAMK Tekniikka (tekniikan alat),,
 SeinƤjoen ammattikorkeakoulu,SeinƤjoki University of Applied Sciences,SeinƤjoen ammattikorkeakoulu,02472,,Tsto,SeAMK Toimisto,,
@@ -2348,22 +2085,13 @@ Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,70
 Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020170001,Data and information centre,,
 Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020200001,Enheten for internationella Ƥrenden,,
 Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020140001,Freshwater centre,,
-Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020180001,Hallintopalvelut,,
 Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020120001,Havscentret,,
-Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020100010,Ilmastonmuutos,,
 Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020100001,Johdon tuki,,
 Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020100030,KestƤvƤ kiertotalous,,
-Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020130001,Kulutuksen ja tuotannon keskus,,
 Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020160001,Laboratoriecentret,,
-Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020100001,Ledningens stƶd,,
-Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020120001,Merikeskus,,
 Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020150001,Miljƶpolitikscentret,,
 Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020100040,Program om miljƶinformation,,
-Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020100040,Programme for environmental information,,
 Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020100020,Sustainable urbanisation programme,,
-Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020170001,Tietokeskus,,
-Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020140001,Vesikeskus,,
-Suomen ympƤristƶkeskus,Finnish Environment Institute,Finlands miljƶcentral,7020017,,7020150001,YmpƤristƶpolitiikkakeskus,,
 Svenska handelshƶgskolan,Hanken School of Economics,,01910,,,,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8355,Aarresaari-nƤtverket,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,1,7421,AllmƤn adm. och personalƤrenden - Helsingfors,,
@@ -2378,7 +2106,6 @@ Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,0
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8321,Biblioteket,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,3211,Biblioteket - Helsingfors,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,3215,Biblioteket - Vasa,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8321,Biblioteket / projekt,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8525,CCR - Centre for Corporate Responsibility,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4251,CCR - PRME och Green Office,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8531,CERS - Centre for Relationship Marketing and Servi,,
@@ -2442,7 +2169,6 @@ Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,0
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,,1,FƶrvaltningsƤmbetet,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8529,"GODESS - Gender, Organization, Diversity, Equality",,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8565,"HCCG - Hanken Centre for Accounting, Finance and G",,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8565,HCCG - Hanken Centre for Corporate Governance,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8535,HUMLOG - The Humanitarian Logistics and Supply Cha,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska Handelshƶgskolan,01910,5,8535,HUMLOG Institute,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4311,HandelsrƤtt - Helsingfors,,
@@ -2543,7 +2269,6 @@ Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,0
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7441,Studentrekrytering - Helsingfors,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7431,StudiebyrĆ„n - Helsingfors,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,7432,StudiebyrĆ„n - Vasa,,
-Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8744,StudiebyrĆ„n - Vasa / projekt,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,8743,StudiebyrĆ„n / projekt,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,21,Studier och antagning,,
 Svenska handelshƶgskolan,Hanken School of Economics,Svenska handelshƶgskolan,01910,,4711,Svenska - Helsingfors,,
@@ -2834,7 +2559,6 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311414,Automaatioteknologia YAMK,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311414,Automaatioteknologia YAMK;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106030,Autotekniikan tutkinto-ohjelma,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106030,Autotekniikka,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311110,Avoin amk,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4131000,Avoin amk,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105040,Bachelor's Degree Programme in Environmental Engin,,
@@ -2845,40 +2569,31 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,3
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311505,Bioanalytiikan ko;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101030,Bioanalytiikko,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101040,Bioanalytiikko SeinƤjoki,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101030,Bioanalyytikon tutkinto-ohjelma,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311408,Biotuote- ja prosessitekniikan ko,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311408,Biotuote- ja prosessitekniikan ko ;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105030,Biotuotetekniikan tutkinto-ohjelma,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105030,Biotuotetekniikka,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311410,DP in Energy and Environmental Engineering;;,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105040,DP in Environmental Engineering,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311301,DP in International Business ;;,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103030,DP in Media and Arts,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311255,DP in Media and Arts;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311511,DP in Nursing;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106100,Dataosaaminen ja tekoƤly YAMK,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311410,Degree Programme in Energy and Environmental Engineering,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102040,Degree Programme in IBM/MEL (MBA),,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311301,Degree Programme in International Business,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102030,Degree Programme in International Business,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311255,Degree Programme in Media and Arts,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107030,Degree Programme in Nursing,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130000,EDU hallinto,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4121030,EDUn projektit,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311254,Elokuvan ja television ko;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311524,"Ensihoitaja-, KƤtilƶ- ja terveydenhoitajakoulutu",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107040,"Ensihoitaja-, kƤtilƶ- ja terveydenhoitaja",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311504,Ensihoitajakoulutus,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107040,"Ensihoitajan, kƤtilƶn ja terveydenhoitajan tutki",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4132000,Erikoistumiskoulutukset,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4132000,Erikoistumisopinnot,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107050,Fiiliskeskus,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H6,311014,Floworks,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311445,Fysiikka,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104030,Fysiikka ja matematiikka,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311445,Fysiikka;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101050,Fysioterapeutin tutkinto-ohjelma,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101050,Fysioterapeutti,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101060,Fysioterapiaklinikka,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311507,Fysioterapian ko / Fysioterapeutti,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311507,Fysioterapian ko;;,,
@@ -2902,7 +2617,6 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101110,Hyvinvointiteknologia YAMK,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311514,Hyvinvointiteknologian ko / YAMK,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311514,Hyvinvointiteknologian ko YAMK;;,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101110,Hyvinvointiteknologian ylempi tutkinto-ohjelma,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4142030,Impact areas,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4146000,Investoinnit,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102090,KansainvƤlinen myynti ja myynnin johtaminen YAMK,,
@@ -2930,7 +2644,6 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311401,Konetekniikan ko,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311401,Konetekniikan ko ;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106040,Konetekniikan tutkinto-ohjelma,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106040,Konetekniikka,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4150000,Konsernikirjaukset,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H1,H4,Korkeakoulupalvelut,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H4,Korkeakoulupalvelut;;,,
@@ -2949,11 +2662,9 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311599,KƤtilƶkoulutus,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H6,311013,Laadunhallinta ja toiminnanohjaus,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143010,Laadunhallinta ja toiminnanohjaus,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143010,Laatu ja toiminnanohjaus,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311409,Laboratoriotekniikan ko,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311409,Laboratoriotekniikan ko ;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105050,Laboratoriotekniikan tutkinto-ohjelma,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105050,Laboratoriotekniikka,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311300,Liiketalouden ko,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311300,Liiketalouden ko ;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311303,Liiketalouden ko maakunnat;;,,
@@ -2968,7 +2679,6 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H52,Liiketoiminta ja palvelut,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102000,Liiketoiminta ja palvelut hallinto,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102010,"Liiketoiminta ja palvelut laboratoriot, opetustila",,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102010,"Liiketoiminta ja palvelut laboratoriot, opetustilat ja -tarvikkeet",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102020,Liiketoiminta ja palvelut varaukset,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H52,Liiketoiminta ja palvelut;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H3,Liiketoiminta;;,,
@@ -2998,7 +2708,6 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000220,"Media, musiikki ja taide",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103000,"Media, musiikki ja taide hallinto",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103010,"Media, musiikki ja taide laboratoriot, opetustilat",,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103010,"Media, musiikki ja taide laboratoriot, opetustilat ja -tarvikkeet",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103020,"Media, musiikki ja taide varaukset",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103040,Media-ala,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311257,Media-alan koulutus,,
@@ -3009,14 +2718,12 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311450,MetsƤtalouden ko,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311450,MetsƤtalouden ko ;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105060,MetsƤtalouden tutkinto-ohjelma,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105060,MetsƤtalous,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311260,"Musiikin ko, Musiikkipedagogi YAMK",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311260,"Musiikin ko, Musiikkipedagogi YAMK;;",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311251,"Musiikin ko, Musiikkipedagogi;;",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311250,"Musiikin ko, muusikko, Musiikkipedagogi",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311250,"Musiikin ko, muusikko;;",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103080,Musiikin ylempi tutkinto-ohjelma,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103080,Musiikki YAMK,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103050,"Musiikkipedagogi, muusikko",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311080,Opintopalvelut,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144080,Opintopalvelut,,
@@ -3024,7 +2731,6 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144060,Opiskelijarekrytointi,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000204,Oppimisen ja hyvinvoinnin palvelut,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144070,Oppimisen ja hyvinvoinnin tuki,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144070,Oppimisen tuki,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130020,Osaamisen myynti,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4121020,PEDA koulutushankkeet,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311558,Palvelu- ja projektiliiketoiminnan ko YAMK,,
@@ -3046,7 +2752,6 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000222,Rakennettu ympƤristƶ ja biotalous,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105000,Rakennettu ympƤristƶ ja biotalous hallinto,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105010,"Rakennettu ympƤristƶ ja biotalous laboratoriot,",,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105010,"Rakennettu ympƤristƶ ja biotalous laboratoriot, koulutustilat ja -tarvikkeet",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105020,Rakennettu ympƤristƶ ja biotalous varaukset,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311403,Rakennus- ja yhdyskuntatekniikan ko,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311403,Rakennus- ja yhdyskuntatekniikan ko ;;,,
@@ -3054,12 +2759,9 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311407,Rakennusalan tyƶnjohdon ko,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311407,Rakennusalan tyƶnjohdon ko ;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105090,Rakennusalan tyƶnjohdon tutkinto-ohjelma,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105090,Rakennusalan tyƶnjohto,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105070,Rakennusarkkitehdin tutkinto-ohjelma,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311423,Rakennusarkkitehti,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105070,Rakennusarkkitehti,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311423,Rakennusarkkitehti;;,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105080,Rakennustekniikan tutkinto-ohjelma,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105120,Rakentaminen YAMK,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311424,Rakentaminen YAMK;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H53,Rakentaminen ja ympƤristƶteknologia,,
@@ -3077,7 +2779,6 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107060,Sairaanhoitaja,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311502,Sairaanhoitajakoulutus,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311502,Sairaanhoitajakoulutus;;,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107060,Sairaanhoitajan tutkinto-ohjelma,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311417,Software Engineering;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311516,Sosiaali- ja terveysalan johtamisen koulutus (YAMK),,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101120,Sosiaaliala YAMK,,
@@ -3094,26 +2795,21 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311404,SƤhkƶ- ja automaatiotekniikan ko,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311404,SƤhkƶ- ja automaatiotekniikan ko ;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106080,SƤhkƶ- ja automaatiotekniikan tutkinto-ohjelma,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106080,SƤhkƶ- ja automaatiotekniikka,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H6,311600,TAMK Ammatillinen opettajankoulutus,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311600,TAMK Ammatillinen opettajankoulutus;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311110,TAMK EDU;;,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104040,TAMK Kielet ja viestintƤ,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104030,TAMK Matematiikka ja fysiikka,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4142,TKI ja maksullinen palvelutoiminta,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000050,TKI ja maksullinen palvelutoiminta,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4142000,TKI ja maksullinen palvelutoiminta hallinto,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4120000,TKI-palvelut,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000063,TKI-palvelut,,
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4120000,TKI-palvelut,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311800,TKI-palvelut;;,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4121030,TREE TAMK projektit,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H51,"Taide, musiikki ja media",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H51,"Taide, musiikki ja media;;",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311406,"Talotekniikan ko , sƤhkƶinen talotekniikka, LVI-tekniikka",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311411,Talotekniikan ko YAMK;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311406,"Talotekniikan ko, LVI-tekniikka / sƤhkƶinen talo",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105100,Talotekniikan tutkinto-ohjelma,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105100,Talotekniikka,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105110,Talotekniikka YAMK,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311004,Talous- ja projektipalvelut,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311004,Talous- ja projektipalvelut;;,,
@@ -3126,13 +2822,11 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106110,Teknologiajohtaminen koulutus YAMK,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311413,Teknologiaosaamisen johtamisen ko YAMK,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311413,Teknologiaosaamisen johtamisen ko YAMK;;,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106110,Teknologiaosaamisen johtamisen ylempi tutkinto-ohj,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H54,Teollisuusteknologia,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106,Teollisuusteknologia,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000223,Teollisuusteknologia,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106000,Teollisuusteknologia hallinto,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106010,"Teollisuusteknologia laboratoriot, opetustilat ja",,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106010,"Teollisuusteknologia laboratoriot, opetustilat ja -tarvikkeet",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106020,Teollisuusteknologia varaukset,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H54,Teollisuusteknologia;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311518,Terveyden edistƤmisen koulutus YAMK,,
@@ -3141,7 +2835,6 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311524,Terveydenhoitajakoulutus,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107,Terveys,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000225,Terveys,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107070,Terveys YAMK,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107000,Terveys hallinto,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107010,"Terveys laboratoriot, opetustilat ja -tarvikkeet",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107020,Terveys varaukset,,
@@ -3158,24 +2851,20 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106050,TietojenkƤsittely,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311350,TietojenkƤsittely ko ;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311350,TietojenkƤsittelyn ko,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106050,TietojenkƤsittelyn tutkinto-ohjelma,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106120,TietojƤrjestelmƤosaaminen YAMK,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311351,TietojƤrjestelmƤosaamisen ko YAMK,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311351,TietojƤrjestelmƤosaamisen ko YAMK;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106060,Tietotekniikan tutkinto-ohjelma,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106060,Tietotekniikka,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143030,Tila- ja kiinteistƶpalvelut,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143060,Tila- ja kiinteistƶpalvelut TAU,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311041,Tilapalvelut,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311041,Tilapalvelut;;,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130030,Tuotteistetut palvelut,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4120,"Tutkimus, kehitys ja innovaatiotoiminta",,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4120000,"Tutkimus-, kehitys ja innovaatiot",,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4108010,Tutkintoon johtava myytƤvƤ koulutus,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311925,TyƶelƤmƤpalvelut,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130040,Tyƶvoimakoulutus,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4131,TƤydennyskoulutus,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4108010,TƤydennyskoulutus ja myytƤvƤt palvelut (TAMK ED,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4121000,Ulkoiset TKI-projektit,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4121010,Ulkoiset pedagogiset TKI-projektit,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4121,Ulkoiset projektit,,
@@ -3185,7 +2874,6 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130060,Vuokraus,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4142020,Y-Kampus,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311924,Y-Kampus;;,,
-Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4142020,Y-kampus,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4108000,Yhteiset opinnot,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311309,YrittƤjyyden ja tiimijohtamisen ko / Proakatemia,,
 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311309,YrittƤjyyden ja tiimijohtamisen ko / Proakatemia;,,
@@ -3307,10 +2995,8 @@ Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorke
 Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961121,"Amk, Koulutuksen kehittƤminen",,
 Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961124,"Amk, Kumppanuus ja kehittƤminen",,
 Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,965021,"Amk, Kuntoutus, suun terveydenhoito ja diagnostise",,
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,965021,"Amk, Kuntoutus, suun terveydenhoito ja diagnostiset palvelut",,
 Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,963014,"Amk, Kuvataide",,
 Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,962517,"Amk, Liiketoiminta ja palvelut",,
-Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,962517,"Amk, Logistiikka, palvelut ja tuotantotalous",,
 Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,963015,"Amk, Media-ala",,
 Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961132,"Amk, Opiskelijapalvelut",,
 Turun ammattikorkeakoulu,Turku University of Applied Sciences,Turun ammattikorkeakoulu,02509,,961136,"Amk, OppimisympƤristƶpalvelut",,
@@ -3473,7 +3159,6 @@ Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607500,HammaslƤƤk
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601424,Hankintapalvelut,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601108,Hankintapalvelut,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601400,Henkilƶstƶpalvelut,,
-Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602200,"Historian, kultt ja tait tutk lts:n yht",,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607400,Hoitotieteen laitos,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602001,Humanistisen tiedekunnan hallintopalvelu,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602000,Humanistisen tiedekunnan yhteiset,,
@@ -3498,7 +3183,6 @@ Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603207,John Morton
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608301,Johtaminen ja organisointi,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608300,Johtaminen ja yrittƤjyys yhteiset,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601000,Johto,,
-Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601000,Johto ja yliopiston yhteiset,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602216,Kalevala instituutti,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601411,KansainvƤliset palvelut,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601224,KansainvƤliset palvelut,,
@@ -3549,11 +3233,9 @@ Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601410,Koulutuksen
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601223,Koulutuksen toimialan yhteiset,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609900,Koulutussosiologian tutkimuskeskus,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603309,Koulutussosiologian tutkimuskeskus,,
-Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602221,Kulttuurien tutkimus,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602215,Kulttuurientutkimuksen arkisto,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602205,Kulttuurihistoria,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602214,Kulttuurituotannon ja maisemantutkim ko,,
-Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602214,Kulttuurituotanto ja maisemantutkimus,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608202,Kv. liiketoiminta,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606707,Kvanttioptiikan laboratorio,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601212,LLK Biokemian laitos,,
@@ -3607,7 +3289,6 @@ Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601106,Matkapalvelu
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607003,MediCity,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602223,"Median, musiikin ja taiteen tutkimus",,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602206,Mediatutkimus,,
-Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602223,Memuta,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609061,Microelectronics,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606807,Mikroelektroniikka,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609210,Mikrosirukeskus,,
@@ -3624,7 +3305,6 @@ Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606205,Molekulaarin
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602218,Museologia,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602207,Musiikkitiede,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601004,Muutostuki,,
-Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607314,Neurologia,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601651,New Bioresources,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604202,OKL Rauma,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604201,OKL Turku,,
@@ -3713,7 +3393,6 @@ Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601236,TIAS-tutkija
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609060,TRC yhteiset,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608805,TSE Porin yksikƶn kehittƤminen,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602210,Taidehistoria,,
-Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601109,Talouden suunnittelu ja seuranta,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2608204,Talousmaantiede,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601100,Talouspalvelut,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603304,Taloussosiologia,,
@@ -3783,11 +3462,9 @@ Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603020,Yhteisk tdk
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602212,Yleinen historia,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602220,Yleinen historia ja kulttuurihistoria,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602213,Yleinen kirjallisuustiede,,
-Turun yliopisto,University of Turku,ƅbo universitet,10089,,2602220,Yleinen kulttuurihistoria,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2607328,YleislƤƤketiede,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2603205,Yliopistojen Aasia-verkosto,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,02609023,Yliopistokehitys,,
-Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601004,Yliopiston theiset,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2601286,Yliopiston vaikuttavuus,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2604305,YlƤkoulu,,
 Turun yliopisto,University of Turku,ƅbo universitet,10089,,2606018,YmpƤristƶmuutokset,,
@@ -3919,10 +3596,6 @@ Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2739050,Vebic omara
 Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2739100,Vebic taloudellinen toiminta,,
 Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,270900,Vebic-alusta,,
 Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2709000,Vebic-alusta,,
-Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,273900,Vebic-infra,,
-Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2739150,Vebic-infra julkinen rahoitus,,
-Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2739050,Vebic-infra omarahoitus,,
-Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2739100,Vebic-infra taloudellinen toiminta,,
 Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2710250,VenƤjƤn kieli,,
 Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2708140,ViestintƤ ja kumppanuudet,,
 Vaasan yliopisto,University of Vaasa,Vasa universitet,01913,,2760250,ViestintƤ ja kumppanuudet,,
@@ -3937,7 +3610,6 @@ VƤestƶrekisterikeskus,Population Register Center,,02454372,,,,,
 VƤylƤvirasto,Finnish Transport Infrastructure Agency,,10105471,,,,,
 Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,,02535,,,,,
 Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,111,Alla utbildningar pĆ„ hƤlsa och vƤlfƤrd fƶruto,,
-Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,111,Alla utbildningar pĆ„ hƤlsa och vƤlfƤrd fƶrutom idrott,,
 Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,611,Alla utbildningar pĆ„ institutionen fƶr vĆ„rd,,
 Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,27,IT-utbildningen,,
 Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,112,Idrottsutbildningen,,
@@ -3950,7 +3622,6 @@ Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arc
 Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,21,Pedagogik pĆ„ institutionen fƶr ekonomi och affƤ,,
 Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,31,Pedagogik pĆ„ institutionen fƶr energi- och mater,,
 Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,11,Pedagogik pĆ„ institutionen fƶr hƤlsa och vƤlfƃ,,
-Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,11,Pedagogik pĆ„ institutionen fƶr hƤlsa och vƤlfƤrd,,
 Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,41,Pedagogik pĆ„ institutionen fƶr kultur och media,,
 Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,61,Pedagogik pĆ„ institutionen fƶr vĆ„rd,,
 Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,38,Teknikutbildningen,,
@@ -3965,7 +3636,6 @@ Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arc
 Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,91,"ƃā€“vriga, pedagogiska omrĆ„det",,
 Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,912,"ƃā€“vriga, service och tjƤnster",,
 Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,98,"ƃā€“vriga, teknikomrĆ„det",,
-Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,9,Ɩvriga enheter,,
 Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,55,"Ɩvriga, Ekonomi, administration och juridik",,
 Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,511,"Ɩvriga, HV (utom idrott)",,
 Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,53,"Ɩvriga, Humaniora",,
@@ -3974,17 +3644,8 @@ Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,52,"Ɩvriga, Konst- och ku
 Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,51,"Ɩvriga, Pedagogik",,
 Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,512,"Ɩvriga, Service och tjƤnster",,
 Yrkeshƶgskolan Arcada,,Yrkeshƶgskolan Arcada,02535,,58,"Ɩvriga, Teknik",,
-Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,95,"Ɩvriga, ekonomi, administration och juridik",,
-Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,93,"Ɩvriga, humanistiska omrĆ„det",,
-Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,911,"Ɩvriga, hƤlsa och vƤlfƤrdsomrĆ„det",,
-Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,97,"Ɩvriga, informations- och kommunikationsteknik",,
-Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,92,"Ɩvriga, konst- och kulturomrĆ„det",,
-Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,91,"Ɩvriga, pedagogiska omrĆ„det",,
-Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,912,"Ɩvriga, service och tjƤnster",,
-Yrkeshƶgskolan Arcada,Arcada University of Applied Sciences,Yrkeshƶgskolan Arcada,02535,,98,"Ɩvriga, teknikomrĆ„det",,
 Yrkeshƶgskolan Novia,Novia University of Applied Sciences,,10066,,,,,
 Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,AD,Administration,,
-Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,AD,Administration;Administration;,,
 Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,JA,Enheten Jakobstad;Enheten Jakobstad;,,
 Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,VA,Enheten Vasa;Enheten Vasa;,,
 Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,ƃā€¦R,Enheten ƃā€¦boRaseborg;Enheten ƃā€¦boRaseborg;,,
@@ -4012,9 +3673,7 @@ org_name_fi,org_name_en,org_name_sv,org_code,unit_main_code,unit_sub_code,unit_n
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,99,Bioteknikcentrum,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804400,Bioteknikcentrum,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280440,Bioteknikcentrum,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280440,Bioteknikcentrum (BTC),,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804360,Biovet gemensamma,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804360,Biovet. gemensamma,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,95,Biovetenskap,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280430,Biovetenskaper,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,92,Cellbiologi,,
@@ -4023,11 +3682,8 @@ ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280500,Centret fƶr LivslĆ„ngt LƤrande,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2805010,Centret fƶr SprĆ„k och Kommunikation,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,124,Centret fƶr livslĆ„ngt lƤrande,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2805000,Centret fƶr livslĆ„ngt lƤrande,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280500,Centret fƶr livslĆ„ngt lƤrande,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,125,Centret fƶr livslĆ„ngt lƤrande (CLL),,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,128,Centret fƶr sprĆ„k och kommunikation,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2805010,Centret fƶr sprĆ„k och kommunikation,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,129,Centret fƶr sprĆ„k och kommunikation (CSK),,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,130,Datacentralen,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,131,Datacentralen,,
@@ -4060,17 +3716,14 @@ ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802142,DoktorandnƤtverk FPV 2020-2023,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803144,DoktorandnƤtverk FSE 2018-2021,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803145,DoktorandnƤtverk FSE 2020-2023,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804343,DoktorandnƤtverk Functional Marine Biodiversity,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804540,DoktorandnƤtverk IT & Mathematics,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804340,DoktorandnƤtverk Inform. and Struct. Bio,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804340,DoktorandnƤtverk Informational and Structural Bio,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804140,DoktorandnƤtverk Material Research,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804060,DoktorandnƤtverk Material Research,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801144,DoktorandnƤtverk Minority Research,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804341,DoktorandnƤtverk Molecular Biosciences,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801141,DoktorandnƤtverk Old Testament Studies,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803140,DoktorandnƤtverk Realizing Human R.,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803140,DoktorandnƤtverk Realizing Human Rights,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803141,DoktorandnƤtverk School of Business Economics,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801145,DoktorandnƤtverk The Age of Sweden,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,142,Ekonomi,,
@@ -4079,38 +3732,26 @@ ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806131,"Ekonomiservice, FHPT",,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806134,"Ekonomiservice, FNT",,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806132,"Ekonomiservice, FPV",,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806133,"Ekonomiservice, FSE",,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806130,"Ekonomiservice, central",,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806130,"Ekonomiservice,Central",,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806131,"Ekonomiservice,FHPT",,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806134,"Ekonomiservice,FNT",,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806132,"Ekonomiservice,FPV",,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806133,"Ekonomiservice,FSE",,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804121,Energi- och miljƶteknik,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804720,Energiteknik,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,121,Energiteknik Vasa,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,3,Engelska,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,10,Etnologi,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802130,Experience Lab,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802130,Experience lab,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280100,FHPT StƶdtjƤnster,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801010,FHPT Uniservice,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801000,FHPT gemensamma kostnader,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801010,FHPT universitetsservice,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801150,FHPT utbildning,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280400,FNT StƶdtjƤnster,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804000,FNT gemensamma kostnader,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280400,FNT stƶdtjƤnster,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804010,FNT universitetsservice,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802000,FPV Gemensamma kostnader,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280200,FPV StƶdtjƤnster,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280220,FPV Vasa ƃā€“vningsskola (Vƃā€“S),,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802000,FPV gemensamma
kostnader,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280200,FPV stƶdtjƤnster,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802010,FPV universitetsservice,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802150,FPV utbildning,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803000,FSE Gemensamma kostnader,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280300,FSE StƶdtjƤnster,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803000,FSE gemensamma kostnader,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280300,FSE stƶdtjƤnster,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803010,FSE universitetsservice,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803150,FSE utbildning,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,91,Farmaci,, @@ -4154,7 +3795,6 @@ org_name_fi,org_name_en,org_name_sv,org_code,unit_main_code,unit_sub_code,unit_n ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802110,HƤlsovetenskaper,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803113,IAMSR,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806160,ICT service,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806160,ICT-service,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804721,Industriell Ekonomi,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,122,Industriell ekonomi,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,55,Informationsfƶrvaltining,, @@ -4167,7 +3807,6 @@ org_name_fi,org_name_en,org_name_sv,org_code,unit_main_code,unit_sub_code,unit_n ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,110,Insitutionen fƶr kemiteknik gemensamma,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,101,Instit. fƶr informationsteknologi gemensamma,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,105,Institute for Advanced Management System Research (IAMSR),, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803123,Institutet fƶr mƤnskliga rƤttigheter,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,69,Institutet fƶr mƤnskliga rƤttiheter,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803122,Institutet fƶr samhƤllsforskning,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,88,Institutionen fƶr biovetenskaper,, @@ -4195,11 +3834,6 @@ org_name_fi,org_name_en,org_name_sv,org_code,unit_main_code,unit_sub_code,unit_n ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806102,"Lednings- och fakultetsstƶd, FPV",, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806103,"Lednings- och fakultetsstƶd, FSE",, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806100,"Lednings- och fakultetsstƶd, central",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806101,"Lednings- och fakultetsstƶd,FHPT",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806104,"Lednings- och fakultetsstƶd,FNT",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806102,"Lednings- och fakultetsstƶd,FPV",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806103,"Lednings- och fakultetsstƶd,FSE",, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806100,"Lednings- och fakultetsstƶd,central",, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,15,Litteraturvetenskap,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,47,Logopedi,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801111,Logopedi,, @@ -4215,8 +3849,8 @@ org_name_fi,org_name_en,org_name_sv,org_code,unit_main_code,unit_sub_code,unit_n ƅbo Akademi,ƅbo Akademi 
University,ƅbo Akademi,01903,,2803110,Nationalekonomi,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,86,Nationella PET centret,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,87,Nationella PET-Centret,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804200,Nationella PET-centret,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280420,Nationella PET-centret,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804200,Nationella PET-centret,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804130,Naturmaterialteknik,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804711,Naturmaterialteknik,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804100,Naturvetenskaper,, @@ -4229,8 +3863,6 @@ org_name_fi,org_name_en,org_name_sv,org_code,unit_main_code,unit_sub_code,unit_n ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,74,Offentlig fƶrvaltning,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,112,Oorganisk kemi,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,82,Organisk kemi,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804200,PET-centret,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280420,PET-centret,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,117,PappersfƶrƤdling,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802100,Pedagogik,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,21,Pedagogik gemensamma,, @@ -4257,11 +3889,9 @@ org_name_fi,org_name_en,org_name_sv,org_code,unit_main_code,unit_sub_code,unit_n ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,13,Religionsvetenskap,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,6,Ryska,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803121,RƤttsvetenskap,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803121,RƤttsvetenskaper,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,66,RƤttsvetenskapliga institutionen,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,68,RƤttvetenskaper,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,67,RƤttvetenskaper gemensamma,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803122,Samforsk,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,75,Samforsk,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803120,SamhƤllsvetenskaper,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280310,SamhƤllsvetenskaper och ekonomi,, @@ -4269,7 +3899,6 @@ org_name_fi,org_name_en,org_name_sv,org_code,unit_main_code,unit_sub_code,unit_n ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806020,Sibeliusmuseum,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806030,Sjƶhistoriska insitutet,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,18,Sjƶhistoriska institutet,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806030,Sjƶhistoriska institutet,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,28,Slƶjdpedagogik och husliga ekonomins didaktik,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,63,Socialpolitik,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802120,Socialvetenskaper,, @@ -4290,7 +3919,6 @@ org_name_fi,org_name_en,org_name_sv,org_code,unit_main_code,unit_sub_code,unit_n ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,114,Teknisk kemi och reaktionsteknik,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,113,Teknisk polymerkemi,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,146,Teknisk service,, -ƅbo Akademi,ƅbo Akademi University,ƅbo 
Akademi,01903,,2804050,Tekniska serviceenheten,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2801120,Teologi,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,39,Teologisk etik och religionsfilosofi,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,35,Teologiska fakulteten,,
@@ -4313,22 +3941,13 @@ org_name_fi,org_name_en,org_name_sv,org_code,unit_main_code,unit_sub_code,unit_n
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806123,"Utbildningsservice, FPV",,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806124,"Utbildningsservice, FSE",,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806121,"Utbildningsservice, central",,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806121,"Utbildningsservice,Central",,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806122,"Utbildningsservice,FHPT",,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806125,"Utbildningsservice,FNT",,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806123,"Utbildningsservice,FPV",,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806124,"Utbildningsservice,FSE",,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,64,Utvecklingspsykologi,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,30,Vasa ƶvningsskola,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280220,Vasa ƶvningsskola,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,79,Verkstaden,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,25,Vuxenpedagogik,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802200,Vƃā€“S Gemensamma kostnader,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802210,Vƃā€“S Grundutbildning,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802220,Vƃā€“S Gymnasiet,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802200,Vƃā€“S gemensamma kostnader,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802210,Vƃā€“S grundutbildning,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802220,Vƃā€“S gymnasiet,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,31,VƖS gemensamma,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,34,"VƖS, gymnasiet",,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,32,"VƖS, Ć„k 1-6",,
@@ -4337,8 +3956,6 @@ org_name_fi,org_name_en,org_name_sv,org_code,unit_main_code,unit_sub_code,unit_n
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,62,VĆ„rdvetenskap,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806000,ƃā€¦bo Akademis Bibliotek,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280600,ƃā€¦bo Akademis Bibliotek,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806000,ƃā€¦bo Akademis bibliotek,,
-ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280600,ƃā€¦bo Akademis bibliotek,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,136,ƅA gemensamma,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,137,ƅA gemensamma,,
 ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,133,ƅAB,,

From 1c0808f1dac48da04c5aa1672d0e44b8735a5737 Mon Sep 17 00:00:00 2001
From: Toni
Date: Thu, 27 May 2021 17:44:53 +0300
Subject: [PATCH 009/160] fix semicolon in org unit names

---
 .../management/commands/update_orgs.py        |   2 +
 .../resources/organizations/organizations.csv | 232 +++++++++---------
 2 files changed, 118 insertions(+), 116 deletions(-)

diff --git a/src/metax_api/management/commands/update_orgs.py b/src/metax_api/management/commands/update_orgs.py
index fa498b1b..e7a8d26e 100644
--- a/src/metax_api/management/commands/update_orgs.py
+++ b/src/metax_api/management/commands/update_orgs.py
@@ -66,6 +66,8 @@ def compare_and_update(self, other):
         elif self.org_csc is not None and other.org_csc is None:
             other.org_csc = self.org_csc
             changes +=1
+        if self.unit_name.endswith(";;"):
+            self.unit_name = self.unit_name[:-2]

         return match, changes

diff --git a/src/metax_api/tasks/refdata/refdata_indexer/resources/organizations/organizations.csv b/src/metax_api/tasks/refdata/refdata_indexer/resources/organizations/organizations.csv
index c3216311..d04e8d23 100755
--- a/src/metax_api/tasks/refdata/refdata_indexer/resources/organizations/organizations.csv
+++ b/src/metax_api/tasks/refdata/refdata_indexer/resources/organizations/organizations.csv
@@ -560,42 +560,42 @@ Karelia,Karelia University of Applied Sciences,,02469,,6057,6057 Muotoilu,,
 Karelia,Karelia University of Applied Sciences,,02469,,7000,7000 Koulutuksen hallinto/Tikkarinne,,
 Karelia,Karelia University of Applied Sciences,,02469,,8067,8067 Maakuntakorkeakoulu,,
 Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,,02469,,,,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1054,1054 Rehtorin toimisto;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1055,1055 Opintoasiainpalvelut;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1053,1056 KV-palvelut;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1057,1057 Laskentapalvelut;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1060,1060 Henkilƶstƶpalvelut;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1063,1063 Tiedotus- ja markkinointi;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1066,1066 Tietohallinto;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1069,1069 Kirjasto;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1072,1072 KansainvƤlistymispalvelut;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1080,1080 Opetuksen yhteiset henk.kulut;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1970,1970 Projektitoiminta/YPA;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,2051,2051 TKI- ja palveluliiketoiminta;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,3051,3051 MetsƤtalous;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,3054,3054 Maaseutuelinkeinot;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,3101,3101 Energia- ja ympƤristƶtekniikka;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5011,5011 TKI/Projektit/Tekniikan alat;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5031,5031 TietojenkƤsittely;;,,
-Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5051,5051 
Konetekniikka;;,, -Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5054,5054 Rakennustekniikka;;,, -Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5057,5057 Tieto- ja viestintƤtekniikka;;,, -Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5061,5061 Talotekniikka;;,, -Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5064,5064 SƤhkƶtekniikka;;,, -Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5067,5067 YAMK Teknologiaosaamisen johtaminen;;,, -Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5101,5101 Liiketalous;;,, -Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5104,5104 International business;;,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1054,1054 Rehtorin toimisto,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1055,1055 Opintoasiainpalvelut,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1053,1056 KV-palvelut,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1057,1057 Laskentapalvelut,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1060,1060 Henkilƶstƶpalvelut,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1063,1063 Tiedotus- ja markkinointi,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1066,1066 Tietohallinto,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1069,1069 Kirjasto,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1072,1072 KansainvƤlistymispalvelut,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1080,1080 Opetuksen yhteiset henk.kulut,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,1970,1970 Projektitoiminta/YPA,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,2051,2051 TKI- ja palveluliiketoiminta,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,3051,3051 MetsƤtalous,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,3054,3054 Maaseutuelinkeinot,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,3101,3101 Energia- ja ympƤristƶtekniikka,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5011,5011 TKI/Projektit/Tekniikan alat,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5031,5031 TietojenkƤsittely,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5051,5051 Konetekniikka,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5054,5054 Rakennustekniikka,, +Karelia-ammattikorkeakoulu,Karelia University of 
Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5057,5057 Tieto- ja viestintƤtekniikka,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5061,5061 Talotekniikka,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5064,5064 SƤhkƶtekniikka,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5067,5067 YAMK Teknologiaosaamisen johtaminen,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5101,5101 Liiketalous,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5104,5104 International business,, Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5110,5110 YAMK Johtamisen ja liiketoimintaosaamisen kou,, -Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5999,5999 Elinkeinotoiminta/tekniikka;;,, -Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,6051,6051 Medianomikoulutus;;,, -Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,6054,6054 Musiikkipedagogikoulutus;;,, -Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,6151,6151 Restonomi;;,, -Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7051,7051 Sosionomi;;,, -Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7054,7054 Sairaanhoitaja;;,, -Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7057,7057 Fysioterapeutti;;,, -Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7061,7061 Terveydenhoitaja;;,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,5999,5999 Elinkeinotoiminta/tekniikka,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,6051,6051 Medianomikoulutus,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,6054,6054 Musiikkipedagogikoulutus,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,6151,6151 Restonomi,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7051,7051 Sosionomi,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7054,7054 Sairaanhoitaja,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7057,7057 Fysioterapeutti,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7061,7061 Terveydenhoitaja,, Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7063,7063 YAMK Sosiaali- ja terveysalan kehittƤminen j,, -Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7064,7064 YAMK IkƤosaaminen;;,, +Karelia-ammattikorkeakoulu,Karelia University of Applied Sciences,Karelia-ammattikorkeakoulu,02469,,7064,7064 YAMK IkƤosaaminen,, Koneen SƤƤtiƶ,Kone Foundation,,02135371,,,,, Kotimaisten kielten keskus,Institute for the Languages of 
Finland,,02458728,,,,, Kuopion yliopistollisen sairaalan erityisvastuualue,Kuopio University Hospital Catchment Area,,01714953,,,,, @@ -2547,17 +2547,17 @@ Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472120, Taideyliopisto,University of the Arts Helsinki,Konstuniversitetet,10103,,472200,Y/Yhteiset_vi,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,,,,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311402,Ajoneuvotekniikan ko,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311402,Ajoneuvotekniikan ko ;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311402,Ajoneuvotekniikan ko ,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104010,Ammatillinen opettajankoulutus,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4140000,Ammattikorkeakoulun johto,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4140,Ammattikorkeakoulun johto,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4146,Ammattikorkeakoulun yhteiset,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H6,311831,Ammattipedagoginen TKI,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4120010,Ammattipedagoginen TKI,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311831,Ammattipedagoginen TKI;;,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311021,Asiakirjahallinnon ja johdon tuki;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311831,Ammattipedagoginen TKI,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311021,Asiakirjahallinnon ja johdon tuki,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311414,Automaatioteknologia YAMK,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311414,Automaatioteknologia YAMK;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311414,Automaatioteknologia YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106030,Autotekniikan tutkinto-ohjelma,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311110,Avoin amk,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4131000,Avoin amk,, @@ -2565,17 +2565,17 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102030,Bachelor's Degree Programme in International Busin,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103030,Bachelor's Degree Programme in Media and Arts,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107030,Bachelor's Degree Programme in Nursing,, +Tampereen 
ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311505,Bioanalytiikan ko,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311505,Bioanalytiikan ko / Bioanalyytikko,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311505,Bioanalytiikan ko;;,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101030,Bioanalytiikko,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101040,Bioanalytiikko SeinƤjoki,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311408,Biotuote- ja prosessitekniikan ko,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311408,Biotuote- ja prosessitekniikan ko ;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311408,Biotuote- ja prosessitekniikan ko ,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105030,Biotuotetekniikan tutkinto-ohjelma,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311410,DP in Energy and Environmental Engineering;;,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311301,DP in International Business ;;,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311255,DP in Media and Arts;;,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311511,DP in Nursing;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311410,DP in Energy and Environmental Engineering,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311301,DP in International Business ,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311255,DP in Media and Arts,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311511,DP in Nursing,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106100,Dataosaaminen ja tekoƤly YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311410,Degree Programme in Energy and Environmental Engineering,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102040,Degree Programme in IBM/MEL (MBA),, @@ -2583,7 +2583,7 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,3 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311255,Degree Programme in Media and Arts,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130000,EDU hallinto,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4121030,EDUn projektit,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311254,Elokuvan ja television ko;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311254,Elokuvan ja television ko,, 
Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311524,"Ensihoitaja-, KƤtilƶ- ja terveydenhoitajakoulutu",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107040,"Ensihoitaja-, kƤtilƶ- ja terveydenhoitaja",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311504,Ensihoitajakoulutus,, @@ -2591,24 +2591,24 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107050,Fiiliskeskus,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H6,311014,Floworks,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311445,Fysiikka,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311445,Fysiikka,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104030,Fysiikka ja matematiikka,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311445,Fysiikka;;,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101050,Fysioterapeutin tutkinto-ohjelma,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101060,Fysioterapiaklinikka,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311507,Fysioterapian ko,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311507,Fysioterapian ko / Fysioterapeutti,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311507,Fysioterapian ko;;,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4141030,Hallintopalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311021,Hallintopalvelut (ja tapahtumapalvelut),, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311030,Hankintapalvelut,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311030,Hankintapalvelut;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311030,Hankintapalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4141,Henkilƶstƶ- ja viestintƤpalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000052,Henkilƶstƶ- ja viestintƤpalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H1,H2,Henkilƶstƶhallinto,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H2,Henkilƶstƶhallinto;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H2,Henkilƶstƶhallinto,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4141020,Henkilƶstƶn kehittƤminen,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H2,311003,Henkilƶstƶpalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen 
ammattikorkeakoulu,02630,,4141000,Henkilƶstƶpalvelut,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311003,Henkilƶstƶpalvelut;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311003,Henkilƶstƶpalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101,Hyvinvointi ja terveysteknologia,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000224,Hyvinvointi ja terveysteknologia,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101010,"Hyvinvointi ja terveysteknologia laboratoriot, opetustilat ja -tarvikkeet",, @@ -2616,58 +2616,58 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101000,Hyvinvointi ja terveyteknologia hallinto,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101110,Hyvinvointiteknologia YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311514,Hyvinvointiteknologian ko / YAMK,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311514,Hyvinvointiteknologian ko YAMK;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311514,Hyvinvointiteknologian ko YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4142030,Impact areas,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4146000,Investoinnit,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102090,KansainvƤlinen myynti ja myynnin johtaminen YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311302,KansainvƤlisen myynnin ja myynnin johtamisen koul,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311090,KansainvƤliset palvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144030,KansainvƤliset palvelut,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311090,KansainvƤliset palvelut;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311090,KansainvƤliset palvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H1,H6,KehittƤminen,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144000,KehittƤminen,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311014,KehittƤminen (ent. Floworks);;,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H6,KehittƤminen;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H6,KehittƤminen,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311014,KehittƤminen (ent. 
Floworks),, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H6,311017,KehittƤmisyksikkƶ,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311017,KehittƤmisyksikkƶ;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311017,KehittƤmisyksikkƶ,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104040,Kielet,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311112,Kielipalvelut,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311112,Kielipalvelut;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311112,Kielipalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311040,Kiinteistƶpalvelut,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311040,Kiinteistƶpalvelut;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311040,Kiinteistƶpalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4145,Kirjasto,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4145000,Kirjasto,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311050,Kirjasto- ja tietopalvelut,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311050,Kirjasto- ja tietopalvelut;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311050,Kirjasto- ja tietopalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311529,Kliinisen asiantuntijan koulutus YAMK,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311529,Kliinisen asiantuntijan koulutus YAMK;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311529,Kliinisen asiantuntijan koulutus YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311401,Konetekniikan ko,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311401,Konetekniikan ko ;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311401,Konetekniikan ko ,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106040,Konetekniikan tutkinto-ohjelma,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4150000,Konsernikirjaukset,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H1,H4,Korkeakoulupalvelut,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H4,Korkeakoulupalvelut;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H4,Korkeakoulupalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144,Koulutuksen ja oppimisen palvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen 
ammattikorkeakoulu,02630,,1010000051,Koulutuksen ja oppimisen palvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144010,Koulutuksen kehittƤmispalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144020,Koulutuksen tukipalvelut,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311100,Koulutuksen tukipalvelut;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311100,Koulutuksen tukipalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4108,Koulutuksen yhteiset,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H1,H5,Koulutus ja TKI-toiminta,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H5,Koulutus ja TKI-toiminta;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H5,Koulutus ja TKI-toiminta,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130050,Koulutusvienti,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311252,Kuvataiteen ko,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311252,Kuvataiteen ko ;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311252,Kuvataiteen ko ,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102050,Kykylaakso,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311599,KƤtilƶkoulutus,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H6,311013,Laadunhallinta ja toiminnanohjaus,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143010,Laadunhallinta ja toiminnanohjaus,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311409,Laboratoriotekniikan ko,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311409,Laboratoriotekniikan ko ;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311409,Laboratoriotekniikan ko ,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105050,Laboratoriotekniikan tutkinto-ohjelma,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311300,Liiketalouden ko,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311300,Liiketalouden ko ;;,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311303,Liiketalouden ko maakunnat;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311300,Liiketalouden ko ,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311303,Liiketalouden ko maakunnat,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102060,Liiketalous,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen 
ammattikorkeakoulu,02630,,4130080,Liiketoiminnan TKI-projektit,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130070,Liiketoiminnan kehittƤminen,, @@ -2676,34 +2676,34 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H1,H3 Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102,Liiketoiminta,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130,Liiketoiminta,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000221,Liiketoiminta,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H3,Liiketoiminta,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H52,Liiketoiminta ja palvelut,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H52,Liiketoiminta ja palvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102000,Liiketoiminta ja palvelut hallinto,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102010,"Liiketoiminta ja palvelut laboratoriot, opetustila",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102020,Liiketoiminta ja palvelut varaukset,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H52,Liiketoiminta ja palvelut;;,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H3,Liiketoiminta;;,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143,Liiketoimintapalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143000,Liiketoimintapalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000049,Liiketoimintapalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144040,Liikkuvuusprojektit,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311022,Liikuntapalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144090,Liikuntapalvelut,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311022,Liikuntapalvelut;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311022,Liikuntapalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103060,MA in Screenwriting YAMK,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311921,MD in Information Technology;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311921,MD in Information Technology,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311558,MD in International Business Management / in Educa,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311456,MD in Management 
and Economy in the International,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311422,MD in Risk Management and Circular Economy;;,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311259,MD in Screenwriting YAMK;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311422,MD in Risk Management and Circular Economy,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311259,MD in Screenwriting YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311259,MDP in Screenwriting YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4142010,Maakuntakorkeakoulu,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311456,Master's Degree Programme in Management and Economy in the International Forest Sector,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311921,MasterĀ“s Degree in Information Technology,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311444,Matematiikka,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311444,Matematiikka;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311444,Matematiikka,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H2,311007,Matkapalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4141010,Matkapalvelut,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311007,Matkapalvelut;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311007,Matkapalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103,"Media, musiikki ja taide",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000220,"Media, musiikki ja taide",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103000,"Media, musiikki ja taide hallinto",, @@ -2711,23 +2711,23 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103020,"Media, musiikki ja taide varaukset",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103040,Media-ala,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311257,Media-alan koulutus,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311257,Media-alan koulutus;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311257,Media-alan koulutus,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103070,"Mediatuottaminen, Emerging Media YAMK",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311256,Mediatuottamisen ko YAMK,, -Tampereen ammattikorkeakoulu,Tampere University of Applied 
Sciences,Tampereen ammattikorkeakoulu,02630,,311256,Mediatuottamisen ko YAMK;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311256,Mediatuottamisen ko YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311450,MetsƤtalouden ko,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311450,MetsƤtalouden ko ;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311450,MetsƤtalouden ko ,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105060,MetsƤtalouden tutkinto-ohjelma,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311251,"Musiikin ko, Musiikkipedagogi",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311260,"Musiikin ko, Musiikkipedagogi YAMK",, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311260,"Musiikin ko, Musiikkipedagogi YAMK;;",, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311251,"Musiikin ko, Musiikkipedagogi;;",, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311260,"Musiikin ko, Musiikkipedagogi YAMK",, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311250,"Musiikin ko, muusikko",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311250,"Musiikin ko, muusikko, Musiikkipedagogi",, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311250,"Musiikin ko, muusikko;;",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103080,Musiikin ylempi tutkinto-ohjelma,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4103050,"Musiikkipedagogi, muusikko",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311080,Opintopalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144080,Opintopalvelut,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311080,Opintopalvelut;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311080,Opintopalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144060,Opiskelijarekrytointi,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000204,Oppimisen ja hyvinvoinnin palvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144070,Oppimisen ja hyvinvoinnin tuki,, @@ -2736,16 +2736,16 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311558,Palvelu- ja projektiliiketoiminnan ko YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130090,Palvelujen katteeton myynti,, Tampereen ammattikorkeakoulu,Tampere University of Applied 
Sciences,,02630,H52,311559,Palveluliiketoiminnan johtamisen koulutus YAMK,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311559,Palveluliiketoiminnan johtamisen koulutus YAMK;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311559,Palveluliiketoiminnan johtamisen koulutus YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311551,Palveluliiketoiminnan ko,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311551,Palveluliiketoiminnan ko;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311551,Palveluliiketoiminnan ko,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104,Pedagogiset ratkaisut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000219,Pedagogiset ratkaisut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104000,Pedagogiset ratkaisut hallinto,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4104020,Pedagogiset ratkaisut varaukset,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4152000,Perusrahoituksen kirjaukset,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102070,Proakatemia,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311310,Proakatemia YAMK;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311310,Proakatemia YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102100,Proakatemia/YrittƤjyyden YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4150030,Rahastojen poistot,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105,Rakennettu ympƤristƶ ja biotalous,, @@ -2754,65 +2754,65 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105010,"Rakennettu ympƤristƶ ja biotalous laboratoriot,",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105020,Rakennettu ympƤristƶ ja biotalous varaukset,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311403,Rakennus- ja yhdyskuntatekniikan ko,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311403,Rakennus- ja yhdyskuntatekniikan ko ;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311403,Rakennus- ja yhdyskuntatekniikan ko ,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105080,Rakennus- ja yhdyskuntatekniikka,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311407,Rakennusalan tyƶnjohdon ko,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311407,Rakennusalan 
tyƶnjohdon ko ;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311407,Rakennusalan tyƶnjohdon ko ,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105090,Rakennusalan tyƶnjohdon tutkinto-ohjelma,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105070,Rakennusarkkitehdin tutkinto-ohjelma,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311423,Rakennusarkkitehti,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311423,Rakennusarkkitehti;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311423,Rakennusarkkitehti,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105120,Rakentaminen YAMK,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311424,Rakentaminen YAMK;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311424,Rakentaminen YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H53,Rakentaminen ja ympƤristƶteknologia,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H53,Rakentaminen ja ympƤristƶteknologia;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H53,Rakentaminen ja ympƤristƶteknologia,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311411,Rakentamisen ja talotekniikan ko YAMK,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H1,Rehtorin toimisto;;,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311000,Rehtorin toimisto;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H1,Rehtorin toimisto,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311000,Rehtorin toimisto,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102080,Restonomi,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4102110,Restonomi YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105130,Risk Management & Circular Economy (YAMK),, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101080,Rƶntgenhoitaja,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101100,Rƶntgenhoitaja SeinƤjoki,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311510,Rƶntgenhoitajakoulutus,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311510,Rƶntgenhoitajakoulutus;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311510,Rƶntgenhoitajakoulutus,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107060,Sairaanhoitaja,, Tampereen ammattikorkeakoulu,Tampere University of 
Applied Sciences,,02630,H55,311502,Sairaanhoitajakoulutus,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311502,Sairaanhoitajakoulutus;;,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311417,Software Engineering;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311502,Sairaanhoitajakoulutus,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311417,Software Engineering,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311516,Sosiaali- ja terveysalan johtamisen koulutus (YAMK),, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101120,Sosiaaliala YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101070,Sosionomi,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311503,Sosionomikoulutus,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311503,Sosionomikoulutus;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311503,Sosionomikoulutus,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311517,Sosionomin koulutus YAMK,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311517,Sosionomin koulutus YAMK;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311517,Sosionomin koulutus YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4101090,Sote monimuunto,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4147,Strategiarahoitus,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4147000,Strategiarahoitus OKM,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4120020,Strateginen TK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4144050,Summer School,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311404,SƤhkƶ- ja automaatiotekniikan ko,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311404,SƤhkƶ- ja automaatiotekniikan ko ;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311404,SƤhkƶ- ja automaatiotekniikan ko ,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106080,SƤhkƶ- ja automaatiotekniikan tutkinto-ohjelma,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H6,311600,TAMK Ammatillinen opettajankoulutus,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311600,TAMK Ammatillinen opettajankoulutus;;,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311110,TAMK EDU;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311600,TAMK Ammatillinen opettajankoulutus,, 
+Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311110,TAMK EDU,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4142,TKI ja maksullinen palvelutoiminta,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000050,TKI ja maksullinen palvelutoiminta,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4142000,TKI ja maksullinen palvelutoiminta hallinto,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000063,TKI-palvelut,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311800,TKI-palvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4120000,TKI-palvelut,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311800,TKI-palvelut;;,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H51,"Taide, musiikki ja media",, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H51,"Taide, musiikki ja media;;",, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H51,"Taide, musiikki ja media",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H53,311406,"Talotekniikan ko , sƤhkƶinen talotekniikka, LVI-tekniikka",, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311411,Talotekniikan ko YAMK;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311411,Talotekniikan ko YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311406,"Talotekniikan ko, LVI-tekniikka / sƤhkƶinen talo",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105100,Talotekniikan tutkinto-ohjelma,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4105110,Talotekniikka YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311004,Talous- ja projektipalvelut,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311004,Talous- ja projektipalvelut;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311004,Talous- ja projektipalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143020,"Talous-, projekti- ja hankintapalvelut",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4147010,Tampere3,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4100,Tampereen ammattikorkeakoulu,, @@ -2821,16 +2821,16 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4150,Tekniset kustannuspaikat,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106110,Teknologiajohtaminen koulutus YAMK,, 
Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311413,Teknologiaosaamisen johtamisen ko YAMK,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311413,Teknologiaosaamisen johtamisen ko YAMK;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311413,Teknologiaosaamisen johtamisen ko YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H54,Teollisuusteknologia,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106,Teollisuusteknologia,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,1010000223,Teollisuusteknologia,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H54,Teollisuusteknologia,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106000,Teollisuusteknologia hallinto,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106010,"Teollisuusteknologia laboratoriot, opetustilat ja",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106020,Teollisuusteknologia varaukset,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H54,Teollisuusteknologia;;,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311518,Terveyden edistƤmisen koulutus YAMK,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311518,Terveyden edistƤmisen koulutus YAMK;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311518,Terveyden edistƤmisen koulutus YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107070,Terveyden edistƤmisen ylempi tutkinto-ohjelma,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H55,311524,Terveydenhoitajakoulutus,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107,Terveys,, @@ -2839,26 +2839,26 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107010,"Terveys laboratoriot, opetustilat ja -tarvikkeet",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4107020,Terveys varaukset,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H5,H55,Terveys- ja sosiaalipalvelut,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311516,Terveys- ja sosiaalipalvelut YAMK;;,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H55,Terveys- ja sosiaalipalvelut;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,H55,Terveys- ja sosiaalipalvelut,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311516,Terveys- ja sosiaalipalvelut YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen 
ammattikorkeakoulu,02630,,4106070,TiTe Software Engineering,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H54,311405,Tieto- ja viestintƤtekniikan ko,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311405,Tieto- ja viestintƤtekniikan ko ;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311405,Tieto- ja viestintƤtekniikan ko ,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311060,Tietohallinto,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143040,Tietohallinto,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143050,Tietohallinto,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311060,Tietohallinto;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311060,Tietohallinto,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106050,TietojenkƤsittely,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311350,TietojenkƤsittely ko ;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311350,TietojenkƤsittely ko ,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311350,TietojenkƤsittelyn ko,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106120,TietojƤrjestelmƤosaaminen YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H51,311351,TietojƤrjestelmƤosaamisen ko YAMK,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311351,TietojƤrjestelmƤosaamisen ko YAMK;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311351,TietojƤrjestelmƤosaamisen ko YAMK,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4106060,Tietotekniikan tutkinto-ohjelma,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143030,Tila- ja kiinteistƶpalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4143060,Tila- ja kiinteistƶpalvelut TAU,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H3,311041,Tilapalvelut,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311041,Tilapalvelut;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311041,Tilapalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130030,Tuotteistetut palvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4120,"Tutkimus, kehitys ja innovaatiotoiminta",, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4108010,Tutkintoon johtava myytƤvƤ koulutus,, @@ -2870,10 +2870,10 @@ Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen am Tampereen ammattikorkeakoulu,Tampere 
University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4121,Ulkoiset projektit,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H4,311031,ViestintƤpalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4141040,ViestintƤpalvelut,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311031,ViestintƤpalvelut;;,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311031,ViestintƤpalvelut,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4130060,Vuokraus,, +Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311924,Y-Kampus,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4142020,Y-Kampus,, -Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311924,Y-Kampus;;,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,4108000,Yhteiset opinnot,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,,02630,H52,311309,YrittƤjyyden ja tiimijohtamisen ko / Proakatemia,, Tampereen ammattikorkeakoulu,Tampere University of Applied Sciences,Tampereen ammattikorkeakoulu,02630,,311309,YrittƤjyyden ja tiimijohtamisen ko / Proakatemia;,, From edb002c222d4c05f5c927a019481d5491be18058 Mon Sep 17 00:00:00 2001 From: Toni Date: Thu, 27 May 2021 17:46:22 +0300 Subject: [PATCH 010/160] format with black --- src/metax_api/management/commands/update_orgs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/metax_api/management/commands/update_orgs.py b/src/metax_api/management/commands/update_orgs.py index e7a8d26e..64d0ca52 100644 --- a/src/metax_api/management/commands/update_orgs.py +++ b/src/metax_api/management/commands/update_orgs.py @@ -65,7 +65,7 @@ def compare_and_update(self, other): changes += 1 elif self.org_csc is not None and other.org_csc is None: other.org_csc = self.org_csc - changes +=1 + changes += 1 if self.unit_name.endswith(";;"): self.unit_name = self.unit_name[:-2] @@ -81,7 +81,7 @@ def __post_init__(self): def get_orgs_from_api() -> List[Organization]: res = requests.get( - "https://researchfi-api-production-researchfi.rahtiapp.fi/portalapi/organization/_search" + "https://researchfi-api-production-researchfi.rahtiapp.fi/portalapi/organization/_search?size=100" ) data = res.json() From f0111b6b03d30f4d69d009cedb21a426a60455b1 Mon Sep 17 00:00:00 2001 From: Toni Date: Thu, 27 May 2021 17:53:31 +0300 Subject: [PATCH 011/160] remove unnecessary import --- src/metax_api/management/commands/update_orgs.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/metax_api/management/commands/update_orgs.py b/src/metax_api/management/commands/update_orgs.py index 64d0ca52..ef91342e 100644 --- a/src/metax_api/management/commands/update_orgs.py +++ b/src/metax_api/management/commands/update_orgs.py @@ -1,6 +1,5 @@ import csv import logging -from collections import OrderedDict from dataclasses import asdict, dataclass, field from typing import List From 602a9d97843fd8f66701c811066d43bec4440c86 Mon Sep 17 00:00:00 2001 From: Toni Date: Wed, 2 Jun 2021 13:01:55 +0300 Subject: [PATCH 012/160] fix encoding errors in csv --- src/metax_api/management/commands/update_orgs.py | 12 ++++++++++-- 
.../resources/organizations/organizations.csv | 16 +++++++--------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/src/metax_api/management/commands/update_orgs.py b/src/metax_api/management/commands/update_orgs.py index ef91342e..75bc7650 100644 --- a/src/metax_api/management/commands/update_orgs.py +++ b/src/metax_api/management/commands/update_orgs.py @@ -34,8 +34,6 @@ class Organization: org_csc: str = field(default="", compare=False) unit_main_code: str = field(default="", compare=True) - - def compare_and_update(self, other): changes = 0 match = self == other @@ -168,5 +166,15 @@ def handle(self, *args, **options): ) writer.writeheader() for i in csv_serialized: + # Don't write header as row + if i["org_name_fi"] == "org_name_fi": + continue + # Malformed values from TTV api + if i["unit_name"] == "Lƃā€žĆƒā€žKETIETEELLINEN TIEDEKUNTA": + continue + if "ƃā€¦bo" in i["unit_name"]: + i["unit_name"] = str(i["unit_name"]).replace("ƃā€¦bo", "ƅbo") + if "ƃā€“S" in i["unit_name"]: + i["unit_name"] = str(i["unit_name"]).replace("ƃā€“S", "Ɩ") writer.writerow(i) logger.info("successfully updated organization csv") diff --git a/src/metax_api/tasks/refdata/refdata_indexer/resources/organizations/organizations.csv b/src/metax_api/tasks/refdata/refdata_indexer/resources/organizations/organizations.csv index d04e8d23..0442f975 100755 --- a/src/metax_api/tasks/refdata/refdata_indexer/resources/organizations/organizations.csv +++ b/src/metax_api/tasks/refdata/refdata_indexer/resources/organizations/organizations.csv @@ -1691,7 +1691,6 @@ Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240037,Logopedi Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2400370,Logopedia,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240200,Luonnontieteellinen tiedekunta yhteiset,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,2402000,Luonnontieteellinen tiedekunta yhteiset,, -Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24030,Lƃā€žĆƒā€žKETIETEELLINEN TIEDEKUNTA,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,2410,24030,LƄƄKETIETEELLINEN TIEDEKUNTA,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,24093,LƤhipalvelut,, Oulun yliopisto,University of Oulu,UleĆ„borgs universitet,01904,,240300,LƤƤketieteellinen tiedekunta yhteiset,, @@ -3648,7 +3647,7 @@ Yrkeshƶgskolan Novia,Novia University of Applied Sciences,,10066,,,,, Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,AD,Administration,, Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,JA,Enheten Jakobstad;Enheten Jakobstad;,, Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,VA,Enheten Vasa;Enheten Vasa;,, -Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,ƃā€¦R,Enheten ƃā€¦boRaseborg;Enheten ƃā€¦boRaseborg;,, +Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,ƃā€¦R,Enheten ƅboRaseborg;Enheten ƅboRaseborg;,, Yrkeshƶgskolan Novia,,Yrkeshƶgskolan Novia,10066,,FoU,Forskning och utveckling,, Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,BE,Institutionen fƶr bioekonomi,, Yrkeshƶgskolan Novia,Novia University of Applied Sciences,Yrkeshƶgskolan Novia,10066,,FE,Institutionen fƶr fƶretagsekonomi,, @@ -3661,7 +3660,6 @@ Yrkeshƶgskolan Novia,,Yrkeshƶgskolan Novia,10066,,NV,Naturvetenskap,, Yrkeshƶgskolan Novia,,Yrkeshƶgskolan 
Novia,10066,,SF,Sjƶfart,, Yrkeshƶgskolan Novia,,Yrkeshƶgskolan Novia,10066,,TE,Teknik,, Yrkeshƶgskolan Novia,,Yrkeshƶgskolan Novia,10066,,VS,VĆ„rd och sociala,, -org_name_fi,org_name_en,org_name_sv,org_code,unit_main_code,unit_sub_code,unit_name,org_isni,org_csc ƅbo Akademi,ƅbo Akademi University,,01903,,,,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,22,AllmƤn pedagogik,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,111,Analytisk kemi,, @@ -3747,7 +3745,7 @@ org_name_fi,org_name_en,org_name_sv,org_code,unit_main_code,unit_sub_code,unit_n ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2804010,FNT universitetsservice,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802000,FPV Gemensamma kostnader,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280200,FPV StƶdtjƤnster,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280220,FPV Vasa ƃā€“vningsskola (Vƃā€“S),, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280220,FPV Vasa ƃā€“vningsskola (VƖ),, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802010,FPV universitetsservice,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802150,FPV utbildning,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2803000,FSE Gemensamma kostnader,, @@ -3945,17 +3943,17 @@ org_name_fi,org_name_en,org_name_sv,org_code,unit_main_code,unit_sub_code,unit_n ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,30,Vasa ƶvningsskola,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,79,Verkstaden,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,25,Vuxenpedagogik,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802200,Vƃā€“S Gemensamma kostnader,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802210,Vƃā€“S Grundutbildning,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802220,Vƃā€“S Gymnasiet,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802200,VƖ Gemensamma kostnader,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802210,VƖ Grundutbildning,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2802220,VƖ Gymnasiet,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,31,VƖS gemensamma,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,34,"VƖS, gymnasiet",, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,32,"VƖS, Ć„k 1-6",, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,33,"VƖS, Ć„k 7-9",, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,120,VƤrmeteknik,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,62,VĆ„rdvetenskap,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806000,ƃā€¦bo Akademis Bibliotek,, -ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280600,ƃā€¦bo Akademis Bibliotek,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,2806000,ƅbo Akademis Bibliotek,, +ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,280600,ƅbo Akademis Bibliotek,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,136,ƅA gemensamma,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,137,ƅA gemensamma,, ƅbo Akademi,ƅbo Akademi University,ƅbo Akademi,01903,,133,ƅAB,, From f6303e0f42b7bec69b5975a180b5a02998c684e4 Mon Sep 17 00:00:00 2001 From: Toni Date: Wed, 2 Jun 2021 13:09:28 +0300 Subject: [PATCH 013/160] run black --- src/metax_api/management/commands/update_orgs.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/metax_api/management/commands/update_orgs.py b/src/metax_api/management/commands/update_orgs.py index 75bc7650..fb81d844 100644 --- 
a/src/metax_api/management/commands/update_orgs.py +++ b/src/metax_api/management/commands/update_orgs.py @@ -137,7 +137,9 @@ def handle(self, *args, **options): for a in api_orgs: match, changes = i.compare_and_update(a) if match and changes > 0: - logger.info(f"updated org {i.org_name_fi} with {changes} changes in unit: {a.unit_name}") + logger.info( + f"updated org {i.org_name_fi} with {changes} changes in unit: {a.unit_name}" + ) union.append(i) # add missing orgs to local ones added = 0 From c4f371c1d361dec9fcacfae67a4c23354d69568a Mon Sep 17 00:00:00 2001 From: Toni Date: Tue, 4 May 2021 14:18:49 +0300 Subject: [PATCH 014/160] add Fairdata tracking header --- docs/v1/source/_templates/layout.html | 7 +++++++ docs/v2/source/_templates/layout.html | 7 +++++++ 2 files changed, 14 insertions(+) create mode 100644 docs/v1/source/_templates/layout.html create mode 100644 docs/v2/source/_templates/layout.html diff --git a/docs/v1/source/_templates/layout.html b/docs/v1/source/_templates/layout.html new file mode 100644 index 00000000..ca18b558 --- /dev/null +++ b/docs/v1/source/_templates/layout.html @@ -0,0 +1,7 @@ +{% extends "!layout.html" %} + +{% block extrahead %} + + + +{% endblock %} \ No newline at end of file diff --git a/docs/v2/source/_templates/layout.html b/docs/v2/source/_templates/layout.html new file mode 100644 index 00000000..ca18b558 --- /dev/null +++ b/docs/v2/source/_templates/layout.html @@ -0,0 +1,7 @@ +{% extends "!layout.html" %} + +{% block extrahead %} + + + +{% endblock %} \ No newline at end of file From 01816e0a7ad2eb7ab9f808022aea053a3eb0d3e1 Mon Sep 17 00:00:00 2001 From: Toni Date: Tue, 4 May 2021 14:38:10 +0300 Subject: [PATCH 015/160] add optional PyYaml dependency --- poetry.lock | 42 +++++++++++++++++++++++++++++++++++++++++- pyproject.toml | 2 ++ 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/poetry.lock b/poetry.lock index 390f7345..56b790f8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -755,6 +755,14 @@ category = "main" optional = false python-versions = "*" +[[package]] +name = "pyyaml" +version = "5.4.1" +description = "YAML parser and emitter for Python" +category = "main" +optional = true +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" + [[package]] name = "rdflib" version = "5.0.0" @@ -1098,11 +1106,12 @@ testing = ["pytest (>=4.6)", "pytest-checkdocs (>=1.2.3)", "pytest-flake8", "pyt [extras] docs = ["Sphinx", "sphinx-autobuild", "sphinx-rtd-theme"] simplexquery = ["python-simplexquery"] +swagger = ["PyYAML"] [metadata] lock-version = "1.1" python-versions = "^3.7" -content-hash = "8fba4a295a68a34d35e8f3da03922740c3fdfbe84edf00945c7c67d0016b6115" +content-hash = "428eeb62e466c2d516b96fb115be858ef72eaa17d932fcf714f8ba7da863ffaf" [metadata.files] alabaster = [ @@ -1510,6 +1519,37 @@ pytz = [ {file = "pytz-2021.1-py2.py3-none-any.whl", hash = "sha256:eb10ce3e7736052ed3623d49975ce333bcd712c7bb19a58b9e2089d4057d0798"}, {file = "pytz-2021.1.tar.gz", hash = "sha256:83a4a90894bf38e243cf052c8b58f381bfe9a7a483f6a9cab140bc7f702ac4da"}, ] +pyyaml = [ + {file = "PyYAML-5.4.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922"}, + {file = "PyYAML-5.4.1-cp27-cp27m-win32.whl", hash = "sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393"}, + {file = "PyYAML-5.4.1-cp27-cp27m-win_amd64.whl", hash = "sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8"}, + {file = "PyYAML-5.4.1-cp27-cp27mu-manylinux1_x86_64.whl", 
hash = "sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185"}, + {file = "PyYAML-5.4.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253"}, + {file = "PyYAML-5.4.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc"}, + {file = "PyYAML-5.4.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347"}, + {file = "PyYAML-5.4.1-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541"}, + {file = "PyYAML-5.4.1-cp36-cp36m-win32.whl", hash = "sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5"}, + {file = "PyYAML-5.4.1-cp36-cp36m-win_amd64.whl", hash = "sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df"}, + {file = "PyYAML-5.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018"}, + {file = "PyYAML-5.4.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63"}, + {file = "PyYAML-5.4.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa"}, + {file = "PyYAML-5.4.1-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0"}, + {file = "PyYAML-5.4.1-cp37-cp37m-win32.whl", hash = "sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b"}, + {file = "PyYAML-5.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf"}, + {file = "PyYAML-5.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46"}, + {file = "PyYAML-5.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb"}, + {file = "PyYAML-5.4.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247"}, + {file = "PyYAML-5.4.1-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc"}, + {file = "PyYAML-5.4.1-cp38-cp38-win32.whl", hash = "sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc"}, + {file = "PyYAML-5.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696"}, + {file = "PyYAML-5.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77"}, + {file = "PyYAML-5.4.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183"}, + {file = "PyYAML-5.4.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122"}, + {file = "PyYAML-5.4.1-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6"}, + {file = "PyYAML-5.4.1-cp39-cp39-win32.whl", hash = "sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10"}, + {file = "PyYAML-5.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db"}, + {file = "PyYAML-5.4.1.tar.gz", hash = "sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e"}, +] rdflib = 
[ {file = "rdflib-5.0.0-py3-none-any.whl", hash = "sha256:88208ea971a87886d60ae2b1a4b2cdc263527af0454c422118d43fe64b357877"}, {file = "rdflib-5.0.0.tar.gz", hash = "sha256:78149dd49d385efec3b3adfbd61c87afaf1281c30d3fcaf1b323b34f603fb155"}, diff --git a/pyproject.toml b/pyproject.toml index d18f17f2..77e3d5c8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,6 +29,7 @@ Sphinx = {version = "^3.5.4", optional = true} sphinx-autobuild = {version = "^2021.3.14", optional = true} sphinx-rtd-theme = {version = "^0.5.2", optional = true} python-box = "^5.3.0" +PyYAML = { version = "^5.4.1", optional = true} [tool.poetry.dev-dependencies] responses = "^0.13.2" @@ -46,6 +47,7 @@ coverage = {version = "^5.5", extras = ["toml"]} [tool.poetry.extras] simplexquery = ["python-simplexquery"] docs = ["Sphinx", "sphinx-autobuild", "sphinx-rtd-theme"] +swagger = ["PyYAML"] [tool.isort] profile = "black" From 77dec4b3be767361658849b98feba4af782fbb35 Mon Sep 17 00:00:00 2001 From: Toni Date: Tue, 4 May 2021 14:38:30 +0300 Subject: [PATCH 016/160] Tracking header for swagger --- swagger/swagger-yaml-to-html.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/swagger/swagger-yaml-to-html.py b/swagger/swagger-yaml-to-html.py index 287201fa..d2208422 100755 --- a/swagger/swagger-yaml-to-html.py +++ b/swagger/swagger-yaml-to-html.py @@ -35,6 +35,8 @@ + + Swagger UI From 11af2c89c046445d6aa18c100311ada1f272f3a2 Mon Sep 17 00:00:00 2001 From: Toni Date: Tue, 4 May 2021 14:38:55 +0300 Subject: [PATCH 017/160] Add tracking header --- docs/v1/source/_templates/layout.html | 2 +- docs/v2/source/_templates/layout.html | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/v1/source/_templates/layout.html b/docs/v1/source/_templates/layout.html index ca18b558..0dc09e7f 100644 --- a/docs/v1/source/_templates/layout.html +++ b/docs/v1/source/_templates/layout.html @@ -1,7 +1,7 @@ {% extends "!layout.html" %} {% block extrahead %} - + {% endblock %} \ No newline at end of file diff --git a/docs/v2/source/_templates/layout.html b/docs/v2/source/_templates/layout.html index ca18b558..0dc09e7f 100644 --- a/docs/v2/source/_templates/layout.html +++ b/docs/v2/source/_templates/layout.html @@ -1,7 +1,7 @@ {% extends "!layout.html" %} {% block extrahead %} - + {% endblock %} \ No newline at end of file From 8923507af81f934ecb8228dd9a0dbcbf01aaa6f2 Mon Sep 17 00:00:00 2001 From: Toni Date: Tue, 4 May 2021 14:49:32 +0300 Subject: [PATCH 018/160] update deps --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index cf0e112f..25bf7750 100644 --- a/requirements.txt +++ b/requirements.txt @@ -32,6 +32,7 @@ ipython-genutils==0.2.0; python_version >= "3.7" ipython==7.22.0; python_version >= "3.7" isbnid-fork==0.5.2 isodate==0.6.0 +isort==5.8.0; python_version >= "3.6" and python_version < "4.0" jedi==0.18.0; python_version >= "3.7" jsonschema==3.2.0 lxml==4.6.3; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") From eafbb919413f3233f5bf0520051b80ccd7fd0ad7 Mon Sep 17 00:00:00 2001 From: Toni Date: Tue, 4 May 2021 16:42:30 +0300 Subject: [PATCH 019/160] fdwe-service = METAX --- docs/v1/source/_templates/layout.html | 2 +- docs/v2/source/_templates/layout.html | 2 +- swagger/swagger-yaml-to-html.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/v1/source/_templates/layout.html b/docs/v1/source/_templates/layout.html index 0dc09e7f..e51f202a 100644 --- 
a/docs/v1/source/_templates/layout.html +++ b/docs/v1/source/_templates/layout.html @@ -1,7 +1,7 @@ {% extends "!layout.html" %} {% block extrahead %} - + {% endblock %} \ No newline at end of file diff --git a/docs/v2/source/_templates/layout.html b/docs/v2/source/_templates/layout.html index 0dc09e7f..e51f202a 100644 --- a/docs/v2/source/_templates/layout.html +++ b/docs/v2/source/_templates/layout.html @@ -1,7 +1,7 @@ {% extends "!layout.html" %} {% block extrahead %} - + {% endblock %} \ No newline at end of file diff --git a/swagger/swagger-yaml-to-html.py b/swagger/swagger-yaml-to-html.py index d2208422..359f575d 100755 --- a/swagger/swagger-yaml-to-html.py +++ b/swagger/swagger-yaml-to-html.py @@ -35,7 +35,7 @@ - + Swagger UI From 0b3dbb1dc66bf28296c847d3022d4b9be1d1cdb2 Mon Sep 17 00:00:00 2001 From: Toni Date: Wed, 5 May 2021 10:55:34 +0300 Subject: [PATCH 020/160] add auth page to tracking --- src/metax_api/templates/secure/auth_success.html | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/metax_api/templates/secure/auth_success.html b/src/metax_api/templates/secure/auth_success.html index 4c309deb..6e72f38a 100755 --- a/src/metax_api/templates/secure/auth_success.html +++ b/src/metax_api/templates/secure/auth_success.html @@ -2,6 +2,8 @@ + + Metax End User Authentication From f9326c859970fa78bf71ad1b1e4d22d49b3738aa Mon Sep 17 00:00:00 2001 From: Toni Date: Mon, 10 May 2021 12:47:08 +0300 Subject: [PATCH 021/160] add sphinx and swagger deps to requirements.txt, update readme --- README.md | 4 ++-- requirements.txt | 28 ++++++++++++++++++++++++---- 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 462d285d..5878c391 100755 --- a/README.md +++ b/README.md @@ -16,11 +16,11 @@ The recommended way to run the development setup is to use [Docker-swarm setup]( Install [Poetry](https://python-poetry.org/docs/) for your OS. Navigate to the repository root and run the command `poetry install`. This will create and activate a new Python virtualenv, installing all necessary Python packages into it.
-You can generate a traditional requirements.txt file with `poetry export --dev -E simplexquery --without-hashes -f requirements.txt --output requirements.txt` +You can generate a traditional requirements.txt file with `poetry export --dev -E "simplexquery docs swagger" --without-hashes -f requirements.txt --output requirements.txt` ### Managing dependencies -__NOTICE: Please remember to execute `poetry export --dev -E simplexquery --without-hashes -f requirements.txt --output requirements.txt` after any additions, updates or removals.__ +__NOTICE: Please remember to execute `poetry export --dev -E "simplexquery docs swagger" --without-hashes -f requirements.txt --output requirements.txt` after any additions, updates or removals.__ Developer dependencies can be added with command `poetry add -D ` Application dependencies can be added with command `poetry add ` diff --git a/requirements.txt b/requirements.txt index cf0e112f..49b79be7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,9 +1,11 @@ +alabaster==0.7.12; python_version >= "3.6" appdirs==1.4.4; python_version >= "3.6" appnope==0.1.2; sys_platform == "darwin" and python_version >= "3.7" asgiref==3.3.4; python_version >= "3.6" asttokens==2.0.5 attrs==20.3.0; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" autosemver==0.5.5 +babel==2.9.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6" backcall==0.2.0; python_version >= "3.7" black==20.8b1; python_version >= "3.6" certifi==2020.12.5; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version < "4" @@ -19,6 +21,7 @@ django-split-settings==1.0.1; python_version >= "3.6" and python_version < "4.0" django-watchman==1.2.0 django==3.1.8; python_version >= "3.6" djangorestframework==3.12.4; python_version >= "3.5" +docutils==0.16; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" dulwich==0.19.16 elasticsearch==7.12.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0" and python_version < "4") executing==0.6.0 @@ -26,6 +29,7 @@ gunicorn==20.1.0; python_version >= "3.5" icecream==2.1.0 idna==2.10; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" idutils==1.1.8 +imagesize==1.2.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6" importlib-metadata==4.0.1; python_version >= "3.6" and python_version < "3.8" ipdb==0.13.7; python_version >= "2.7" ipython-genutils==0.2.0; python_version >= "3.7" @@ -34,9 +38,13 @@ isbnid-fork==0.5.2 isodate==0.6.0 isort==5.8.0; python_version >= "3.6" and python_version < "4.0" jedi==0.18.0; python_version >= "3.7" +jinja2==2.11.3; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" jsonschema==3.2.0 +livereload==2.6.3; python_version >= "3.6" lxml==4.6.3; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") +markupsafe==1.1.1; python_version >= "3.5" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.5" mypy-extensions==0.4.3; python_version >= "3.6" +packaging==20.9; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6" parso==0.8.2; python_version >= "3.7" pathspec==0.8.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" pexpect==4.8.0; sys_platform != "win32" and python_version >= "3.7" @@ -48,22 +56,34 @@ ptyprocess==0.7.0; sys_platform != "win32" and python_version >= "3.7" pygments==2.8.1; python_version >= "3.7" pyjwt==2.1.0; python_version >= "3.6" pyoai==2.5.0 -pyparsing==2.4.7; python_version >= "2.6" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" +pyparsing==2.4.7; python_version >= "3.5" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.5" pyrsistent==0.17.3; python_version >= "3.5" python-box==5.3.0; python_version >= "3.6" python-dateutil==2.8.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.3.0") python-simplexquery==1.0.5.3 -pytz==2021.1; python_version >= "3.6" +pytz==2021.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6" +pyyaml==5.4.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.6.0") rdflib==5.0.0 redis==3.5.3; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") regex==2021.4.4; python_version >= "3.6" -requests==2.25.1; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" +requests==2.25.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" responses==0.13.3; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") -six==1.15.0; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" +six==1.15.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" +snowballstemmer==2.1.0; python_version >= "3.6" +sphinx-autobuild==2021.3.14; python_version >= "3.6" +sphinx-rtd-theme==0.5.2 +sphinx==3.5.4; python_version >= "3.5" +sphinxcontrib-applehelp==1.0.2; python_version >= "3.6" +sphinxcontrib-devhelp==1.0.2; python_version >= "3.6" +sphinxcontrib-htmlhelp==1.0.3; python_version >= "3.6" +sphinxcontrib-jsmath==1.0.1; python_version >= "3.6" +sphinxcontrib-qthelp==1.0.3; python_version >= "3.6" +sphinxcontrib-serializinghtml==1.1.4; python_version >= "3.6" sqlparse==0.4.1; python_version >= "3.6" structlog==21.1.0; python_version >= "3.6" tblib==1.7.0; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") toml==0.10.2; python_version > "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" and python_version > "3.6" +tornado==6.1; python_version >= "3.6" traitlets==5.0.5; python_version >= "3.7" typed-ast==1.4.3; python_version >= "3.6" typing-extensions==3.7.4.3; python_version < "3.8" and python_version >= "3.6" From d35868bcba20118d75c1fc00fcf8f6dfa88d740c Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Mon, 14 Jun 2021 10:44:07 +0300 Subject: [PATCH 022/160] CSCFAIRMETA-1077: update pyoai using git rev - PyPI did not have the latest bug fix, so update to the git version - Also update the tests to prevent these issues in the future --- poetry.lock | 233 ++++++++++-------- pyproject.toml | 2 +- requirements.txt | 34 +++--- src/metax_api/tests/api/oaipmh/minimal_api.py | 28 ++- 4 files changed, 171 insertions(+), 126 deletions(-) diff --git a/poetry.lock b/poetry.lock index 56b790f8..6fe4e5ae
100644 --- a/poetry.lock +++ b/poetry.lock @@ -52,17 +52,17 @@ test = ["astroid", "pytest"] [[package]] name = "attrs" -version = "20.3.0" +version = "21.2.0" description = "Classes Without Boilerplate" category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [package.extras] -dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface", "furo", "sphinx", "pre-commit"] -docs = ["furo", "sphinx", "zope.interface"] -tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface"] -tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six"] +dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit"] +docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"] +tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface"] +tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins"] [[package]] name = "autosemver" @@ -118,7 +118,7 @@ d = ["aiohttp (>=3.3.2)", "aiohttp-cors"] [[package]] name = "certifi" -version = "2020.12.5" +version = "2021.5.30" description = "Python package for providing Mozilla's CA Bundle." category = "main" optional = false @@ -134,11 +134,15 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "click" -version = "7.1.2" +version = "8.0.1" description = "Composable command line interface toolkit" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.6" + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} [[package]] name = "colorama" @@ -183,7 +187,7 @@ tests = ["responses (>=0.10.6)", "mock (>=1.3.0)", "pytest-invenio (>=1.4.0)"] [[package]] name = "decorator" -version = "5.0.7" +version = "5.0.9" description = "Decorators for Humans" category = "dev" optional = false @@ -191,7 +195,7 @@ python-versions = ">=3.5" [[package]] name = "django" -version = "3.1.8" +version = "3.1.12" description = "A high-level Python Web framework that encourages rapid development and clean, pragmatic design." 
category = "main" optional = false @@ -294,7 +298,7 @@ https = ["urllib3[secure] (>=1.24.1)"] [[package]] name = "elasticsearch" -version = "7.12.1" +version = "7.13.1" description = "Python client for Elasticsearch" category = "main" optional = false @@ -381,7 +385,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "importlib-metadata" -version = "4.0.1" +version = "4.5.0" description = "Read metadata from Python packages" category = "main" optional = false @@ -397,19 +401,20 @@ testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytes [[package]] name = "ipdb" -version = "0.13.7" +version = "0.13.9" description = "IPython-enabled pdb" category = "dev" optional = false python-versions = ">=2.7" [package.dependencies] +decorator = {version = "*", markers = "python_version > \"3.6\""} ipython = {version = ">=7.17.0", markers = "python_version > \"3.6\""} toml = {version = ">=0.10.2", markers = "python_version > \"3.6\""} [[package]] name = "ipython" -version = "7.22.0" +version = "7.24.1" description = "IPython: Productive Interactive Computing" category = "dev" optional = false @@ -421,6 +426,7 @@ backcall = "*" colorama = {version = "*", markers = "sys_platform == \"win32\""} decorator = "*" jedi = ">=0.16" +matplotlib-inline = "*" pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""} pickleshare = "*" prompt-toolkit = ">=2.0.0,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.1.0" @@ -428,7 +434,7 @@ pygments = "*" traitlets = ">=4.2" [package.extras] -all = ["Sphinx (>=1.3)", "ipykernel", "ipyparallel", "ipywidgets", "nbconvert", "nbformat", "nose (>=0.10.1)", "notebook", "numpy (>=1.16)", "pygments", "qtconsole", "requests", "testpath"] +all = ["Sphinx (>=1.3)", "ipykernel", "ipyparallel", "ipywidgets", "nbconvert", "nbformat", "nose (>=0.10.1)", "notebook", "numpy (>=1.17)", "pygments", "qtconsole", "requests", "testpath"] doc = ["Sphinx (>=1.3)"] kernel = ["ipykernel"] nbconvert = ["nbconvert"] @@ -436,7 +442,7 @@ nbformat = ["nbformat"] notebook = ["notebook", "ipywidgets"] parallel = ["ipyparallel"] qtconsole = ["qtconsole"] -test = ["nose (>=0.10.1)", "requests", "testpath", "pygments", "nbformat", "ipykernel", "numpy (>=1.16)"] +test = ["nose (>=0.10.1)", "requests", "testpath", "pygments", "nbformat", "ipykernel", "numpy (>=1.17)"] [[package]] name = "ipython-genutils" @@ -501,17 +507,17 @@ testing = ["Django (<3.1)", "colorama", "docopt", "pytest (<6.0.0)"] [[package]] name = "jinja2" -version = "2.11.3" +version = "3.0.1" description = "A very fast and expressive template engine." category = "main" optional = true -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.6" [package.dependencies] -MarkupSafe = ">=0.23" +MarkupSafe = ">=2.0" [package.extras] -i18n = ["Babel (>=0.8)"] +i18n = ["Babel (>=2.7)"] [[package]] name = "jsonschema" @@ -559,11 +565,22 @@ source = ["Cython (>=0.29.7)"] [[package]] name = "markupsafe" -version = "1.1.1" +version = "2.0.1" description = "Safely add untrusted strings to HTML/XML markup." 
category = "main" optional = true -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" +python-versions = ">=3.6" + +[[package]] +name = "matplotlib-inline" +version = "0.1.2" +description = "Inline Matplotlib backend for Jupyter" +category = "dev" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +traitlets = "*" [[package]] name = "mypy-extensions" @@ -665,7 +682,7 @@ python-versions = "*" [[package]] name = "pygments" -version = "2.8.1" +version = "2.9.0" description = "Pygments is a syntax highlighting package written in Python." category = "main" optional = false @@ -687,16 +704,23 @@ tests = ["pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)"] [[package]] name = "pyoai" -version = "2.5.0" -description = "The oaipmh module is a Python implementation of an \"Open Archives\nInitiative Protocol for Metadata Harvesting\" (version 2) client and server.\nThe protocol is described here:\n\nhttp://www.openarchives.org/OAI/openarchivesprotocol.html" +version = "2.5.1(unreleased)" +description = "" category = "main" optional = false python-versions = "*" +develop = false [package.dependencies] lxml = "*" six = "*" +[package.source] +type = "git" +url = "https://github.com/infrae/pyoai" +reference = "5f6eba12" +resolved_reference = "5f6eba1201270d930ed684e15e9b9fc885649d17" + [[package]] name = "pyparsing" version = "2.4.7" @@ -837,7 +861,7 @@ tests = ["coverage (>=3.7.1,<6.0.0)", "pytest-cov", "pytest-localserver", "flake [[package]] name = "six" -version = "1.15.0" +version = "1.16.0" description = "Python 2 and 3 compatibility utilities" category = "main" optional = false @@ -939,11 +963,11 @@ test = ["pytest"] [[package]] name = "sphinxcontrib-htmlhelp" -version = "1.0.3" +version = "2.0.0" description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" category = "main" optional = true -python-versions = ">=3.5" +python-versions = ">=3.6" [package.extras] lint = ["flake8", "mypy", "docutils-stubs"] @@ -974,7 +998,7 @@ test = ["pytest"] [[package]] name = "sphinxcontrib-serializinghtml" -version = "1.1.4" +version = "1.1.5" description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)." category = "main" optional = true @@ -1056,7 +1080,7 @@ python-versions = "*" [[package]] name = "typing-extensions" -version = "3.7.4.3" +version = "3.10.0.0" description = "Backported and Experimental Type Hints for Python 3.5+" category = "main" optional = false @@ -1064,16 +1088,16 @@ python-versions = "*" [[package]] name = "urllib3" -version = "1.26.4" +version = "1.26.5" description = "HTTP library with thread-safe connection pooling, file post, and more." 
category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" [package.extras] +brotli = ["brotlipy (>=0.6.0)"] secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] -brotli = ["brotlipy (>=0.6.0)"] [[package]] name = "wcwidth" @@ -1111,7 +1135,7 @@ swagger = ["PyYAML"] [metadata] lock-version = "1.1" python-versions = "^3.7" -content-hash = "428eeb62e466c2d516b96fb115be858ef72eaa17d932fcf714f8ba7da863ffaf" +content-hash = "22ec248442bd368b260536eca33ee3856f3f110a04bea6d61ebee9d14660a378" [metadata.files] alabaster = [ @@ -1135,8 +1159,8 @@ asttokens = [ {file = "asttokens-2.0.5.tar.gz", hash = "sha256:9a54c114f02c7a9480d56550932546a3f1fe71d8a02f1bc7ccd0ee3ee35cf4d5"}, ] attrs = [ - {file = "attrs-20.3.0-py2.py3-none-any.whl", hash = "sha256:31b2eced602aa8423c2aea9c76a724617ed67cf9513173fd3a4f03e3a929c7e6"}, - {file = "attrs-20.3.0.tar.gz", hash = "sha256:832aa3cde19744e49938b91fea06d69ecb9e649c93ba974535d08ad92164f700"}, + {file = "attrs-21.2.0-py2.py3-none-any.whl", hash = "sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1"}, + {file = "attrs-21.2.0.tar.gz", hash = "sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb"}, ] autosemver = [ {file = "autosemver-0.5.5.tar.gz", hash = "sha256:0af1e8a9c3604545c067311f1c26403e8f0d60b5d9561c0217e14eee21c98b02"}, @@ -1153,16 +1177,16 @@ black = [ {file = "black-20.8b1.tar.gz", hash = "sha256:1c02557aa099101b9d21496f8a914e9ed2222ef70336404eeeac8edba836fbea"}, ] certifi = [ - {file = "certifi-2020.12.5-py2.py3-none-any.whl", hash = "sha256:719a74fb9e33b9bd44cc7f3a8d94bc35e4049deebe19ba7d8e108280cfd59830"}, - {file = "certifi-2020.12.5.tar.gz", hash = "sha256:1a4995114262bffbc2413b159f2a1a480c969de6e6eb13ee966d470af86af59c"}, + {file = "certifi-2021.5.30-py2.py3-none-any.whl", hash = "sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8"}, + {file = "certifi-2021.5.30.tar.gz", hash = "sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee"}, ] chardet = [ {file = "chardet-4.0.0-py2.py3-none-any.whl", hash = "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5"}, {file = "chardet-4.0.0.tar.gz", hash = "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa"}, ] click = [ - {file = "click-7.1.2-py2.py3-none-any.whl", hash = "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"}, - {file = "click-7.1.2.tar.gz", hash = "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"}, + {file = "click-8.0.1-py3-none-any.whl", hash = "sha256:fba402a4a47334742d782209a7c79bc448911afe1149d07bdabdf480b3e2f4b6"}, + {file = "click-8.0.1.tar.gz", hash = "sha256:8c04c11192119b1ef78ea049e0a6f0463e4c48ef00a30160c704337586f3ad7a"}, ] colorama = [ {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, @@ -1227,12 +1251,12 @@ datacite = [ {file = "datacite-1.1.1.tar.gz", hash = "sha256:4e3d3153d849f0a5f331ba585bf4d2f6fcc87f63738bbf9621f19f3727ae3e4d"}, ] decorator = [ - {file = "decorator-5.0.7-py3-none-any.whl", hash = "sha256:945d84890bb20cc4a2f4a31fc4311c0c473af65ea318617f13a7257c9a58bc98"}, - {file = "decorator-5.0.7.tar.gz", hash = "sha256:6f201a6c4dac3d187352661f508b9364ec8091217442c9478f1f83c003a0f060"}, + {file = "decorator-5.0.9-py3-none-any.whl", hash = 
"sha256:6e5c199c16f7a9f0e3a61a4a54b3d27e7dad0dbdde92b944426cb20914376323"}, + {file = "decorator-5.0.9.tar.gz", hash = "sha256:72ecfba4320a893c53f9706bebb2d55c270c1e51a28789361aa93e4a21319ed5"}, ] django = [ - {file = "Django-3.1.8-py3-none-any.whl", hash = "sha256:c348b3ddc452bf4b62361f0752f71a339140c777ebea3cdaaaa8fdb7f417a862"}, - {file = "Django-3.1.8.tar.gz", hash = "sha256:f8393103e15ec2d2d313ccbb95a3f1da092f9f58d74ac1c61ca2ac0436ae1eac"}, + {file = "Django-3.1.12-py3-none-any.whl", hash = "sha256:a523d62b7ab2908f551dabc32b99017a86aa7784e32b761708e52be3dce6d35d"}, + {file = "Django-3.1.12.tar.gz", hash = "sha256:dc41bf07357f1f4810c1c555b685cb51f780b41e37892d6cc92b89789f2847e1"}, ] django-debug-toolbar = [ {file = "django-debug-toolbar-3.2.1.tar.gz", hash = "sha256:a5ff2a54f24bf88286f9872836081078f4baa843dc3735ee88524e89f8821e33"}, @@ -1271,8 +1295,8 @@ dulwich = [ {file = "dulwich-0.19.16.tar.gz", hash = "sha256:f74561c448bfb6f04c07de731c1181ae4280017f759b0bb04fa5770aa84ca850"}, ] elasticsearch = [ - {file = "elasticsearch-7.12.1-py2.py3-none-any.whl", hash = "sha256:1840fea8c305224b8c28acabc8697f739cdfb03618f2d2427b42838971a787f6"}, - {file = "elasticsearch-7.12.1.tar.gz", hash = "sha256:df35d8c638f946f098a74681b18611bdf27ba469fa2063e3dfc8bdc290b11419"}, + {file = "elasticsearch-7.13.1-py2.py3-none-any.whl", hash = "sha256:a09ae1de8869efa6ef2d9a0a9b9f6d9260b0c2506e83dd32bc1119a23fff49a5"}, + {file = "elasticsearch-7.13.1.tar.gz", hash = "sha256:d6bcca0b2e5665d08e6fe6fadc2d4d321affd76ce483603078fc9d3ccd2bc0f9"}, ] executing = [ {file = "executing-0.6.0-py2.py3-none-any.whl", hash = "sha256:a2f10f802b4312b92bd256279b43720271b0d9b540a0dbab7be4c28fbc536479"}, @@ -1298,15 +1322,15 @@ imagesize = [ {file = "imagesize-1.2.0.tar.gz", hash = "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1"}, ] importlib-metadata = [ - {file = "importlib_metadata-4.0.1-py3-none-any.whl", hash = "sha256:d7eb1dea6d6a6086f8be21784cc9e3bcfa55872b52309bc5fad53a8ea444465d"}, - {file = "importlib_metadata-4.0.1.tar.gz", hash = "sha256:8c501196e49fb9df5df43833bdb1e4328f64847763ec8a50703148b73784d581"}, + {file = "importlib_metadata-4.5.0-py3-none-any.whl", hash = "sha256:833b26fb89d5de469b24a390e9df088d4e52e4ba33b01dc5e0e4f41b81a16c00"}, + {file = "importlib_metadata-4.5.0.tar.gz", hash = "sha256:b142cc1dd1342f31ff04bb7d022492b09920cb64fed867cd3ea6f80fe3ebd139"}, ] ipdb = [ - {file = "ipdb-0.13.7.tar.gz", hash = "sha256:178c367a61c1039e44e17c56fcc4a6e7dc11b33561261382d419b6ddb4401810"}, + {file = "ipdb-0.13.9.tar.gz", hash = "sha256:951bd9a64731c444fd907a5ce268543020086a697f6be08f7cc2c9a752a278c5"}, ] ipython = [ - {file = "ipython-7.22.0-py3-none-any.whl", hash = "sha256:c0ce02dfaa5f854809ab7413c601c4543846d9da81010258ecdab299b542d199"}, - {file = "ipython-7.22.0.tar.gz", hash = "sha256:9c900332d4c5a6de534b4befeeb7de44ad0cc42e8327fa41b7685abde58cec74"}, + {file = "ipython-7.24.1-py3-none-any.whl", hash = "sha256:d513e93327cf8657d6467c81f1f894adc125334ffe0e4ddd1abbb1c78d828703"}, + {file = "ipython-7.24.1.tar.gz", hash = "sha256:9bc24a99f5d19721fb8a2d1408908e9c0520a17fff2233ffe82620847f17f1b6"}, ] ipython-genutils = [ {file = "ipython_genutils-0.2.0-py2.py3-none-any.whl", hash = "sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8"}, @@ -1328,8 +1352,8 @@ jedi = [ {file = "jedi-0.18.0.tar.gz", hash = "sha256:92550a404bad8afed881a137ec9a461fed49eca661414be45059329614ed0707"}, ] jinja2 = [ - {file = "Jinja2-2.11.3-py2.py3-none-any.whl", hash = 
"sha256:03e47ad063331dd6a3f04a43eddca8a966a26ba0c5b7207a9a9e4e08f1b29419"}, - {file = "Jinja2-2.11.3.tar.gz", hash = "sha256:a6d58433de0ae800347cab1fa3043cebbabe8baa9d29e668f1c768cb87a333c6"}, + {file = "Jinja2-3.0.1-py3-none-any.whl", hash = "sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4"}, + {file = "Jinja2-3.0.1.tar.gz", hash = "sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4"}, ] jsonschema = [ {file = "jsonschema-3.2.0-py2.py3-none-any.whl", hash = "sha256:4e5b3cf8216f577bee9ce139cbe72eca3ea4f292ec60928ff24758ce626cd163"}, @@ -1377,39 +1401,44 @@ lxml = [ {file = "lxml-4.6.3.tar.gz", hash = "sha256:39b78571b3b30645ac77b95f7c69d1bffc4cf8c3b157c435a34da72e78c82468"}, ] markupsafe = [ - {file = "MarkupSafe-1.1.1-cp27-cp27m-macosx_10_6_intel.whl", hash = "sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161"}, - {file = "MarkupSafe-1.1.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7"}, - {file = "MarkupSafe-1.1.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183"}, - {file = "MarkupSafe-1.1.1-cp27-cp27m-win32.whl", hash = "sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b"}, - {file = "MarkupSafe-1.1.1-cp27-cp27m-win_amd64.whl", hash = "sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e"}, - {file = "MarkupSafe-1.1.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f"}, - {file = "MarkupSafe-1.1.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1"}, - {file = "MarkupSafe-1.1.1-cp34-cp34m-macosx_10_6_intel.whl", hash = "sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5"}, - {file = "MarkupSafe-1.1.1-cp34-cp34m-manylinux1_i686.whl", hash = "sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1"}, - {file = "MarkupSafe-1.1.1-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735"}, - {file = "MarkupSafe-1.1.1-cp34-cp34m-win32.whl", hash = "sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21"}, - {file = "MarkupSafe-1.1.1-cp34-cp34m-win_amd64.whl", hash = "sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235"}, - {file = "MarkupSafe-1.1.1-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b"}, - {file = "MarkupSafe-1.1.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f"}, - {file = "MarkupSafe-1.1.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905"}, - {file = "MarkupSafe-1.1.1-cp35-cp35m-win32.whl", hash = "sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1"}, - {file = "MarkupSafe-1.1.1-cp35-cp35m-win_amd64.whl", hash = "sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d"}, - {file = "MarkupSafe-1.1.1-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff"}, - {file = "MarkupSafe-1.1.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473"}, - {file = "MarkupSafe-1.1.1-cp36-cp36m-manylinux1_x86_64.whl", hash = 
"sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e"}, - {file = "MarkupSafe-1.1.1-cp36-cp36m-win32.whl", hash = "sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66"}, - {file = "MarkupSafe-1.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5"}, - {file = "MarkupSafe-1.1.1-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d"}, - {file = "MarkupSafe-1.1.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e"}, - {file = "MarkupSafe-1.1.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6"}, - {file = "MarkupSafe-1.1.1-cp37-cp37m-win32.whl", hash = "sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2"}, - {file = "MarkupSafe-1.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c"}, - {file = "MarkupSafe-1.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15"}, - {file = "MarkupSafe-1.1.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2"}, - {file = "MarkupSafe-1.1.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42"}, - {file = "MarkupSafe-1.1.1-cp38-cp38-win32.whl", hash = "sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b"}, - {file = "MarkupSafe-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be"}, - {file = "MarkupSafe-1.1.1.tar.gz", hash = "sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-win32.whl", hash = "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_i686.whl", hash = 
"sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-win32.whl", hash = "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-win32.whl", hash = "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8"}, + {file = "MarkupSafe-2.0.1.tar.gz", hash = "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a"}, +] +matplotlib-inline = [ + {file = "matplotlib-inline-0.1.2.tar.gz", hash = "sha256:f41d5ff73c9f5385775d5c0bc13b424535c8402fe70ea8210f93e11f3683993e"}, + {file = "matplotlib_inline-0.1.2-py3-none-any.whl", hash = "sha256:5cf1176f554abb4fa98cb362aa2b55c500147e4bdbb07e3fda359143e1da0811"}, ] mypy-extensions = [ {file = 
"mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, @@ -1477,22 +1506,22 @@ psycopg2-binary = [ {file = "psycopg2_binary-2.8.6-cp39-cp39-macosx_10_9_x86_64.macosx_10_9_intel.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:89705f45ce07b2dfa806ee84439ec67c5d9a0ef20154e0e475e2b2ed392a5b83"}, {file = "psycopg2_binary-2.8.6-cp39-cp39-manylinux1_i686.whl", hash = "sha256:42ec1035841b389e8cc3692277a0bd81cdfe0b65d575a2c8862cec7a80e62e52"}, {file = "psycopg2_binary-2.8.6-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7312e931b90fe14f925729cde58022f5d034241918a5c4f9797cac62f6b3a9dd"}, + {file = "psycopg2_binary-2.8.6-cp39-cp39-win32.whl", hash = "sha256:6422f2ff0919fd720195f64ffd8f924c1395d30f9a495f31e2392c2efafb5056"}, + {file = "psycopg2_binary-2.8.6-cp39-cp39-win_amd64.whl", hash = "sha256:15978a1fbd225583dd8cdaf37e67ccc278b5abecb4caf6b2d6b8e2b948e953f6"}, ] ptyprocess = [ {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, ] pygments = [ - {file = "Pygments-2.8.1-py3-none-any.whl", hash = "sha256:534ef71d539ae97d4c3a4cf7d6f110f214b0e687e92f9cb9d2a3b0d3101289c8"}, - {file = "Pygments-2.8.1.tar.gz", hash = "sha256:2656e1a6edcdabf4275f9a3640db59fd5de107d88e8663c5d4e9a0fa62f77f94"}, + {file = "Pygments-2.9.0-py3-none-any.whl", hash = "sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e"}, + {file = "Pygments-2.9.0.tar.gz", hash = "sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f"}, ] pyjwt = [ {file = "PyJWT-2.1.0-py3-none-any.whl", hash = "sha256:934d73fbba91b0483d3857d1aff50e96b2a892384ee2c17417ed3203f173fca1"}, {file = "PyJWT-2.1.0.tar.gz", hash = "sha256:fba44e7898bbca160a2b2b501f492824fc8382485d3a6f11ba5d0c1937ce6130"}, ] -pyoai = [ - {file = "pyoai-2.5.0.tar.gz", hash = "sha256:029521e1f6a819511feb4299a6181b5c312e8a71f7cddc4547e27001e7552be0"}, -] +pyoai = [] pyparsing = [ {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"}, {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"}, @@ -1610,8 +1639,8 @@ responses = [ {file = "responses-0.13.3.tar.gz", hash = "sha256:18a5b88eb24143adbf2b4100f328a2f5bfa72fbdacf12d97d41f07c26c45553d"}, ] six = [ - {file = "six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"}, - {file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"}, + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] snowballstemmer = [ {file = "snowballstemmer-2.1.0-py2.py3-none-any.whl", hash = "sha256:b51b447bea85f9968c13b650126a888aabd4cb4463fca868ec596826325dedc2"}, @@ -1638,8 +1667,8 @@ sphinxcontrib-devhelp = [ {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"}, ] sphinxcontrib-htmlhelp = [ - {file = "sphinxcontrib-htmlhelp-1.0.3.tar.gz", hash = "sha256:e8f5bb7e31b2dbb25b9cc435c8ab7a79787ebf7f906155729338f3156d93659b"}, - {file = 
"sphinxcontrib_htmlhelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:3c0bc24a2c41e340ac37c85ced6dafc879ab485c095b1d65d2461ac2f7cca86f"}, + {file = "sphinxcontrib-htmlhelp-2.0.0.tar.gz", hash = "sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2"}, + {file = "sphinxcontrib_htmlhelp-2.0.0-py2.py3-none-any.whl", hash = "sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07"}, ] sphinxcontrib-jsmath = [ {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, @@ -1650,8 +1679,8 @@ sphinxcontrib-qthelp = [ {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"}, ] sphinxcontrib-serializinghtml = [ - {file = "sphinxcontrib-serializinghtml-1.1.4.tar.gz", hash = "sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc"}, - {file = "sphinxcontrib_serializinghtml-1.1.4-py2.py3-none-any.whl", hash = "sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a"}, + {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"}, + {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"}, ] sqlparse = [ {file = "sqlparse-0.4.1-py3-none-any.whl", hash = "sha256:017cde379adbd6a1f15a61873f43e8274179378e95ef3fede90b5aa64d304ed0"}, @@ -1749,13 +1778,13 @@ typed-ast = [ {file = "typed_ast-1.4.3.tar.gz", hash = "sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65"}, ] typing-extensions = [ - {file = "typing_extensions-3.7.4.3-py2-none-any.whl", hash = "sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f"}, - {file = "typing_extensions-3.7.4.3-py3-none-any.whl", hash = "sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918"}, - {file = "typing_extensions-3.7.4.3.tar.gz", hash = "sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c"}, + {file = "typing_extensions-3.10.0.0-py2-none-any.whl", hash = "sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497"}, + {file = "typing_extensions-3.10.0.0-py3-none-any.whl", hash = "sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84"}, + {file = "typing_extensions-3.10.0.0.tar.gz", hash = "sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342"}, ] urllib3 = [ - {file = "urllib3-1.26.4-py2.py3-none-any.whl", hash = "sha256:2f4da4594db7e1e110a944bb1b551fdf4e6c136ad42e4234131391e21eb5b0df"}, - {file = "urllib3-1.26.4.tar.gz", hash = "sha256:e7b021f7241115872f92f43c6508082facffbd1c048e3c6e2bb9c2a157e28937"}, + {file = "urllib3-1.26.5-py2.py3-none-any.whl", hash = "sha256:753a0374df26658f99d826cfe40394a686d05985786d946fbe4165b5148f5a7c"}, + {file = "urllib3-1.26.5.tar.gz", hash = "sha256:a7acd0977125325f516bda9735fa7142b909a8d01e8b2e4c8108d0984e6e0098"}, ] wcwidth = [ {file = "wcwidth-0.2.5-py2.py3-none-any.whl", hash = "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784"}, diff --git a/pyproject.toml b/pyproject.toml index 77e3d5c8..d1127088 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,7 +22,7 @@ django-environ = "^0.4.5" django-split-settings = "^1.0.1" rdflib = "^5.0.0" python-dateutil = "^2.8.1" -pyoai = "^2.5.0" +pyoai = {git = "https://github.com/infrae/pyoai", rev = "5f6eba12"} python-simplexquery = {version = "*", optional = true} 
# These are here because of: https://github.com/python-poetry/poetry/issues/1644 Sphinx = {version = "^3.5.4", optional = true} diff --git a/requirements.txt b/requirements.txt index 49b79be7..5125b524 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,37 +3,38 @@ appdirs==1.4.4; python_version >= "3.6" appnope==0.1.2; sys_platform == "darwin" and python_version >= "3.7" asgiref==3.3.4; python_version >= "3.6" asttokens==2.0.5 -attrs==20.3.0; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" +attrs==21.2.0; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" autosemver==0.5.5 babel==2.9.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6" backcall==0.2.0; python_version >= "3.7" black==20.8b1; python_version >= "3.6" -certifi==2020.12.5; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version < "4" +certifi==2021.5.30; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version < "4" chardet==4.0.0; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" -click==7.1.2; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" -colorama==0.4.4; python_version >= "3.7" and python_full_version < "3.0.0" and sys_platform == "win32" or sys_platform == "win32" and python_version >= "3.7" and python_full_version >= "3.5.0" +click==8.0.1; python_version >= "3.6" +colorama==0.4.4; python_version >= "3.7" and python_full_version < "3.0.0" and platform_system == "Windows" and sys_platform == "win32" or platform_system == "Windows" and python_version >= "3.7" and python_full_version >= "3.5.0" and sys_platform == "win32" +coverage==5.5; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0" and python_version < "4") datacite==1.1.1 -decorator==5.0.7; python_version >= "3.7" +decorator==5.0.9; python_version >= "3.7" django-debug-toolbar==3.2.1; python_version >= "3.6" django-environ==0.4.5 django-rainbowtests==0.6.0 django-split-settings==1.0.1; python_version >= "3.6" and python_version < "4.0" django-watchman==1.2.0 -django==3.1.8; python_version >= "3.6" +django==3.1.12; python_version >= "3.6" djangorestframework==3.12.4; python_version >= "3.5" docutils==0.16; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" dulwich==0.19.16 -elasticsearch==7.12.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0" and python_version < "4") +elasticsearch==7.13.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0" and python_version < "4") executing==0.6.0 gunicorn==20.1.0; python_version >= "3.5" icecream==2.1.0 idna==2.10; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" idutils==1.1.8 imagesize==1.2.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6" -importlib-metadata==4.0.1; python_version >= "3.6" and python_version < "3.8" -ipdb==0.13.7; python_version >= "2.7" +importlib-metadata==4.5.0; python_version >= "3.6" and python_version < "3.8" +ipdb==0.13.9; python_version >= "2.7" ipython-genutils==0.2.0; python_version >= "3.7" 
-ipython==7.22.0; python_version >= "3.7" +ipython==7.24.1; python_version >= "3.7" isbnid-fork==0.5.2 isodate==0.6.0 isort==5.8.0; python_version >= "3.6" and python_version < "4.0" @@ -43,6 +44,7 @@ jsonschema==3.2.0 livereload==2.6.3; python_version >= "3.6" lxml==4.6.3; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") markupsafe==1.1.1; python_version >= "3.5" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.5" +matplotlib-inline==0.1.2; python_version >= "3.7" mypy-extensions==0.4.3; python_version >= "3.6" packaging==20.9; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6" parso==0.8.2; python_version >= "3.7" @@ -53,10 +55,10 @@ pika==1.2.0 prompt-toolkit==3.0.18; python_full_version >= "3.6.1" and python_version >= "3.7" psycopg2-binary==2.8.6; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0") ptyprocess==0.7.0; sys_platform != "win32" and python_version >= "3.7" -pygments==2.8.1; python_version >= "3.7" +pygments==2.9.0; python_version >= "3.7" pyjwt==2.1.0; python_version >= "3.6" -pyoai==2.5.0 -pyparsing==2.4.7; python_version >= "3.5" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.5" +pyoai @ git+https://github.com/infrae/pyoai@5f6eba12 +pyparsing==2.4.7; python_version >= "2.6" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" pyrsistent==0.17.3; python_version >= "3.5" python-box==5.3.0; python_version >= "3.6" python-dateutil==2.8.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.3.0") @@ -68,7 +70,7 @@ redis==3.5.3; (python_version >= "2.7" and python_full_version < "3.0.0") or (py regex==2021.4.4; python_version >= "3.6" requests==2.25.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" responses==0.13.3; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") -six==1.15.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" +six==1.16.0; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" snowballstemmer==2.1.0; python_version >= "3.6" sphinx-autobuild==2021.3.14; python_version >= "3.6" sphinx-rtd-theme==0.5.2 @@ -86,8 +88,8 @@ toml==0.10.2; python_version > "3.6" and python_full_version < "3.0.0" or python tornado==6.1; python_version >= "3.6" traitlets==5.0.5; python_version >= "3.7" typed-ast==1.4.3; python_version >= "3.6" -typing-extensions==3.7.4.3; python_version < "3.8" and python_version >= "3.6" -urllib3==1.26.4; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version < "4" +typing-extensions==3.10.0.0; python_version < "3.8" and python_version >= "3.6" +urllib3==1.26.5; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version < "4" wcwidth==0.2.5; python_full_version >= "3.6.1" and python_version >= "3.7" xmltodict==0.12.0; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0") zipp==3.4.1; python_version >= "3.6" and python_version < "3.8" diff --git a/src/metax_api/tests/api/oaipmh/minimal_api.py b/src/metax_api/tests/api/oaipmh/minimal_api.py index c2c5ef51..e64b0776 100755 --- 
a/src/metax_api/tests/api/oaipmh/minimal_api.py +++ b/src/metax_api/tests/api/oaipmh/minimal_api.py @@ -110,16 +110,23 @@ def test_list_identifiers(self): ms = settings.OAI["BATCH_SIZE"] allRecords = CatalogRecord.objects.filter( data_catalog__catalog_json__identifier__in=MetaxOAIServer._get_default_set_filter() - )[:ms] + ) response = self.client.get("/oai/?verb=ListIdentifiers&metadataPrefix=oai_dc") self.assertEqual(response.status_code, status.HTTP_200_OK) headers = self._get_results(response.content, "//o:header") - self.assertTrue(len(headers) == len(allRecords), len(headers)) + self.assertTrue(len(headers) == ms, len(headers)) + + token = self._get_single_result(response.content, '//o:resumptionToken') + response = self.client.get(f"/oai/?verb=ListIdentifiers&resumptionToken={token.text}") + self.assertEqual(response.status_code, status.HTTP_200_OK) + + headers = self._get_results(response.content, "//o:header") + self.assertEqual(len(allRecords) - ms, len(headers)) response = self.client.get("/oai/?verb=ListIdentifiers&metadataPrefix=oai_datacite") self.assertEqual(response.status_code, status.HTTP_200_OK) headers = self._get_results(response.content, "//o:header") - self.assertTrue(len(headers) == len(allRecords), len(headers)) + self.assertTrue(len(headers) == ms, len(headers)) response = self.client.get("/oai/?verb=ListIdentifiers&metadataPrefix=oai_dc_urnresolver") self.assertEqual(response.status_code, status.HTTP_200_OK) @@ -157,22 +164,29 @@ def test_list_records(self): ms = settings.OAI["BATCH_SIZE"] allRecords = CatalogRecord.objects.filter( data_catalog__catalog_json__identifier__in=MetaxOAIServer._get_default_set_filter() - )[:ms] + ) response = self.client.get("/oai/?verb=ListRecords&metadataPrefix=oai_dc") self.assertEqual(response.status_code, status.HTTP_200_OK) records = self._get_results(response.content, "//o:record") - self.assertTrue(len(records) == len(allRecords)) + self.assertTrue(len(records) == ms) + + token = self._get_single_result(response.content, '//o:resumptionToken') + response = self.client.get(f"/oai/?verb=ListRecords&resumptionToken={token.text}") + self.assertEqual(response.status_code, status.HTTP_200_OK) + + records = self._get_results(response.content, "//o:record") + self.assertTrue(len(allRecords) - ms == len(records)) response = self.client.get("/oai/?verb=ListRecords&metadataPrefix=oai_fairdata_datacite") self.assertEqual(response.status_code, status.HTTP_200_OK) records = self._get_results(response.content, "//o:record") - self.assertTrue(len(records) == len(allRecords)) + self.assertTrue(len(records) == ms) response = self.client.get("/oai/?verb=ListRecords&metadataPrefix=oai_dc_urnresolver") self.assertEqual(response.status_code, status.HTTP_200_OK) records = self._get_results(response.content, "//o:record") - self.assertTrue(len(records) == len(allRecords)) + self.assertTrue(len(records) == ms) def test_list_records_for_drafts(self): """ Tests that drafts are not returned from ListRecords """ From 11b70f55975a8fcdf15d3facba07581b1c5c1a07 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Mon, 31 May 2021 14:57:07 +0300 Subject: [PATCH 023/160] updated test data --- .../catalog_record_test_data_template.json | 37 ++- ...og_record_test_data_template_full_att.json | 235 ++++++++++++++--- ...og_record_test_data_template_full_ida.json | 242 +++++++++++++++--- .../data_catalog_test_data_template.json | 28 +- 4 files changed, 453 insertions(+), 89 deletions(-) diff --git a/src/metax_api/tests/testdata/catalog_record_test_data_template.json 
b/src/metax_api/tests/testdata/catalog_record_test_data_template.json index 47038a92..748cc60f 100755 --- a/src/metax_api/tests/testdata/catalog_record_test_data_template.json +++ b/src/metax_api/tests/testdata/catalog_record_test_data_template.json @@ -40,7 +40,14 @@ } }, "contributor_role": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role" }] } ], @@ -53,18 +60,40 @@ "fi": "Organisaatio" }, "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ResearchGroup" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ResearchGroup", + "pref_label": { + "en": "ResearchGroup", + "fi": "TutkimusryhmƤ", + "sv": "Forskningsgrupp", + "und": "TutkimusryhmƤ" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] } ], "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "sv": "engelska", + "fi": "englanti", + "und": "englanti" + } } ], "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "definition": { + "en": "A statement or formal explanation of the meaning of a concept." + }, + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "pref_label": { + "fi": "Avoin", + "en": "Open", + "und": "Avoin" } } } }, diff --git a/src/metax_api/tests/testdata/catalog_record_test_data_template_full_att.json b/src/metax_api/tests/testdata/catalog_record_test_data_template_full_att.json index 96d833c2..c3291c81 100755 --- a/src/metax_api/tests/testdata/catalog_record_test_data_template_full_att.json +++ b/src/metax_api/tests/testdata/catalog_record_test_data_template_full_att.json @@ -28,7 +28,13 @@ "value": 0.111, "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "sv": "engelska", + "fi": "englanti", + "und": "englanti" + } } ], "temporal": [ @@ -49,7 +55,14 @@ "POLYGON((0 0, 0 20, 40 20, 40 0, 0 0))" ], "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + }, + "in_scheme": "http://www.yso.fi/onto/yso/places" } }, { @@ -57,13 +70,26 @@ "alt": "60", "full_address": "The complete address written as a string, with or without formatting", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + }, + "in_scheme": "http://www.yso.fi/onto/yso/places" } } ], "infrastructure": [ { - "identifier": "urn-nbn-fi-research-infras-2016072530" + "identifier": "urn-nbn-fi-research-infras-2016072530", + "pref_label": { + "fi": "JyvƤskylƤn yliopiston fysiikan laitoksen Kiihdytinlaboratorio", + "en": "Accelerator laboratory of the university of JyvƤskylƤ", + "und": "JyvƤskylƤn yliopiston fysiikan laitoksen
Kiihdytinlaboratorio" }, + "in_scheme": "https://avaa.tdata.fi/api/jsonws/tupa-portlet.Infrastructures/get-all-infrastructures" } ], "access_rights": { @@ -72,28 +98,38 @@ }, "available": "2014-01-15", "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", - "pref_label": { - "en": "pref label for this type" - }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", "definition": { "en": "A statement or formal explanation of the meaning of a concept." }, - "in_scheme": "http://uri.of.filetype.concept/scheme" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "pref_label": { + "fi": "Avoin", + "en": "Open", + "und": "Avoin" + } }, "restriction_grounds": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other" + "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other", + "pref_label": { + "fi": "Saatavuutta rajoitettu muulla perusteella", + "en": "Restricted access due to other reasons", + "sv": "BegrƤnsad Ć„tkomst av ƶvriga skƤl", + "und": "Saatavuutta rajoitettu muulla perusteella" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds" }], "license": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", "title": { - "en": "A name given to the resource" + "en": "Apache Software License 2.0", + "und": "Apache Software License 2.0" }, + "license": "https://url.of.license.which.applies.here.org", + "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", "description": { "en": "Free account of the rights" - }, - "license": "https://url.of.license.which.applies.here.org" + } } ], "access_url": { @@ -122,7 +157,14 @@ "+358501231235" ], "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ContactPerson" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ContactPerson", + "pref_label": { + "en": "Point of contact", + "fi": "Yhteystiedot", + "sv": "Kontaktuppgifter", + "und": "Yhteystiedot" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] }, "type": { @@ -151,7 +193,14 @@ "+358501231235" ], "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCollector" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCollector", + "pref_label": { + "en": "Data Collector", + "fi": "Aineiston kerƤƤjƤ", + "sv": "Datainsamling", + "und": "Aineiston kerƤƤjƤ" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] }, "type": { @@ -207,15 +256,27 @@ "alt": "11.111", "full_address": "The complete address written as a string, with or without formatting", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + }, + "in_scheme": "http://www.yso.fi/onto/yso/places" } }, "lifecycle_event": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event/code/collected", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event", "definition": { "en": "A statement or formal explanation of the meaning of a concept."
}, - "in_scheme": "http://uri.of.filetype.concept/scheme" + "identifier": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event/code/collected", + "pref_label": { + "fi": "KerƤtty", + "en": "Collected", + "und": "KerƤtty" + } }, "used_entity": [ { @@ -251,7 +312,14 @@ "+358501231235" ], "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCurator" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCurator", + "pref_label": { + "en": "Data Curator", + "fi": "Aineiston kƤsittelijƤ", + "sv": "Databehandling", + "und": "Aineiston kƤsittelijƤ" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] } ], @@ -287,7 +355,14 @@ } ], "event_outcome": { - "identifier": "Success" + "identifier": "http://uri.suomi.fi/codelist/fairdata/event_outcome/code/success", + "pref_label": { + "en": "Success", + "fi": "Onnistunut", + "sv": "FramgĆ„ngsrik", + "und": "Onnistunut" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/event_outcome" }, "outcome_description": {"fi": "Onnistui hyvin", "en": "A grand success"} }, @@ -307,14 +382,34 @@ "alt": "11.111", "full_address": "The complete address written as a string, with or without formatting", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p105917" + "identifier": "http://www.yso.fi/onto/yso/p105917", + "pref_label": { + "fi": "EtelƤ-Suomi", + "sv": "Sƶdra Finland", + "en": "Southern Finland Province", + "und": "EtelƤ-Suomi" + }, + "in_scheme": "http://www.yso.fi/onto/yso/places" } }, "preservation_event": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/preservation_event/code/upd" + "identifier": "http://uri.suomi.fi/codelist/fairdata/preservation_event/code/upd", + "pref_label": { + "fi": "Objektin pƤivitys", + "en": "Object update", + "und": "Objektin pƤivitys" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/preservation_event" }, "event_outcome": { - "identifier": "Failure" + "identifier": "http://uri.suomi.fi/codelist/fairdata/event_outcome/code/failure", + "pref_label": { + "en": "Failure", + "fi": "EpƤonnistunut", + "sv": "Misslyckad", + "und": "EpƤonnistunut" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/event_outcome" }, "outcome_description": {"fi": "EpƤonnistui", "en": "A grand failure"} } @@ -339,7 +434,14 @@ } }, "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataManager" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataManager", + "pref_label": { + "en": "Data Manager", + "fi": "Aineiston hallinnoija", + "sv": "Dataadministration", + "und": "Aineiston hallinnoija" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] }], "creator": [ @@ -355,7 +457,14 @@ } }, "contributor_role": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/conceptualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/conceptualization", + "pref_label": { + "en": "Conceptualization", + "fi": "Tutkimuksen muotoilu", + "sv": "Formulering av forskningen", + "und": "Tutkimuksen muotoilu" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role" }] } ], @@ -376,7 +485,14 @@ } }, "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor", + "pref_label": { + "en": "Distributor", + "fi": 
"Jakelija", + "sv": "Distributƶr", + "und": "Jakelija" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] } ], @@ -418,7 +534,14 @@ } }, "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor", + "pref_label": { + "en": "Distributor", + "fi": "Jakelija", + "sv": "Distributƶr", + "und": "Jakelija" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] }, "contributor": [ @@ -443,7 +566,14 @@ ] }, "contributor_role": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/funding_acquisition" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/funding_acquisition", + "pref_label": { + "en": "Funding acquisition", + "fi": "Rahoituksen hankinta", + "sv": "Anskaffande av finansiering", + "und": "Rahoituksen hankinta" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role" }] }, { @@ -467,7 +597,14 @@ ] }, "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ProjectLeader" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ProjectLeader", + "pref_label": { + "en": "Project Leader", + "fi": "Projektin johtaja", + "sv": "Projektledare", + "und": "Projektin johtaja" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] } ], @@ -491,7 +628,14 @@ "+358501232233" ], "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Editor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Editor", + "pref_label": { + "en": "Editor", + "fi": "Julkaisutoimittaja", + "sv": "Redaktƶr", + "und": "Julkaisutoimittaja" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] } ], @@ -508,7 +652,14 @@ "+358501231235" ], "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/HostingInstitution" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/HostingInstitution", + "pref_label": { + "en": "Hosting Institution", + "fi": "SƤilyttƤvƤ organisaatio", + "sv": "Bevarande organisation", + "und": "SƤilyttƤvƤ organisaatio" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] } ], @@ -539,14 +690,15 @@ }, "license": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", "title": { - "en": "A name given to the resource" + "en": "Apache Software License 2.0", + "und": "Apache Software License 2.0" }, + "license": "https://url.of.license.which.applies.here.org", + "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", "description": { "en": "Free account of the rights" - }, - "license": "https://url.of.license.which.applies.org" + } } ], "file_type": { @@ -588,14 +740,15 @@ }, "license": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", "title": { - "en": "A name given to the resource" + "en": "Apache Software License 2.0", + "und": "Apache Software License 2.0" }, + "license": "https://url.of.license.which.applies.here.org", + "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", "description": { "en": "Free account of the rights" - }, - "license": "https://url.of.license.which.applies.org" + } } ], "resource_type": { @@ -690,4 +843,4 @@ "api_meta": { "version": 1 } -} 
\ No newline at end of file +} diff --git a/src/metax_api/tests/testdata/catalog_record_test_data_template_full_ida.json b/src/metax_api/tests/testdata/catalog_record_test_data_template_full_ida.json index 8fc1fd54..f9eb1a76 100755 --- a/src/metax_api/tests/testdata/catalog_record_test_data_template_full_ida.json +++ b/src/metax_api/tests/testdata/catalog_record_test_data_template_full_ida.json @@ -28,7 +28,13 @@ "value": 0.111, "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "sv": "engelska", + "fi": "englanti", + "und": "englanti" + } } ], "temporal": [ @@ -49,7 +55,14 @@ "POLYGON((0 0, 0 20, 40 20, 40 0, 0 0))" ], "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + }, + "in_scheme": "http://www.yso.fi/onto/yso/places" } }, { @@ -57,13 +70,26 @@ "alt": "60", "full_address": "The complete address written as a string, with or without formatting", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + }, + "in_scheme": "http://www.yso.fi/onto/yso/places" } } ], "infrastructure": [ { - "identifier": "urn-nbn-fi-research-infras-2016072530" + "identifier": "urn-nbn-fi-research-infras-2016072530", + "pref_label": { + "fi": "JyvƤskylƤn yliopiston fysiikan laitoksen Kiihdytinlaboratorio", + "en": "Accelerator laboratory of the university of JyvƤskylƤ", + "und": "JyvƤskylƤn yliopiston fysiikan laitoksen Kiihdytinlaboratorio" + }, + "in_scheme": "https://avaa.tdata.fi/api/jsonws/tupa-portlet.Infrastructures/get-all-infrastructures" } ], "access_rights": { @@ -72,43 +98,58 @@ }, "available": "2014-01-15", "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", - "pref_label": { - "en": "pref label for this type" - }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", "definition": { "en": "A statement or formal explanation of the meaning of a concept."
}, - "in_scheme": "http://uri.of.filetype.concept/scheme" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "pref_label": { + "fi": "Avoin", + "en": "Open", + "und": "Avoin" + } }, "restriction_grounds": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other" + "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other", + "pref_label": { + "fi": "Saatavuutta rajoitettu muulla perusteella", + "en": "Restricted access due to other reasons", + "sv": "BegrƤnsad Ć„tkomst av ƶvriga skƤl", + "und": "Saatavuutta rajoitettu muulla perusteella" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds" }], "license": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", "title": { - "en": "A name given to the resource" + "en": "Apache Software License 2.0", + "und": "Apache Software License 2.0" }, + "license": "https://url.of.license.which.applies.here.org", + "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", "description": { "en": "Free account of the rights" - }, - "license": "https://url.of.license.which.applies.here.org" + } }, { - "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-NC-2.0", "title": { - "en": "A name given to the resource" + "fi": "Creative Commons NimeƤ-EiKaupallinen 2.0 Yleinen (CC BY-NC 2.0)", + "en": "Creative Commons Attribution-NonCommercial 2.0 Generic (CC BY-NC 2.0", + "und": "Creative Commons NimeƤ-EiKaupallinen 2.0 Yleinen (CC BY-NC 2.0)" }, + "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-NC-2.0", "description": { "en": "Free account of the rights" - } + }, + "license": "https://creativecommons.org/licenses/by-nc/2.0/" }, { - "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/other", "title": { - "en": "A name given to the resource" - } + "fi": "Muu", + "en": "Other", + "und": "Muu" + }, + "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/other" } ], "access_url": { @@ -137,7 +178,14 @@ "+358501231235" ], "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ContactPerson" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ContactPerson", + "pref_label": { + "en": "Point of contact", + "fi": "Yhteystiedot", + "sv": "Kontaktuppgifter", + "und": "Yhteystiedot" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] }, "type": { @@ -166,7 +214,14 @@ "+358501231235" ], "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCollector" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCollector", + "pref_label": { + "en": "Data Collector", + "fi": "Aineiston kerƤƤjƤ", + "sv": "Datainsamling", + "und": "Aineiston kerƤƤjƤ" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] }, "type": { @@ -232,15 +287,27 @@ "alt": "11.111", "full_address": "The complete address written as a string, with or without formatting", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + }, + "in_scheme": "http://www.yso.fi/onto/yso/places" } }, "lifecycle_event": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event/code/collected", + "in_scheme": 
"http://uri.suomi.fi/codelist/fairdata/lifecycle_event", "definition": { "en": "A statement or formal explanation of the meaning of a concept." }, - "in_scheme": "http://uri.of.filetype.concept/scheme" + "identifier": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event/code/collected", + "pref_label": { + "fi": "KerƤtty", + "en": "Collected", + "und": "KerƤtty" + } }, "used_entity": [ { @@ -276,7 +343,14 @@ "+358501231235" ], "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCurator" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCurator", + "pref_label": { + "en": "Data Curator", + "fi": "Aineiston kƤsittelijƤ", + "sv": "Databehandling", + "und": "Aineiston kƤsittelijƤ" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] } ], @@ -312,7 +386,14 @@ } ], "event_outcome": { - "identifier": "Success" + "identifier": "http://uri.suomi.fi/codelist/fairdata/event_outcome/code/success", + "pref_label": { + "en": "Success", + "fi": "Onnistunut", + "sv": "FramgĆ„ngsrik", + "und": "Onnistunut" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/event_outcome" }, "outcome_description": {"fi": "Onnistui hyvin", "en": "A grand success"} }, @@ -332,14 +413,34 @@ "alt": "11.111", "full_address": "The complete address written as a string, with or without formatting", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p105917" + "identifier": "http://www.yso.fi/onto/yso/p105917", + "pref_label": { + "fi": "EtelƤ-Suomi", + "sv": "Sƶdra Finland", + "en": "Southern Finland Province", + "und": "EtelƤ-Suomi" + }, + "in_scheme": "http://www.yso.fi/onto/yso/places" } }, "preservation_event": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/preservation_event/code/upd" + "identifier": "http://uri.suomi.fi/codelist/fairdata/preservation_event/code/upd", + "pref_label": { + "fi": "Objektin pƤivitys", + "en": "Object update", + "und": "Objektin pƤivitys" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/preservation_event" }, "event_outcome": { - "identifier": "Failure" + "identifier": "http://uri.suomi.fi/codelist/fairdata/event_outcome/code/failure", + "pref_label": { + "en": "Failure", + "fi": "EpƤonnistunut", + "sv": "Misslyckad", + "und": "EpƤonnistunut" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/event_outcome" }, "outcome_description": {"fi": "EpƤonnistui", "en": "A grand failure"} } @@ -364,7 +465,14 @@ } }, "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataManager" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataManager", + "pref_label": { + "en": "Data Manager", + "fi": "Aineiston hallinnoija", + "sv": "Dataadministration", + "und": "Aineiston hallinnoija" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] }], "creator": [ @@ -380,7 +488,14 @@ } }, "contributor_role": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/conceptualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/conceptualization", + "pref_label": { + "en": "Conceptualization", + "fi": "Tutkimuksen muotoilu", + "sv": "Formulering av forskningen", + "und": "Tutkimuksen muotoilu" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role" }] } ], @@ -402,10 +517,24 @@ }, "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor" + 
"identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor", + "pref_label": { + "en": "Distributor", + "fi": "Jakelija", + "sv": "Distributƶr", + "und": "Jakelija" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }, { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Sponsor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Sponsor", + "pref_label": { + "en": "Sponsor", + "fi": "Sponsori", + "sv": "Sponsor", + "und": "Sponsori" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" } ] } @@ -448,7 +577,14 @@ } }, "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor", + "pref_label": { + "en": "Distributor", + "fi": "Jakelija", + "sv": "Distributƶr", + "und": "Jakelija" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] }, "contributor": [ @@ -473,7 +609,14 @@ ] }, "contributor_role": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/funding_acquisition" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/funding_acquisition", + "pref_label": { + "en": "Funding acquisition", + "fi": "Rahoituksen hankinta", + "sv": "Anskaffande av finansiering", + "und": "Rahoituksen hankinta" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role" }] }, { @@ -497,7 +640,14 @@ ] }, "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ProjectLeader" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ProjectLeader", + "pref_label": { + "en": "Project Leader", + "fi": "Projektin johtaja", + "sv": "Projektledare", + "und": "Projektin johtaja" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] } ], @@ -521,7 +671,14 @@ "+358501232233" ], "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Editor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Editor", + "pref_label": { + "en": "Editor", + "fi": "Julkaisutoimittaja", + "sv": "Redaktƶr", + "und": "Julkaisutoimittaja" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] } ], @@ -538,7 +695,14 @@ "+358501231235" ], "contributor_type": [{ - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/HostingInstitution" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/HostingInstitution", + "pref_label": { + "en": "Hosting Institution", + "fi": "SƤilyttƤvƤ organisaatio", + "sv": "Bevarande organisation", + "und": "SƤilyttƤvƤ organisaatio" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type" }] } ], diff --git a/src/metax_api/tests/testdata/data_catalog_test_data_template.json b/src/metax_api/tests/testdata/data_catalog_test_data_template.json index 391c33fe..a615f4e0 100755 --- a/src/metax_api/tests/testdata/data_catalog_test_data_template.json +++ b/src/metax_api/tests/testdata/data_catalog_test_data_template.json @@ -9,10 +9,22 @@ "harvested": false, "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/fin" + "identifier": "http://lexvo.org/id/iso639-3/fin", + "title": { + "sv": "finska", + "en": "Finnish", + "fi": "suomi", + "und": "suomi" + } }, { - "identifier": "http://lexvo.org/id/iso639-3/eng" + 
"identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "sv": "engelska", + "fi": "englanti", + "und": "englanti" + } } ], "homepage": [ @@ -73,10 +85,15 @@ ], "access_type": [ { + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "definition": { + "en": "A statement or formal explanation of the meaning of a concept." + }, "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", "pref_label": { "fi": "Avoin", - "en": "Open" + "en": "Open", + "und": "Avoin" } } ], @@ -84,8 +101,9 @@ { "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-4.0", "title": { - "fi": "CC BY 4.0", - "en": "CC BY 4.0" + "fi": "Creative Commons NimeƤ 4.0 KansainvƤlinen (CC BY 4.0)", + "en": "Creative Commons Attribution 4.0 International (CC BY 4.0)", + "und": "Creative Commons NimeƤ 4.0 KansainvƤlinen (CC BY 4.0)" } } ] From f6f1915bacd9ef5914b2ebe0959564bbd55d2764 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Wed, 2 Jun 2021 10:27:19 +0300 Subject: [PATCH 024/160] updated test data and regenerated test data file --- .gitignore | 2 + .../catalog_record_test_data_template.json | 3 - ...og_record_test_data_template_full_att.json | 6 +- ...og_record_test_data_template_full_ida.json | 3 - .../data_catalog_test_data_template.json | 4 - src/metax_api/tests/testdata/test_data.json | 2110 ++++++++++++++--- 6 files changed, 1813 insertions(+), 315 deletions(-) diff --git a/.gitignore b/.gitignore index 53a6ee7d..54fa40cb 100755 --- a/.gitignore +++ b/.gitignore @@ -33,3 +33,5 @@ coverage.xml .hypothesis/ .pytest_cache/ pytestdebug.log + +venv diff --git a/src/metax_api/tests/testdata/catalog_record_test_data_template.json b/src/metax_api/tests/testdata/catalog_record_test_data_template.json index 748cc60f..2c518cfd 100755 --- a/src/metax_api/tests/testdata/catalog_record_test_data_template.json +++ b/src/metax_api/tests/testdata/catalog_record_test_data_template.json @@ -85,9 +85,6 @@ "access_rights": { "access_type": { "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", - "definition": { - "en": "A statement or formal explanation of the meaning of a concept." - }, "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", "pref_label": { "fi": "Avoin", diff --git a/src/metax_api/tests/testdata/catalog_record_test_data_template_full_att.json b/src/metax_api/tests/testdata/catalog_record_test_data_template_full_att.json index c3291c81..4bf26350 100755 --- a/src/metax_api/tests/testdata/catalog_record_test_data_template_full_att.json +++ b/src/metax_api/tests/testdata/catalog_record_test_data_template_full_att.json @@ -99,14 +99,12 @@ "available": "2014-01-15", "access_type": { "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", - "definition": { - "en": "A statement or formal explanation of the meaning of a concept." 
- }, "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", "pref_label": { "fi": "Avoin", "en": "Open", "und": "Avoin" + } }, "restriction_grounds": [{ "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other", @@ -843,4 +841,4 @@ "api_meta": { "version": 1 } -}} \ No newline at end of file +} \ No newline at end of file diff --git a/src/metax_api/tests/testdata/catalog_record_test_data_template_full_ida.json b/src/metax_api/tests/testdata/catalog_record_test_data_template_full_ida.json index f9eb1a76..bc9c3d89 100755 --- a/src/metax_api/tests/testdata/catalog_record_test_data_template_full_ida.json +++ b/src/metax_api/tests/testdata/catalog_record_test_data_template_full_ida.json @@ -99,9 +99,6 @@ "available": "2014-01-15", "access_type": { "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", - "definition": { - "en": "A statement or formal explanation of the meaning of a concept." - }, "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", "pref_label": { "fi": "Avoin", diff --git a/src/metax_api/tests/testdata/data_catalog_test_data_template.json b/src/metax_api/tests/testdata/data_catalog_test_data_template.json index a615f4e0..decbf52a 100755 --- a/src/metax_api/tests/testdata/data_catalog_test_data_template.json +++ b/src/metax_api/tests/testdata/data_catalog_test_data_template.json @@ -85,10 +85,6 @@ ], "access_type": [ { - "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", - "definition": { - "en": "A statement or formal explanation of the meaning of a concept." - }, "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", "pref_label": { "fi": "Avoin", diff --git a/src/metax_api/tests/testdata/test_data.json b/src/metax_api/tests/testdata/test_data.json index c9bac66e..b7c94240 100755 --- a/src/metax_api/tests/testdata/test_data.json +++ b/src/metax_api/tests/testdata/test_data.json @@ -4426,7 +4426,8 @@ "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", "pref_label": { "en": "Open", - "fi": "Avoin" + "fi": "Avoin", + "und": "Avoin" } } ], @@ -4457,8 +4458,9 @@ { "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-4.0", "title": { - "en": "CC BY 4.0", - "fi": "CC BY 4.0" + "en": "Creative Commons Attribution 4.0 International (CC BY 4.0)", + "fi": "Creative Commons Nime\u00e4 4.0 Kansainv\u00e4linen (CC BY 4.0)", + "und": "Creative Commons Nime\u00e4 4.0 Kansainv\u00e4linen (CC BY 4.0)" } } ] @@ -4494,10 +4496,22 @@ "issued": "2014-02-27", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/fin" + "identifier": "http://lexvo.org/id/iso639-3/fin", + "title": { + "en": "Finnish", + "fi": "suomi", + "sv": "finska", + "und": "suomi" + } }, { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "modified": "2014-01-17T08:19:58Z", @@ -4542,7 +4556,8 @@ "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", "pref_label": { "en": "Open", - "fi": "Avoin" + "fi": "Avoin", + "und": "Avoin" } } ], @@ -4573,8 +4588,9 @@ { "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-4.0", "title": { - "en": "CC BY 4.0", - "fi": "CC BY 4.0" + "en": "Creative Commons Attribution 4.0 International (CC BY 4.0)", + "fi": "Creative Commons Nime\u00e4 4.0 Kansainv\u00e4linen (CC BY 4.0)", + "und": "Creative Commons Nime\u00e4 4.0 
Kansainv\u00e4linen (CC BY 4.0)" } } ] @@ -4610,10 +4626,22 @@ "issued": "2014-02-27", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/fin" + "identifier": "http://lexvo.org/id/iso639-3/fin", + "title": { + "en": "Finnish", + "fi": "suomi", + "sv": "finska", + "und": "suomi" + } }, { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "modified": "2014-01-17T08:19:58Z", @@ -4658,7 +4686,8 @@ "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", "pref_label": { "en": "Open", - "fi": "Avoin" + "fi": "Avoin", + "und": "Avoin" } } ], @@ -4689,8 +4718,9 @@ { "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-4.0", "title": { - "en": "CC BY 4.0", - "fi": "CC BY 4.0" + "en": "Creative Commons Attribution 4.0 International (CC BY 4.0)", + "fi": "Creative Commons Nime\u00e4 4.0 Kansainv\u00e4linen (CC BY 4.0)", + "und": "Creative Commons Nime\u00e4 4.0 Kansainv\u00e4linen (CC BY 4.0)" } } ] @@ -4726,10 +4756,22 @@ "issued": "2014-02-27", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/fin" + "identifier": "http://lexvo.org/id/iso639-3/fin", + "title": { + "en": "Finnish", + "fi": "suomi", + "sv": "finska", + "und": "suomi" + } }, { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "modified": "2014-01-17T08:19:58Z", @@ -4774,7 +4816,8 @@ "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", "pref_label": { "en": "Open", - "fi": "Avoin" + "fi": "Avoin", + "und": "Avoin" } } ], @@ -4805,8 +4848,9 @@ { "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-4.0", "title": { - "en": "CC BY 4.0", - "fi": "CC BY 4.0" + "en": "Creative Commons Attribution 4.0 International (CC BY 4.0)", + "fi": "Creative Commons Nime\u00e4 4.0 Kansainv\u00e4linen (CC BY 4.0)", + "und": "Creative Commons Nime\u00e4 4.0 Kansainv\u00e4linen (CC BY 4.0)" } } ] @@ -4842,10 +4886,22 @@ "issued": "2014-02-27", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/fin" + "identifier": "http://lexvo.org/id/iso639-3/fin", + "title": { + "en": "Finnish", + "fi": "suomi", + "sv": "finska", + "und": "suomi" + } }, { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "modified": "2014-01-17T08:19:58Z", @@ -4890,7 +4946,8 @@ "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", "pref_label": { "en": "Open", - "fi": "Avoin" + "fi": "Avoin", + "und": "Avoin" } } ], @@ -4921,8 +4978,9 @@ { "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-4.0", "title": { - "en": "CC BY 4.0", - "fi": "CC BY 4.0" + "en": "Creative Commons Attribution 4.0 International (CC BY 4.0)", + "fi": "Creative Commons Nime\u00e4 4.0 Kansainv\u00e4linen (CC BY 4.0)", + "und": "Creative Commons Nime\u00e4 4.0 Kansainv\u00e4linen (CC BY 4.0)" } } ] @@ -4958,10 +5016,22 @@ "issued": "2014-02-27", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/fin" + "identifier": "http://lexvo.org/id/iso639-3/fin", + "title": { + "en": "Finnish", + "fi": "suomi", + "sv": "finska", + "und": "suomi" + } }, { - "identifier": "http://lexvo.org/id/iso639-3/eng" + 
"identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "modified": "2014-01-17T08:19:58Z", @@ -5006,7 +5076,8 @@ "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", "pref_label": { "en": "Open", - "fi": "Avoin" + "fi": "Avoin", + "und": "Avoin" } } ], @@ -5037,8 +5108,9 @@ { "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-4.0", "title": { - "en": "CC BY 4.0", - "fi": "CC BY 4.0" + "en": "Creative Commons Attribution 4.0 International (CC BY 4.0)", + "fi": "Creative Commons Nime\u00e4 4.0 Kansainv\u00e4linen (CC BY 4.0)", + "und": "Creative Commons Nime\u00e4 4.0 Kansainv\u00e4linen (CC BY 4.0)" } } ] @@ -5074,10 +5146,22 @@ "issued": "2014-02-27", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/fin" + "identifier": "http://lexvo.org/id/iso639-3/fin", + "title": { + "en": "Finnish", + "fi": "suomi", + "sv": "finska", + "und": "suomi" + } }, { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "modified": "2014-01-17T08:19:58Z", @@ -5122,7 +5206,8 @@ "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", "pref_label": { "en": "Open", - "fi": "Avoin" + "fi": "Avoin", + "und": "Avoin" } } ], @@ -5153,8 +5238,9 @@ { "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-4.0", "title": { - "en": "CC BY 4.0", - "fi": "CC BY 4.0" + "en": "Creative Commons Attribution 4.0 International (CC BY 4.0)", + "fi": "Creative Commons Nime\u00e4 4.0 Kansainv\u00e4linen (CC BY 4.0)", + "und": "Creative Commons Nime\u00e4 4.0 Kansainv\u00e4linen (CC BY 4.0)" } } ] @@ -5190,10 +5276,22 @@ "issued": "2014-02-27", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/fin" + "identifier": "http://lexvo.org/id/iso639-3/fin", + "title": { + "en": "Finnish", + "fi": "suomi", + "sv": "finska", + "und": "suomi" + } }, { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "modified": "2014-01-17T08:19:58Z", @@ -5238,7 +5336,8 @@ "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", "pref_label": { "en": "Open", - "fi": "Avoin" + "fi": "Avoin", + "und": "Avoin" } } ], @@ -5269,8 +5368,9 @@ { "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-4.0", "title": { - "en": "CC BY 4.0", - "fi": "CC BY 4.0" + "en": "Creative Commons Attribution 4.0 International (CC BY 4.0)", + "fi": "Creative Commons Nime\u00e4 4.0 Kansainv\u00e4linen (CC BY 4.0)", + "und": "Creative Commons Nime\u00e4 4.0 Kansainv\u00e4linen (CC BY 4.0)" } } ] @@ -5306,10 +5406,22 @@ "issued": "2014-02-27", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/fin" + "identifier": "http://lexvo.org/id/iso639-3/fin", + "title": { + "en": "Finnish", + "fi": "suomi", + "sv": "finska", + "und": "suomi" + } }, { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "modified": "2014-01-17T08:19:58Z", @@ -5635,7 +5747,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + 
"identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -5643,7 +5761,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -5696,7 +5821,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f96d1", @@ -5753,7 +5884,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -5761,7 +5898,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -5814,7 +5958,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f96d2", @@ -5871,7 +6021,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -5879,7 +6035,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -5932,7 +6095,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f96d3", @@ -5989,7 +6158,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": 
"http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -5997,7 +6172,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -6050,7 +6232,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f96d4", @@ -6107,7 +6295,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -6115,7 +6309,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -6168,7 +6369,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f96d5", @@ -6225,7 +6432,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -6233,7 +6446,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -6286,7 +6506,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f96d6", @@ -6343,7 +6569,13 @@ "research_dataset": { 
"access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -6351,7 +6583,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -6404,7 +6643,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f96d7", @@ -6461,7 +6706,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -6469,7 +6720,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -6519,7 +6777,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f96d8", @@ -6576,7 +6840,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -6584,7 +6854,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -6631,7 +6908,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f96d9", 
@@ -6689,7 +6972,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -6697,7 +6986,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -6744,7 +7040,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9610", @@ -6814,13 +7116,12 @@ "research_dataset": { "access_rights": { "access_type": { - "definition": { - "en": "A statement or formal explanation of the meaning of a concept." - }, "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", - "in_scheme": "http://uri.of.filetype.concept/scheme", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", "pref_label": { - "en": "pref label for this type" + "en": "Open", + "fi": "Avoin", + "und": "Avoin" } }, "access_url": { @@ -6844,7 +7145,8 @@ "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", "license": "https://url.of.license.which.applies.here.org", "title": { - "en": "A name given to the resource" + "en": "Apache Software License 2.0", + "und": "Apache Software License 2.0" } }, { @@ -6852,20 +7154,32 @@ "en": "Free account of the rights" }, "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-NC-2.0", + "license": "https://creativecommons.org/licenses/by-nc/2.0/", "title": { - "en": "A name given to the resource" + "en": "Creative Commons Attribution-NonCommercial 2.0 Generic (CC BY-NC 2.0", + "fi": "Creative Commons Nime\u00e4-EiKaupallinen 2.0 Yleinen (CC BY-NC 2.0)", + "und": "Creative Commons Nime\u00e4-EiKaupallinen 2.0 Yleinen (CC BY-NC 2.0)" } }, { "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/other", "title": { - "en": "A name given to the resource" + "en": "Other", + "fi": "Muu", + "und": "Muu" } } ], "restriction_grounds": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other" + "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds", + "pref_label": { + "en": "Restricted access due to other reasons", + "fi": "Saatavuutta rajoitettu muulla perusteella", + "sv": "Begr\u00e4nsad \u00e5tkomst av \u00f6vriga sk\u00e4l", + "und": "Saatavuutta rajoitettu muulla perusteella" + } } ] }, @@ -6875,7 +7189,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/funding_acquisition" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/funding_acquisition", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + 
"pref_label": { + "en": "Funding acquisition", + "fi": "Rahoituksen hankinta", + "sv": "Anskaffande av finansiering", + "und": "Rahoituksen hankinta" + } } ], "email": "kalle.kontribuuttaaja@csc.fi", @@ -6901,7 +7222,14 @@ "@type": "Person", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ProjectLeader" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ProjectLeader", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Project Leader", + "fi": "Projektin johtaja", + "sv": "Projektledare", + "und": "Projektin johtaja" + } } ], "email": "franzibald.kontribuuttaaja@csc.fi", @@ -6929,7 +7257,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/conceptualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/conceptualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Conceptualization", + "fi": "Tutkimuksen muotoilu", + "sv": "Formulering av forskningen", + "und": "Tutkimuksen muotoilu" + } } ], "member_of": { @@ -6948,10 +7283,24 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Distributor", + "fi": "Jakelija", + "sv": "Distribut\u00f6r", + "und": "Jakelija" + } }, { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Sponsor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Sponsor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Sponsor", + "fi": "Sponsori", + "sv": "Sponsor", + "und": "Sponsori" + } } ], "identifier": "id:of:curator:default", @@ -7036,7 +7385,13 @@ ], "infrastructure": [ { - "identifier": "urn-nbn-fi-research-infras-2016072530" + "identifier": "urn-nbn-fi-research-infras-2016072530", + "in_scheme": "https://avaa.tdata.fi/api/jsonws/tupa-portlet.Infrastructures/get-all-infrastructures", + "pref_label": { + "en": "Accelerator laboratory of the university of Jyv\u00e4skyl\u00e4", + "fi": "Jyv\u00e4skyl\u00e4n yliopiston fysiikan laitoksen Kiihdytinlaboratorio", + "und": "Jyv\u00e4skyl\u00e4n yliopiston fysiikan laitoksen Kiihdytinlaboratorio" + } } ], "is_output_of": [ @@ -7050,7 +7405,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Editor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Editor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Editor", + "fi": "Julkaisutoimittaja", + "sv": "Redakt\u00f6r", + "und": "Julkaisutoimittaja" + } } ], "email": "rahoitus@rahaorg.fi", @@ -7073,7 +7435,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/HostingInstitution" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/HostingInstitution", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Hosting Institution", + "fi": "S\u00e4ilytt\u00e4v\u00e4 organisaatio", + "sv": "Bevarande organisation", + 
"und": "S\u00e4ilytt\u00e4v\u00e4 organisaatio" + } } ], "email": "info@csc.fi", @@ -7097,7 +7466,13 @@ ], "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9611", @@ -7110,7 +7485,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ContactPerson" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ContactPerson", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Point of contact", + "fi": "Yhteystiedot", + "sv": "Kontaktuppgifter", + "und": "Yhteystiedot" + } } ], "email": "info@csc.fi", @@ -7141,7 +7523,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCollector" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCollector", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Collector", + "fi": "Aineiston ker\u00e4\u00e4j\u00e4", + "sv": "Datainsamling", + "und": "Aineiston ker\u00e4\u00e4j\u00e4" + } } ], "email": "info@csc.fi", @@ -7173,14 +7562,26 @@ "en": "Description of provenance activity" }, "event_outcome": { - "identifier": "Success" + "identifier": "http://uri.suomi.fi/codelist/fairdata/event_outcome/code/success", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/event_outcome", + "pref_label": { + "en": "Success", + "fi": "Onnistunut", + "sv": "Framg\u00e5ngsrik", + "und": "Onnistunut" + } }, "lifecycle_event": { "definition": { "en": "A statement or formal explanation of the meaning of a concept." 
}, "identifier": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event/code/collected", - "in_scheme": "http://uri.of.filetype.concept/scheme" + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event", + "pref_label": { + "en": "Collected", + "fi": "Ker\u00e4tty", + "und": "Ker\u00e4tty" + } }, "outcome_description": { "en": "A grand success", @@ -7191,7 +7592,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } }, "temporal": { @@ -7258,7 +7666,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCurator" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCurator", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Curator", + "fi": "Aineiston k\u00e4sittelij\u00e4", + "sv": "Databehandling", + "und": "Aineiston k\u00e4sittelij\u00e4" + } } ], "email": "info@csc.fi", @@ -7278,21 +7693,41 @@ "en": "Description of other provenance activity" }, "event_outcome": { - "identifier": "Failure" + "identifier": "http://uri.suomi.fi/codelist/fairdata/event_outcome/code/failure", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/event_outcome", + "pref_label": { + "en": "Failure", + "fi": "Ep\u00e4onnistunut", + "sv": "Misslyckad", + "und": "Ep\u00e4onnistunut" + } }, "outcome_description": { "en": "A grand failure", "fi": "Ep\u00e4onnistui" }, "preservation_event": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/preservation_event/code/upd" + "identifier": "http://uri.suomi.fi/codelist/fairdata/preservation_event/code/upd", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/preservation_event", + "pref_label": { + "en": "Object update", + "fi": "Objektin p\u00e4ivitys", + "und": "Objektin p\u00e4ivitys" + } }, "spatial": { "alt": "11.111", "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p105917" + "identifier": "http://www.yso.fi/onto/yso/p105917", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Southern Finland Province", + "fi": "Etel\u00e4-Suomi", + "sv": "S\u00f6dra Finland", + "und": "Etel\u00e4-Suomi" + } } }, "temporal": { @@ -7308,7 +7743,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Distributor", + "fi": "Jakelija", + "sv": "Distribut\u00f6r", + "und": "Jakelija" + } } ], "email": "info@csc.fi", @@ -7426,7 +7868,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataManager" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataManager", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Manager", + "fi": "Aineiston 
hallinnoija", + "sv": "Dataadministration", + "und": "Aineiston hallinnoija" + } } ], "email": "info@csc.fi", @@ -7457,7 +7906,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } }, { @@ -7465,7 +7921,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name 2", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } } ], @@ -7560,13 +8023,12 @@ "research_dataset": { "access_rights": { "access_type": { - "definition": { - "en": "A statement or formal explanation of the meaning of a concept." - }, "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", - "in_scheme": "http://uri.of.filetype.concept/scheme", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", "pref_label": { - "en": "pref label for this type" + "en": "Open", + "fi": "Avoin", + "und": "Avoin" } }, "access_url": { @@ -7590,7 +8052,8 @@ "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", "license": "https://url.of.license.which.applies.here.org", "title": { - "en": "A name given to the resource" + "en": "Apache Software License 2.0", + "und": "Apache Software License 2.0" } }, { @@ -7598,20 +8061,32 @@ "en": "Free account of the rights" }, "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-NC-2.0", + "license": "https://creativecommons.org/licenses/by-nc/2.0/", "title": { - "en": "A name given to the resource" + "en": "Creative Commons Attribution-NonCommercial 2.0 Generic (CC BY-NC 2.0", + "fi": "Creative Commons Nime\u00e4-EiKaupallinen 2.0 Yleinen (CC BY-NC 2.0)", + "und": "Creative Commons Nime\u00e4-EiKaupallinen 2.0 Yleinen (CC BY-NC 2.0)" } }, { "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/other", "title": { - "en": "A name given to the resource" + "en": "Other", + "fi": "Muu", + "und": "Muu" } } ], "restriction_grounds": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other" + "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds", + "pref_label": { + "en": "Restricted access due to other reasons", + "fi": "Saatavuutta rajoitettu muulla perusteella", + "sv": "Begr\u00e4nsad \u00e5tkomst av \u00f6vriga sk\u00e4l", + "und": "Saatavuutta rajoitettu muulla perusteella" + } } ] }, @@ -7621,7 +8096,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/funding_acquisition" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/funding_acquisition", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Funding acquisition", + "fi": "Rahoituksen hankinta", + "sv": "Anskaffande av finansiering", + "und": "Rahoituksen hankinta" + } } ], "email": "kalle.kontribuuttaaja@csc.fi", @@ -7647,7 
+8129,14 @@ "@type": "Person", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ProjectLeader" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ProjectLeader", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Project Leader", + "fi": "Projektin johtaja", + "sv": "Projektledare", + "und": "Projektin johtaja" + } } ], "email": "franzibald.kontribuuttaaja@csc.fi", @@ -7675,7 +8164,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/conceptualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/conceptualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Conceptualization", + "fi": "Tutkimuksen muotoilu", + "sv": "Formulering av forskningen", + "und": "Tutkimuksen muotoilu" + } } ], "member_of": { @@ -7694,10 +8190,24 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Distributor", + "fi": "Jakelija", + "sv": "Distribut\u00f6r", + "und": "Jakelija" + } }, { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Sponsor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Sponsor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Sponsor", + "fi": "Sponsori", + "sv": "Sponsor", + "und": "Sponsori" + } } ], "identifier": "id:of:curator:default", @@ -7782,7 +8292,13 @@ ], "infrastructure": [ { - "identifier": "urn-nbn-fi-research-infras-2016072530" + "identifier": "urn-nbn-fi-research-infras-2016072530", + "in_scheme": "https://avaa.tdata.fi/api/jsonws/tupa-portlet.Infrastructures/get-all-infrastructures", + "pref_label": { + "en": "Accelerator laboratory of the university of Jyv\u00e4skyl\u00e4", + "fi": "Jyv\u00e4skyl\u00e4n yliopiston fysiikan laitoksen Kiihdytinlaboratorio", + "und": "Jyv\u00e4skyl\u00e4n yliopiston fysiikan laitoksen Kiihdytinlaboratorio" + } } ], "is_output_of": [ @@ -7796,7 +8312,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Editor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Editor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Editor", + "fi": "Julkaisutoimittaja", + "sv": "Redakt\u00f6r", + "und": "Julkaisutoimittaja" + } } ], "email": "rahoitus@rahaorg.fi", @@ -7819,7 +8342,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/HostingInstitution" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/HostingInstitution", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Hosting Institution", + "fi": "S\u00e4ilytt\u00e4v\u00e4 organisaatio", + "sv": "Bevarande organisation", + "und": "S\u00e4ilytt\u00e4v\u00e4 organisaatio" + } } ], "email": "info@csc.fi", @@ -7843,7 +8373,13 @@ ], "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": 
"http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9612", @@ -7856,7 +8392,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ContactPerson" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ContactPerson", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Point of contact", + "fi": "Yhteystiedot", + "sv": "Kontaktuppgifter", + "und": "Yhteystiedot" + } } ], "email": "info@csc.fi", @@ -7887,7 +8430,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCollector" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCollector", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Collector", + "fi": "Aineiston ker\u00e4\u00e4j\u00e4", + "sv": "Datainsamling", + "und": "Aineiston ker\u00e4\u00e4j\u00e4" + } } ], "email": "info@csc.fi", @@ -7919,14 +8469,26 @@ "en": "Description of provenance activity" }, "event_outcome": { - "identifier": "Success" + "identifier": "http://uri.suomi.fi/codelist/fairdata/event_outcome/code/success", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/event_outcome", + "pref_label": { + "en": "Success", + "fi": "Onnistunut", + "sv": "Framg\u00e5ngsrik", + "und": "Onnistunut" + } }, "lifecycle_event": { "definition": { "en": "A statement or formal explanation of the meaning of a concept." }, "identifier": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event/code/collected", - "in_scheme": "http://uri.of.filetype.concept/scheme" + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event", + "pref_label": { + "en": "Collected", + "fi": "Ker\u00e4tty", + "und": "Ker\u00e4tty" + } }, "outcome_description": { "en": "A grand success", @@ -7937,7 +8499,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } }, "temporal": { @@ -8004,7 +8573,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCurator" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCurator", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Curator", + "fi": "Aineiston k\u00e4sittelij\u00e4", + "sv": "Databehandling", + "und": "Aineiston k\u00e4sittelij\u00e4" + } } ], "email": "info@csc.fi", @@ -8024,21 +8600,41 @@ "en": "Description of other provenance activity" }, "event_outcome": { - "identifier": "Failure" + "identifier": "http://uri.suomi.fi/codelist/fairdata/event_outcome/code/failure", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/event_outcome", + "pref_label": { + "en": "Failure", + "fi": "Ep\u00e4onnistunut", + "sv": "Misslyckad", + "und": "Ep\u00e4onnistunut" + } }, "outcome_description": { "en": "A grand failure", "fi": "Ep\u00e4onnistui" }, "preservation_event": { - "identifier": 
"http://uri.suomi.fi/codelist/fairdata/preservation_event/code/upd" + "identifier": "http://uri.suomi.fi/codelist/fairdata/preservation_event/code/upd", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/preservation_event", + "pref_label": { + "en": "Object update", + "fi": "Objektin p\u00e4ivitys", + "und": "Objektin p\u00e4ivitys" + } }, "spatial": { "alt": "11.111", "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p105917" + "identifier": "http://www.yso.fi/onto/yso/p105917", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Southern Finland Province", + "fi": "Etel\u00e4-Suomi", + "sv": "S\u00f6dra Finland", + "und": "Etel\u00e4-Suomi" + } } }, "temporal": { @@ -8054,7 +8650,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Distributor", + "fi": "Jakelija", + "sv": "Distribut\u00f6r", + "und": "Jakelija" + } } ], "email": "info@csc.fi", @@ -8172,7 +8775,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataManager" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataManager", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Manager", + "fi": "Aineiston hallinnoija", + "sv": "Dataadministration", + "und": "Aineiston hallinnoija" + } } ], "email": "info@csc.fi", @@ -8203,7 +8813,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } }, { @@ -8211,7 +8828,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name 2", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } } ], @@ -8374,13 +8998,12 @@ "research_dataset": { "access_rights": { "access_type": { - "definition": { - "en": "A statement or formal explanation of the meaning of a concept." 
- }, "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", - "in_scheme": "http://uri.of.filetype.concept/scheme", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", "pref_label": { - "en": "pref label for this type" + "en": "Open", + "fi": "Avoin", + "und": "Avoin" } }, "access_url": { @@ -8404,7 +9027,8 @@ "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", "license": "https://url.of.license.which.applies.here.org", "title": { - "en": "A name given to the resource" + "en": "Apache Software License 2.0", + "und": "Apache Software License 2.0" } }, { @@ -8412,20 +9036,32 @@ "en": "Free account of the rights" }, "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-NC-2.0", + "license": "https://creativecommons.org/licenses/by-nc/2.0/", "title": { - "en": "A name given to the resource" + "en": "Creative Commons Attribution-NonCommercial 2.0 Generic (CC BY-NC 2.0", + "fi": "Creative Commons Nime\u00e4-EiKaupallinen 2.0 Yleinen (CC BY-NC 2.0)", + "und": "Creative Commons Nime\u00e4-EiKaupallinen 2.0 Yleinen (CC BY-NC 2.0)" } }, { "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/other", "title": { - "en": "A name given to the resource" + "en": "Other", + "fi": "Muu", + "und": "Muu" } } ], "restriction_grounds": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other" + "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds", + "pref_label": { + "en": "Restricted access due to other reasons", + "fi": "Saatavuutta rajoitettu muulla perusteella", + "sv": "Begr\u00e4nsad \u00e5tkomst av \u00f6vriga sk\u00e4l", + "und": "Saatavuutta rajoitettu muulla perusteella" + } } ] }, @@ -8435,7 +9071,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/funding_acquisition" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/funding_acquisition", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Funding acquisition", + "fi": "Rahoituksen hankinta", + "sv": "Anskaffande av finansiering", + "und": "Rahoituksen hankinta" + } } ], "email": "kalle.kontribuuttaaja@csc.fi", @@ -8461,7 +9104,14 @@ "@type": "Person", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ProjectLeader" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ProjectLeader", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Project Leader", + "fi": "Projektin johtaja", + "sv": "Projektledare", + "und": "Projektin johtaja" + } } ], "email": "franzibald.kontribuuttaaja@csc.fi", @@ -8489,7 +9139,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/conceptualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/conceptualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Conceptualization", + "fi": "Tutkimuksen muotoilu", + "sv": "Formulering av forskningen", + "und": "Tutkimuksen muotoilu" + } } ], "member_of": { @@ -8508,10 +9165,24 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor" + 
"identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Distributor", + "fi": "Jakelija", + "sv": "Distribut\u00f6r", + "und": "Jakelija" + } }, { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Sponsor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Sponsor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Sponsor", + "fi": "Sponsori", + "sv": "Sponsor", + "und": "Sponsori" + } } ], "identifier": "id:of:curator:default", @@ -8620,7 +9291,13 @@ ], "infrastructure": [ { - "identifier": "urn-nbn-fi-research-infras-2016072530" + "identifier": "urn-nbn-fi-research-infras-2016072530", + "in_scheme": "https://avaa.tdata.fi/api/jsonws/tupa-portlet.Infrastructures/get-all-infrastructures", + "pref_label": { + "en": "Accelerator laboratory of the university of Jyv\u00e4skyl\u00e4", + "fi": "Jyv\u00e4skyl\u00e4n yliopiston fysiikan laitoksen Kiihdytinlaboratorio", + "und": "Jyv\u00e4skyl\u00e4n yliopiston fysiikan laitoksen Kiihdytinlaboratorio" + } } ], "is_output_of": [ @@ -8634,7 +9311,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Editor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Editor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Editor", + "fi": "Julkaisutoimittaja", + "sv": "Redakt\u00f6r", + "und": "Julkaisutoimittaja" + } } ], "email": "rahoitus@rahaorg.fi", @@ -8657,7 +9341,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/HostingInstitution" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/HostingInstitution", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Hosting Institution", + "fi": "S\u00e4ilytt\u00e4v\u00e4 organisaatio", + "sv": "Bevarande organisation", + "und": "S\u00e4ilytt\u00e4v\u00e4 organisaatio" + } } ], "email": "info@csc.fi", @@ -8681,7 +9372,13 @@ ], "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9613", @@ -8694,7 +9391,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ContactPerson" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ContactPerson", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Point of contact", + "fi": "Yhteystiedot", + "sv": "Kontaktuppgifter", + "und": "Yhteystiedot" + } } ], "email": "info@csc.fi", @@ -8725,7 +9429,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCollector" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCollector", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Collector", + "fi": "Aineiston ker\u00e4\u00e4j\u00e4", + "sv": "Datainsamling", + "und": "Aineiston ker\u00e4\u00e4j\u00e4" + } } ], 
"email": "info@csc.fi", @@ -8757,14 +9468,26 @@ "en": "Description of provenance activity" }, "event_outcome": { - "identifier": "Success" + "identifier": "http://uri.suomi.fi/codelist/fairdata/event_outcome/code/success", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/event_outcome", + "pref_label": { + "en": "Success", + "fi": "Onnistunut", + "sv": "Framg\u00e5ngsrik", + "und": "Onnistunut" + } }, "lifecycle_event": { "definition": { "en": "A statement or formal explanation of the meaning of a concept." }, "identifier": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event/code/collected", - "in_scheme": "http://uri.of.filetype.concept/scheme" + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event", + "pref_label": { + "en": "Collected", + "fi": "Ker\u00e4tty", + "und": "Ker\u00e4tty" + } }, "outcome_description": { "en": "A grand success", @@ -8775,7 +9498,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } }, "temporal": { @@ -8842,7 +9572,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCurator" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCurator", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Curator", + "fi": "Aineiston k\u00e4sittelij\u00e4", + "sv": "Databehandling", + "und": "Aineiston k\u00e4sittelij\u00e4" + } } ], "email": "info@csc.fi", @@ -8862,21 +9599,41 @@ "en": "Description of other provenance activity" }, "event_outcome": { - "identifier": "Failure" + "identifier": "http://uri.suomi.fi/codelist/fairdata/event_outcome/code/failure", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/event_outcome", + "pref_label": { + "en": "Failure", + "fi": "Ep\u00e4onnistunut", + "sv": "Misslyckad", + "und": "Ep\u00e4onnistunut" + } }, "outcome_description": { "en": "A grand failure", "fi": "Ep\u00e4onnistui" }, "preservation_event": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/preservation_event/code/upd" + "identifier": "http://uri.suomi.fi/codelist/fairdata/preservation_event/code/upd", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/preservation_event", + "pref_label": { + "en": "Object update", + "fi": "Objektin p\u00e4ivitys", + "und": "Objektin p\u00e4ivitys" + } }, "spatial": { "alt": "11.111", "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p105917" + "identifier": "http://www.yso.fi/onto/yso/p105917", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Southern Finland Province", + "fi": "Etel\u00e4-Suomi", + "sv": "S\u00f6dra Finland", + "und": "Etel\u00e4-Suomi" + } } }, "temporal": { @@ -8892,7 +9649,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + 
"en": "Distributor", + "fi": "Jakelija", + "sv": "Distribut\u00f6r", + "und": "Jakelija" + } } ], "email": "info@csc.fi", @@ -9010,7 +9774,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataManager" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataManager", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Manager", + "fi": "Aineiston hallinnoija", + "sv": "Dataadministration", + "und": "Aineiston hallinnoija" + } } ], "email": "info@csc.fi", @@ -9041,7 +9812,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } }, { @@ -9049,7 +9827,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name 2", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } } ], @@ -9120,7 +9905,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -9128,7 +9919,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -9145,7 +9943,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ResearchGroup" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ResearchGroup", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "ResearchGroup", + "fi": "Tutkimusryhm\u00e4", + "sv": "Forskningsgrupp", + "und": "Tutkimusryhm\u00e4" + } } ], "identifier": "10076-E700", @@ -9161,7 +9966,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9614", @@ -9233,7 +10044,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": 
"Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -9241,7 +10058,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -9258,7 +10082,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ResearchGroup" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ResearchGroup", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "ResearchGroup", + "fi": "Tutkimusryhm\u00e4", + "sv": "Forskningsgrupp", + "und": "Tutkimusryhm\u00e4" + } } ], "identifier": "10076-E700", @@ -9274,7 +10105,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9615", @@ -9346,7 +10183,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -9354,7 +10197,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -9371,7 +10221,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ResearchGroup" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ResearchGroup", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "ResearchGroup", + "fi": "Tutkimusryhm\u00e4", + "sv": "Forskningsgrupp", + "und": "Tutkimusryhm\u00e4" + } } ], "identifier": "10076-E700", @@ -9387,7 +10244,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9616", @@ -9459,7 +10322,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -9467,7 +10336,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": 
"http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -9484,7 +10360,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ResearchGroup" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ResearchGroup", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "ResearchGroup", + "fi": "Tutkimusryhm\u00e4", + "sv": "Forskningsgrupp", + "und": "Tutkimusryhm\u00e4" + } } ], "identifier": "10076-E700", @@ -9500,7 +10383,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9617", @@ -9572,7 +10461,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -9580,7 +10475,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -9597,7 +10499,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ResearchGroup" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ResearchGroup", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "ResearchGroup", + "fi": "Tutkimusryhm\u00e4", + "sv": "Forskningsgrupp", + "und": "Tutkimusryhm\u00e4" + } } ], "identifier": "10076-E700", @@ -9613,7 +10522,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9618", @@ -9685,7 +10600,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -9693,7 +10614,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": 
"http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -9710,7 +10638,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ResearchGroup" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ResearchGroup", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "ResearchGroup", + "fi": "Tutkimusryhm\u00e4", + "sv": "Forskningsgrupp", + "und": "Tutkimusryhm\u00e4" + } } ], "identifier": "10076-E700", @@ -9726,7 +10661,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9619", @@ -9798,7 +10739,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -9806,7 +10753,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -9837,7 +10791,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9620", @@ -9909,7 +10869,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -9917,7 +10883,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -9948,7 +10921,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9621", @@ -10020,7 
+10999,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -10028,7 +11013,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -10059,7 +11051,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9622", @@ -10131,7 +11129,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -10139,7 +11143,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -10170,7 +11181,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9623", @@ -10241,13 +11258,12 @@ "research_dataset": { "access_rights": { "access_type": { - "definition": { - "en": "A statement or formal explanation of the meaning of a concept." 
- }, "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", - "in_scheme": "http://uri.of.filetype.concept/scheme", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", "pref_label": { - "en": "pref label for this type" + "en": "Open", + "fi": "Avoin", + "und": "Avoin" } }, "access_url": { @@ -10271,13 +11287,21 @@ "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", "license": "https://url.of.license.which.applies.here.org", "title": { - "en": "A name given to the resource" + "en": "Apache Software License 2.0", + "und": "Apache Software License 2.0" } } ], "restriction_grounds": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other" + "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds", + "pref_label": { + "en": "Restricted access due to other reasons", + "fi": "Saatavuutta rajoitettu muulla perusteella", + "sv": "Begr\u00e4nsad \u00e5tkomst av \u00f6vriga sk\u00e4l", + "und": "Saatavuutta rajoitettu muulla perusteella" + } } ] }, @@ -10287,7 +11311,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/funding_acquisition" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/funding_acquisition", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Funding acquisition", + "fi": "Rahoituksen hankinta", + "sv": "Anskaffande av finansiering", + "und": "Rahoituksen hankinta" + } } ], "email": "kalle.kontribuuttaaja@csc.fi", @@ -10313,7 +11344,14 @@ "@type": "Person", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ProjectLeader" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ProjectLeader", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Project Leader", + "fi": "Projektin johtaja", + "sv": "Projektledare", + "und": "Projektin johtaja" + } } ], "email": "franzibald.kontribuuttaaja@csc.fi", @@ -10341,7 +11379,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/conceptualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/conceptualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Conceptualization", + "fi": "Tutkimuksen muotoilu", + "sv": "Formulering av forskningen", + "und": "Tutkimuksen muotoilu" + } } ], "member_of": { @@ -10360,7 +11405,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Distributor", + "fi": "Jakelija", + "sv": "Distribut\u00f6r", + "und": "Jakelija" + } } ], "identifier": "id:of:curator:default", @@ -10395,7 +11447,13 @@ ], "infrastructure": [ { - "identifier": "urn-nbn-fi-research-infras-2016072530" + "identifier": "urn-nbn-fi-research-infras-2016072530", + "in_scheme": "https://avaa.tdata.fi/api/jsonws/tupa-portlet.Infrastructures/get-all-infrastructures", + "pref_label": { + "en": "Accelerator laboratory of the university of Jyv\u00e4skyl\u00e4", 
+ "fi": "Jyv\u00e4skyl\u00e4n yliopiston fysiikan laitoksen Kiihdytinlaboratorio", + "und": "Jyv\u00e4skyl\u00e4n yliopiston fysiikan laitoksen Kiihdytinlaboratorio" + } } ], "is_output_of": [ @@ -10409,7 +11467,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Editor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Editor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Editor", + "fi": "Julkaisutoimittaja", + "sv": "Redakt\u00f6r", + "und": "Julkaisutoimittaja" + } } ], "email": "rahoitus@rahaorg.fi", @@ -10432,7 +11497,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/HostingInstitution" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/HostingInstitution", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Hosting Institution", + "fi": "S\u00e4ilytt\u00e4v\u00e4 organisaatio", + "sv": "Bevarande organisation", + "und": "S\u00e4ilytt\u00e4v\u00e4 organisaatio" + } } ], "email": "info@csc.fi", @@ -10456,7 +11528,13 @@ ], "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9624", @@ -10469,7 +11547,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ContactPerson" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ContactPerson", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Point of contact", + "fi": "Yhteystiedot", + "sv": "Kontaktuppgifter", + "und": "Yhteystiedot" + } } ], "email": "info@csc.fi", @@ -10500,7 +11585,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCollector" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCollector", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Collector", + "fi": "Aineiston ker\u00e4\u00e4j\u00e4", + "sv": "Datainsamling", + "und": "Aineiston ker\u00e4\u00e4j\u00e4" + } } ], "email": "info@csc.fi", @@ -10532,14 +11624,26 @@ "en": "Description of provenance activity" }, "event_outcome": { - "identifier": "Success" + "identifier": "http://uri.suomi.fi/codelist/fairdata/event_outcome/code/success", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/event_outcome", + "pref_label": { + "en": "Success", + "fi": "Onnistunut", + "sv": "Framg\u00e5ngsrik", + "und": "Onnistunut" + } }, "lifecycle_event": { "definition": { "en": "A statement or formal explanation of the meaning of a concept." 
}, "identifier": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event/code/collected", - "in_scheme": "http://uri.of.filetype.concept/scheme" + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event", + "pref_label": { + "en": "Collected", + "fi": "Ker\u00e4tty", + "und": "Ker\u00e4tty" + } }, "outcome_description": { "en": "A grand success", @@ -10550,7 +11654,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } }, "temporal": { @@ -10617,7 +11728,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCurator" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCurator", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Curator", + "fi": "Aineiston k\u00e4sittelij\u00e4", + "sv": "Databehandling", + "und": "Aineiston k\u00e4sittelij\u00e4" + } } ], "email": "info@csc.fi", @@ -10637,21 +11755,41 @@ "en": "Description of other provenance activity" }, "event_outcome": { - "identifier": "Failure" + "identifier": "http://uri.suomi.fi/codelist/fairdata/event_outcome/code/failure", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/event_outcome", + "pref_label": { + "en": "Failure", + "fi": "Ep\u00e4onnistunut", + "sv": "Misslyckad", + "und": "Ep\u00e4onnistunut" + } }, "outcome_description": { "en": "A grand failure", "fi": "Ep\u00e4onnistui" }, "preservation_event": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/preservation_event/code/upd" + "identifier": "http://uri.suomi.fi/codelist/fairdata/preservation_event/code/upd", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/preservation_event", + "pref_label": { + "en": "Object update", + "fi": "Objektin p\u00e4ivitys", + "und": "Objektin p\u00e4ivitys" + } }, "spatial": { "alt": "11.111", "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p105917" + "identifier": "http://www.yso.fi/onto/yso/p105917", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Southern Finland Province", + "fi": "Etel\u00e4-Suomi", + "sv": "S\u00f6dra Finland", + "und": "Etel\u00e4-Suomi" + } } }, "temporal": { @@ -10667,7 +11805,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Distributor", + "fi": "Jakelija", + "sv": "Distribut\u00f6r", + "und": "Jakelija" + } } ], "email": "info@csc.fi", @@ -10817,9 +11962,10 @@ "en": "Free account of the rights" }, "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", - "license": "https://url.of.license.which.applies.org", + "license": "https://url.of.license.which.applies.here.org", "title": { - "en": "A name given to the resource" + "en": "Apache Software License 2.0", + "und": "Apache Software License 2.0" } 
} ], @@ -10853,9 +11999,10 @@ "en": "Free account of the rights" }, "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", - "license": "https://url.of.license.which.applies.org", + "license": "https://url.of.license.which.applies.here.org", "title": { - "en": "A name given to the resource" + "en": "Apache Software License 2.0", + "und": "Apache Software License 2.0" } } ], @@ -10879,7 +12026,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataManager" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataManager", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Manager", + "fi": "Aineiston hallinnoija", + "sv": "Dataadministration", + "und": "Aineiston hallinnoija" + } } ], "email": "info@csc.fi", @@ -10910,7 +12064,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } }, { @@ -10918,7 +12079,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name 2", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } } ], @@ -10978,13 +12146,12 @@ "research_dataset": { "access_rights": { "access_type": { - "definition": { - "en": "A statement or formal explanation of the meaning of a concept." 
- }, "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", - "in_scheme": "http://uri.of.filetype.concept/scheme", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", "pref_label": { - "en": "pref label for this type" + "en": "Open", + "fi": "Avoin", + "und": "Avoin" } }, "access_url": { @@ -11008,13 +12175,21 @@ "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", "license": "https://url.of.license.which.applies.here.org", "title": { - "en": "A name given to the resource" + "en": "Apache Software License 2.0", + "und": "Apache Software License 2.0" } } ], "restriction_grounds": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other" + "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds", + "pref_label": { + "en": "Restricted access due to other reasons", + "fi": "Saatavuutta rajoitettu muulla perusteella", + "sv": "Begr\u00e4nsad \u00e5tkomst av \u00f6vriga sk\u00e4l", + "und": "Saatavuutta rajoitettu muulla perusteella" + } } ] }, @@ -11024,7 +12199,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/funding_acquisition" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/funding_acquisition", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Funding acquisition", + "fi": "Rahoituksen hankinta", + "sv": "Anskaffande av finansiering", + "und": "Rahoituksen hankinta" + } } ], "email": "kalle.kontribuuttaaja@csc.fi", @@ -11050,7 +12232,14 @@ "@type": "Person", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ProjectLeader" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ProjectLeader", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Project Leader", + "fi": "Projektin johtaja", + "sv": "Projektledare", + "und": "Projektin johtaja" + } } ], "email": "franzibald.kontribuuttaaja@csc.fi", @@ -11078,7 +12267,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/conceptualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/conceptualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Conceptualization", + "fi": "Tutkimuksen muotoilu", + "sv": "Formulering av forskningen", + "und": "Tutkimuksen muotoilu" + } } ], "member_of": { @@ -11097,7 +12293,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Distributor", + "fi": "Jakelija", + "sv": "Distribut\u00f6r", + "und": "Jakelija" + } } ], "identifier": "id:of:curator:default", @@ -11132,7 +12335,13 @@ ], "infrastructure": [ { - "identifier": "urn-nbn-fi-research-infras-2016072530" + "identifier": "urn-nbn-fi-research-infras-2016072530", + "in_scheme": "https://avaa.tdata.fi/api/jsonws/tupa-portlet.Infrastructures/get-all-infrastructures", + "pref_label": { + "en": "Accelerator laboratory of the university of Jyv\u00e4skyl\u00e4", 
+ "fi": "Jyv\u00e4skyl\u00e4n yliopiston fysiikan laitoksen Kiihdytinlaboratorio", + "und": "Jyv\u00e4skyl\u00e4n yliopiston fysiikan laitoksen Kiihdytinlaboratorio" + } } ], "is_output_of": [ @@ -11146,7 +12355,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Editor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Editor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Editor", + "fi": "Julkaisutoimittaja", + "sv": "Redakt\u00f6r", + "und": "Julkaisutoimittaja" + } } ], "email": "rahoitus@rahaorg.fi", @@ -11169,7 +12385,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/HostingInstitution" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/HostingInstitution", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Hosting Institution", + "fi": "S\u00e4ilytt\u00e4v\u00e4 organisaatio", + "sv": "Bevarande organisation", + "und": "S\u00e4ilytt\u00e4v\u00e4 organisaatio" + } } ], "email": "info@csc.fi", @@ -11193,7 +12416,13 @@ ], "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9625", @@ -11206,7 +12435,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ContactPerson" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ContactPerson", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Point of contact", + "fi": "Yhteystiedot", + "sv": "Kontaktuppgifter", + "und": "Yhteystiedot" + } } ], "email": "info@csc.fi", @@ -11237,7 +12473,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCollector" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCollector", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Collector", + "fi": "Aineiston ker\u00e4\u00e4j\u00e4", + "sv": "Datainsamling", + "und": "Aineiston ker\u00e4\u00e4j\u00e4" + } } ], "email": "info@csc.fi", @@ -11269,14 +12512,26 @@ "en": "Description of provenance activity" }, "event_outcome": { - "identifier": "Success" + "identifier": "http://uri.suomi.fi/codelist/fairdata/event_outcome/code/success", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/event_outcome", + "pref_label": { + "en": "Success", + "fi": "Onnistunut", + "sv": "Framg\u00e5ngsrik", + "und": "Onnistunut" + } }, "lifecycle_event": { "definition": { "en": "A statement or formal explanation of the meaning of a concept." 
}, "identifier": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event/code/collected", - "in_scheme": "http://uri.of.filetype.concept/scheme" + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event", + "pref_label": { + "en": "Collected", + "fi": "Ker\u00e4tty", + "und": "Ker\u00e4tty" + } }, "outcome_description": { "en": "A grand success", @@ -11287,7 +12542,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } }, "temporal": { @@ -11354,7 +12616,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCurator" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCurator", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Curator", + "fi": "Aineiston k\u00e4sittelij\u00e4", + "sv": "Databehandling", + "und": "Aineiston k\u00e4sittelij\u00e4" + } } ], "email": "info@csc.fi", @@ -11374,21 +12643,41 @@ "en": "Description of other provenance activity" }, "event_outcome": { - "identifier": "Failure" + "identifier": "http://uri.suomi.fi/codelist/fairdata/event_outcome/code/failure", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/event_outcome", + "pref_label": { + "en": "Failure", + "fi": "Ep\u00e4onnistunut", + "sv": "Misslyckad", + "und": "Ep\u00e4onnistunut" + } }, "outcome_description": { "en": "A grand failure", "fi": "Ep\u00e4onnistui" }, "preservation_event": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/preservation_event/code/upd" + "identifier": "http://uri.suomi.fi/codelist/fairdata/preservation_event/code/upd", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/preservation_event", + "pref_label": { + "en": "Object update", + "fi": "Objektin p\u00e4ivitys", + "und": "Objektin p\u00e4ivitys" + } }, "spatial": { "alt": "11.111", "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p105917" + "identifier": "http://www.yso.fi/onto/yso/p105917", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Southern Finland Province", + "fi": "Etel\u00e4-Suomi", + "sv": "S\u00f6dra Finland", + "und": "Etel\u00e4-Suomi" + } } }, "temporal": { @@ -11404,7 +12693,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Distributor", + "fi": "Jakelija", + "sv": "Distribut\u00f6r", + "und": "Jakelija" + } } ], "email": "info@csc.fi", @@ -11554,9 +12850,10 @@ "en": "Free account of the rights" }, "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", - "license": "https://url.of.license.which.applies.org", + "license": "https://url.of.license.which.applies.here.org", "title": { - "en": "A name given to the resource" + "en": "Apache Software License 2.0", + "und": "Apache Software License 2.0" } 
} ], @@ -11590,9 +12887,10 @@ "en": "Free account of the rights" }, "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", - "license": "https://url.of.license.which.applies.org", + "license": "https://url.of.license.which.applies.here.org", "title": { - "en": "A name given to the resource" + "en": "Apache Software License 2.0", + "und": "Apache Software License 2.0" } } ], @@ -11616,7 +12914,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataManager" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataManager", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Manager", + "fi": "Aineiston hallinnoija", + "sv": "Dataadministration", + "und": "Aineiston hallinnoija" + } } ], "email": "info@csc.fi", @@ -11647,7 +12952,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } }, { @@ -11655,7 +12967,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name 2", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } } ], @@ -11715,13 +13034,12 @@ "research_dataset": { "access_rights": { "access_type": { - "definition": { - "en": "A statement or formal explanation of the meaning of a concept." 
- }, "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", - "in_scheme": "http://uri.of.filetype.concept/scheme", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", "pref_label": { - "en": "pref label for this type" + "en": "Open", + "fi": "Avoin", + "und": "Avoin" } }, "access_url": { @@ -11745,13 +13063,21 @@ "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", "license": "https://url.of.license.which.applies.here.org", "title": { - "en": "A name given to the resource" + "en": "Apache Software License 2.0", + "und": "Apache Software License 2.0" } } ], "restriction_grounds": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other" + "identifier": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds/code/other", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/restriction_grounds", + "pref_label": { + "en": "Restricted access due to other reasons", + "fi": "Saatavuutta rajoitettu muulla perusteella", + "sv": "Begr\u00e4nsad \u00e5tkomst av \u00f6vriga sk\u00e4l", + "und": "Saatavuutta rajoitettu muulla perusteella" + } } ] }, @@ -11761,7 +13087,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/funding_acquisition" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/funding_acquisition", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Funding acquisition", + "fi": "Rahoituksen hankinta", + "sv": "Anskaffande av finansiering", + "und": "Rahoituksen hankinta" + } } ], "email": "kalle.kontribuuttaaja@csc.fi", @@ -11787,7 +13120,14 @@ "@type": "Person", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ProjectLeader" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ProjectLeader", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Project Leader", + "fi": "Projektin johtaja", + "sv": "Projektledare", + "und": "Projektin johtaja" + } } ], "email": "franzibald.kontribuuttaaja@csc.fi", @@ -11815,7 +13155,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/conceptualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/conceptualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Conceptualization", + "fi": "Tutkimuksen muotoilu", + "sv": "Formulering av forskningen", + "und": "Tutkimuksen muotoilu" + } } ], "member_of": { @@ -11834,7 +13181,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Distributor", + "fi": "Jakelija", + "sv": "Distribut\u00f6r", + "und": "Jakelija" + } } ], "identifier": "id:of:curator:default", @@ -11869,7 +13223,13 @@ ], "infrastructure": [ { - "identifier": "urn-nbn-fi-research-infras-2016072530" + "identifier": "urn-nbn-fi-research-infras-2016072530", + "in_scheme": "https://avaa.tdata.fi/api/jsonws/tupa-portlet.Infrastructures/get-all-infrastructures", + "pref_label": { + "en": "Accelerator laboratory of the university of Jyv\u00e4skyl\u00e4", 
+ "fi": "Jyv\u00e4skyl\u00e4n yliopiston fysiikan laitoksen Kiihdytinlaboratorio", + "und": "Jyv\u00e4skyl\u00e4n yliopiston fysiikan laitoksen Kiihdytinlaboratorio" + } } ], "is_output_of": [ @@ -11883,7 +13243,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Editor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Editor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Editor", + "fi": "Julkaisutoimittaja", + "sv": "Redakt\u00f6r", + "und": "Julkaisutoimittaja" + } } ], "email": "rahoitus@rahaorg.fi", @@ -11906,7 +13273,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/HostingInstitution" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/HostingInstitution", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Hosting Institution", + "fi": "S\u00e4ilytt\u00e4v\u00e4 organisaatio", + "sv": "Bevarande organisation", + "und": "S\u00e4ilytt\u00e4v\u00e4 organisaatio" + } } ], "email": "info@csc.fi", @@ -11930,7 +13304,13 @@ ], "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9626", @@ -11943,7 +13323,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ContactPerson" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/ContactPerson", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Point of contact", + "fi": "Yhteystiedot", + "sv": "Kontaktuppgifter", + "und": "Yhteystiedot" + } } ], "email": "info@csc.fi", @@ -11974,7 +13361,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCollector" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCollector", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Collector", + "fi": "Aineiston ker\u00e4\u00e4j\u00e4", + "sv": "Datainsamling", + "und": "Aineiston ker\u00e4\u00e4j\u00e4" + } } ], "email": "info@csc.fi", @@ -12006,14 +13400,26 @@ "en": "Description of provenance activity" }, "event_outcome": { - "identifier": "Success" + "identifier": "http://uri.suomi.fi/codelist/fairdata/event_outcome/code/success", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/event_outcome", + "pref_label": { + "en": "Success", + "fi": "Onnistunut", + "sv": "Framg\u00e5ngsrik", + "und": "Onnistunut" + } }, "lifecycle_event": { "definition": { "en": "A statement or formal explanation of the meaning of a concept." 
}, "identifier": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event/code/collected", - "in_scheme": "http://uri.of.filetype.concept/scheme" + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/lifecycle_event", + "pref_label": { + "en": "Collected", + "fi": "Ker\u00e4tty", + "und": "Ker\u00e4tty" + } }, "outcome_description": { "en": "A grand success", @@ -12024,7 +13430,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } }, "temporal": { @@ -12091,7 +13504,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCurator" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataCurator", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Curator", + "fi": "Aineiston k\u00e4sittelij\u00e4", + "sv": "Databehandling", + "und": "Aineiston k\u00e4sittelij\u00e4" + } } ], "email": "info@csc.fi", @@ -12111,21 +13531,41 @@ "en": "Description of other provenance activity" }, "event_outcome": { - "identifier": "Failure" + "identifier": "http://uri.suomi.fi/codelist/fairdata/event_outcome/code/failure", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/event_outcome", + "pref_label": { + "en": "Failure", + "fi": "Ep\u00e4onnistunut", + "sv": "Misslyckad", + "und": "Ep\u00e4onnistunut" + } }, "outcome_description": { "en": "A grand failure", "fi": "Ep\u00e4onnistui" }, "preservation_event": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/preservation_event/code/upd" + "identifier": "http://uri.suomi.fi/codelist/fairdata/preservation_event/code/upd", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/preservation_event", + "pref_label": { + "en": "Object update", + "fi": "Objektin p\u00e4ivitys", + "und": "Objektin p\u00e4ivitys" + } }, "spatial": { "alt": "11.111", "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p105917" + "identifier": "http://www.yso.fi/onto/yso/p105917", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Southern Finland Province", + "fi": "Etel\u00e4-Suomi", + "sv": "S\u00f6dra Finland", + "und": "Etel\u00e4-Suomi" + } } }, "temporal": { @@ -12141,7 +13581,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/Distributor", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Distributor", + "fi": "Jakelija", + "sv": "Distribut\u00f6r", + "und": "Jakelija" + } } ], "email": "info@csc.fi", @@ -12291,9 +13738,10 @@ "en": "Free account of the rights" }, "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", - "license": "https://url.of.license.which.applies.org", + "license": "https://url.of.license.which.applies.here.org", "title": { - "en": "A name given to the resource" + "en": "Apache Software License 2.0", + "und": "Apache Software License 2.0" } 
} ], @@ -12327,9 +13775,10 @@ "en": "Free account of the rights" }, "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/Apache-2.0", - "license": "https://url.of.license.which.applies.org", + "license": "https://url.of.license.which.applies.here.org", "title": { - "en": "A name given to the resource" + "en": "Apache Software License 2.0", + "und": "Apache Software License 2.0" } } ], @@ -12353,7 +13802,14 @@ "@type": "Organization", "contributor_type": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataManager" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_type/code/DataManager", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_type", + "pref_label": { + "en": "Data Manager", + "fi": "Aineiston hallinnoija", + "sv": "Dataadministration", + "und": "Aineiston hallinnoija" + } } ], "email": "info@csc.fi", @@ -12384,7 +13840,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } }, { @@ -12392,7 +13855,14 @@ "full_address": "The complete address written as a string, with or without formatting", "geographic_name": "Geographic name 2", "place_uri": { - "identifier": "http://www.yso.fi/onto/yso/p107966" + "identifier": "http://www.yso.fi/onto/yso/p107966", + "in_scheme": "http://www.yso.fi/onto/yso/places", + "pref_label": { + "en": "Olostunturi", + "fi": "Olostunturi", + "sv": "Olostunturi", + "und": "Olostunturi" + } } } ], @@ -12464,7 +13934,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -12472,7 +13948,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -12519,7 +14002,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9610-alt-1", @@ -12570,7 +14059,13 @@ "research_dataset": { "access_rights": { "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open" + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } } }, "creator": [ @@ -12578,7 +14073,14 @@ "@type": "Person", "contributor_role": [ { - "identifier": 
"http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization" + "identifier": "http://uri.suomi.fi/codelist/fairdata/contributor_role/code/visualization", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/contributor_role", + "pref_label": { + "en": "Visualization", + "fi": "Visualisointi", + "sv": "Visualisering", + "und": "Visualisointi" + } } ], "member_of": { @@ -12625,7 +14127,13 @@ "issued": "2019-01-01", "language": [ { - "identifier": "http://lexvo.org/id/iso639-3/eng" + "identifier": "http://lexvo.org/id/iso639-3/eng", + "title": { + "en": "English", + "fi": "englanti", + "sv": "engelska", + "und": "englanti" + } } ], "metadata_version_identifier": "1955e904-e3dd-4d7e-99f1-3fed446f9610-alt-2", From d8d173fb472f3bd8e266254928b20d0c11b6234b Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Fri, 4 Jun 2021 14:00:16 +0300 Subject: [PATCH 025/160] added missing test data to templates and regenerated test data file --- ...og_record_test_data_template_full_att.json | 68 +- ...og_record_test_data_template_full_ida.json | 68 +- .../tests/testdata/generate_test_data.py | 62 +- src/metax_api/tests/testdata/test_data.json | 648 +++++++++++++++--- 4 files changed, 737 insertions(+), 109 deletions(-) diff --git a/src/metax_api/tests/testdata/catalog_record_test_data_template_full_att.json b/src/metax_api/tests/testdata/catalog_record_test_data_template_full_att.json index 4bf26350..c2faf66b 100755 --- a/src/metax_api/tests/testdata/catalog_record_test_data_template_full_att.json +++ b/src/metax_api/tests/testdata/catalog_record_test_data_template_full_att.json @@ -662,7 +662,13 @@ } ], "funder_type": { - "identifier": "tekes" + "identifier": "http://uri.suomi.fi/codelist/fairdata/funder_type/code/tekes", + "pref_label": { + "fi": "Tekes", + "en": "Tekes", + "und": "Tekes" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/funder_type" } } ], @@ -774,11 +780,22 @@ }, "identifier": "external:dataset:identifier", "type": { - "identifier": "physical_object" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/physical_object", + "pref_label": { + "fi": "Fyysinen objekti", + "en": "Physical object", + "und": "Fyysinen objekti" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type" } }, "relation_type": { - "identifier": "wasDerivedFrom" + "identifier": "http://www.w3.org/ns/prov#wasDerivedFrom", + "pref_label": { + "fi": "Johdettu aineistosta", + "en": "Was derived from", + "und": "Johdettu aineistosta" + } } }, { @@ -792,11 +809,22 @@ }, "identifier": "external:thing:identifier", "type": { - "identifier": "collection" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/collection", + "pref_label": { + "fi": "Kokoelma", + "en": "Collection", + "und": "Kokoelma" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type" } }, "relation_type": { - "identifier": "cites" + "identifier": "http://purl.org/spar/cito/cites", + "pref_label": { + "fi": "Viittaa", + "en": "Cites", + "und": "Viittaa" + } } }, { @@ -810,11 +838,22 @@ }, "identifier": "external:dataset_2:identifier", "type": { - "identifier": "service" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/service", + "pref_label": { + "fi": "Palvelu", + "en": "Service", + "und": "Palvelu" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type" } }, "relation_type": { - "identifier": "isPartOf" + "identifier": "http://purl.org/dc/terms/isPartOf", + "pref_label": { + "fi": "Kuuluu aineistoon", + "en": "Is part 
of", + "und": "Kuuluu aineistoon" + } } }, { @@ -828,11 +867,22 @@ }, "identifier": "external:thing_2:identifier", "type": { - "identifier": "ui" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/ui", + "pref_label": { + "fi": "KƤyttƶliittymƤ", + "en": "User interface", + "und": "KƤyttƶliittymƤ" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type" } }, "relation_type": { - "identifier": "hasPreviousVersion" + "identifier": "http://www.w3.org/ns/adms#previous", + "pref_label": { + "fi": "Edellinen versio", + "en": "Has previous version", + "und": "Edellinen versio" + } } } ] diff --git a/src/metax_api/tests/testdata/catalog_record_test_data_template_full_ida.json b/src/metax_api/tests/testdata/catalog_record_test_data_template_full_ida.json index bc9c3d89..b3f3ef82 100755 --- a/src/metax_api/tests/testdata/catalog_record_test_data_template_full_ida.json +++ b/src/metax_api/tests/testdata/catalog_record_test_data_template_full_ida.json @@ -704,7 +704,13 @@ } ], "funder_type": { - "identifier": "tekes" + "identifier": "http://uri.suomi.fi/codelist/fairdata/funder_type/code/tekes", + "pref_label": { + "fi": "Tekes", + "en": "Tekes", + "und": "Tekes" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/funder_type" } } ], @@ -770,11 +776,22 @@ }, "identifier": "external:dataset:identifier", "type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/physical_object" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/physical_object", + "pref_label": { + "fi": "Fyysinen objekti", + "en": "Physical object", + "und": "Fyysinen objekti" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type" } }, "relation_type": { - "identifier": "wasDerivedFrom" + "identifier": "http://www.w3.org/ns/prov#wasDerivedFrom", + "pref_label": { + "fi": "Johdettu aineistosta", + "en": "Was derived from", + "und": "Johdettu aineistosta" + } } }, { @@ -788,11 +805,22 @@ }, "identifier": "external:thing:identifier", "type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/collection" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/collection", + "pref_label": { + "fi": "Kokoelma", + "en": "Collection", + "und": "Kokoelma" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type" } }, "relation_type": { - "identifier": "cites" + "identifier": "http://purl.org/spar/cito/cites", + "pref_label": { + "fi": "Viittaa", + "en": "Cites", + "und": "Viittaa" + } } }, { @@ -806,11 +834,22 @@ }, "identifier": "external:dataset_2:identifier", "type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/service" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/service", + "pref_label": { + "fi": "Palvelu", + "en": "Service", + "und": "Palvelu" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type" } }, "relation_type": { - "identifier": "isPartOf" + "identifier": "http://purl.org/dc/terms/isPartOf", + "pref_label": { + "fi": "Kuuluu aineistoon", + "en": "Is part of", + "und": "Kuuluu aineistoon" + } } }, { @@ -824,11 +863,22 @@ }, "identifier": "external:thing_2:identifier", "type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/ui" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/ui", + "pref_label": { + "fi": "KƤyttƶliittymƤ", + "en": "User interface", + "und": "KƤyttƶliittymƤ" + }, + "in_scheme": 
"http://uri.suomi.fi/codelist/fairdata/resource_type" } }, "relation_type": { - "identifier": "hasPreviousVersion" + "identifier": "http://www.w3.org/ns/adms#previous", + "pref_label": { + "fi": "Edellinen versio", + "en": "Has previous version", + "und": "Edellinen versio" + } } } ] diff --git a/src/metax_api/tests/testdata/generate_test_data.py b/src/metax_api/tests/testdata/generate_test_data.py index b841483d..4b4931e8 100755 --- a/src/metax_api/tests/testdata/generate_test_data.py +++ b/src/metax_api/tests/testdata/generate_test_data.py @@ -454,34 +454,82 @@ def generate_catalog_records( # first fifth of files dataset_files[-1]["file_type"] = { "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/text", + "pref_label": { + "fi": "Teksti", + "en": "Text", + "und": "Teksti" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type" } dataset_files[-1]["use_category"] = { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/source" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/source", + "pref_label": { + "fi": "LƤhdeaineisto", + "en": "Source material", + "und": "LƤhdeaineisto" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category" } elif file_divider <= j < (file_divider * 2): # second fifth of files dataset_files[-1]["file_type"] = { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/video" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/video", + "pref_label": { + "fi": "Video", + "en": "Video", + "und": "Video" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type" } dataset_files[-1]["use_category"] = { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/outcome" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/outcome", + "pref_label": { + "fi": "Tulosaineisto", + "en": "Outcome material", + "und": "Tulosaineisto" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category" } elif (file_divider * 2) <= j < (file_divider * 3): # third fifth of files dataset_files[-1]["file_type"] = { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/image" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/image", + "pref_label": { + "fi": "Kuva", + "en": "Image", + "und": "Kuva" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type" } dataset_files[-1]["use_category"] = { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/publication" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/publication", + "pref_label": { + "fi": "Julkaisu", + "en": "Publication", + "und": "Julkaisu" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category" } elif (file_divider * 3) <= j < (file_divider * 4): # fourth fifth of files dataset_files[-1]["file_type"] = { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/source_code" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/source_code", + "pref_label": { + "fi": "LƤhdekoodi", + "en": "Source code", + "und": "LƤhdekoodi" + }, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type" } dataset_files[-1]["use_category"] = { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/documentation" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/documentation", + "pref_label": { + "fi": "Dokumentaatio", + "en": "Documentation", + "und": "Dokumentaatio" + }, + "in_scheme": 
"http://uri.suomi.fi/codelist/fairdata/use_category" } else: # the rest of files diff --git a/src/metax_api/tests/testdata/test_data.json b/src/metax_api/tests/testdata/test_data.json index b7c94240..dbe75ff1 100755 --- a/src/metax_api/tests/testdata/test_data.json +++ b/src/metax_api/tests/testdata/test_data.json @@ -5799,22 +5799,46 @@ "files": [ { "file_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/text" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/text", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", + "pref_label": { + "en": "Text", + "fi": "Teksti", + "und": "Teksti" + } }, "identifier": "pid:urn:1", "title": "File metadata title 1", "use_category": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/source" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/source", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", + "pref_label": { + "en": "Source material", + "fi": "L\u00e4hdeaineisto", + "und": "L\u00e4hdeaineisto" + } } }, { "file_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/text" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/text", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", + "pref_label": { + "en": "Text", + "fi": "Teksti", + "und": "Teksti" + } }, "identifier": "pid:urn:2", "title": "File metadata title 2", "use_category": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/source" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/source", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", + "pref_label": { + "en": "Source material", + "fi": "L\u00e4hdeaineisto", + "und": "L\u00e4hdeaineisto" + } } } ], @@ -5936,22 +5960,46 @@ "files": [ { "file_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/text" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/text", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", + "pref_label": { + "en": "Text", + "fi": "Teksti", + "und": "Teksti" + } }, "identifier": "pid:urn:3", "title": "File metadata title 3", "use_category": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/source" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/source", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", + "pref_label": { + "en": "Source material", + "fi": "L\u00e4hdeaineisto", + "und": "L\u00e4hdeaineisto" + } } }, { "file_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/video" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/video", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", + "pref_label": { + "en": "Video", + "fi": "Video", + "und": "Video" + } }, "identifier": "pid:urn:4", "title": "File metadata title 4", "use_category": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/outcome" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/outcome", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", + "pref_label": { + "en": "Outcome material", + "fi": "Tulosaineisto", + "und": "Tulosaineisto" + } } } ], @@ -6073,22 +6121,46 @@ "files": [ { "file_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/video" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/video", + 
"in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", + "pref_label": { + "en": "Video", + "fi": "Video", + "und": "Video" + } }, "identifier": "pid:urn:5", "title": "File metadata title 5", "use_category": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/outcome" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/outcome", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", + "pref_label": { + "en": "Outcome material", + "fi": "Tulosaineisto", + "und": "Tulosaineisto" + } } }, { "file_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/video" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/video", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", + "pref_label": { + "en": "Video", + "fi": "Video", + "und": "Video" + } }, "identifier": "pid:urn:6", "title": "File metadata title 6", "use_category": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/outcome" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/outcome", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", + "pref_label": { + "en": "Outcome material", + "fi": "Tulosaineisto", + "und": "Tulosaineisto" + } } } ], @@ -6210,22 +6282,46 @@ "files": [ { "file_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/video" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/video", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", + "pref_label": { + "en": "Video", + "fi": "Video", + "und": "Video" + } }, "identifier": "pid:urn:7", "title": "File metadata title 7", "use_category": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/outcome" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/outcome", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", + "pref_label": { + "en": "Outcome material", + "fi": "Tulosaineisto", + "und": "Tulosaineisto" + } } }, { "file_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/image" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/image", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", + "pref_label": { + "en": "Image", + "fi": "Kuva", + "und": "Kuva" + } }, "identifier": "pid:urn:8", "title": "File metadata title 8", "use_category": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/publication" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/publication", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", + "pref_label": { + "en": "Publication", + "fi": "Julkaisu", + "und": "Julkaisu" + } } } ], @@ -6347,22 +6443,46 @@ "files": [ { "file_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/image" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/image", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", + "pref_label": { + "en": "Image", + "fi": "Kuva", + "und": "Kuva" + } }, "identifier": "pid:urn:9", "title": "File metadata title 9", "use_category": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/publication" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/publication", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", + "pref_label": { + "en": "Publication", + "fi": "Julkaisu", + "und": "Julkaisu" + } } }, { 
"file_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/image" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/image", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", + "pref_label": { + "en": "Image", + "fi": "Kuva", + "und": "Kuva" + } }, "identifier": "pid:urn:10", "title": "File metadata title 10", "use_category": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/publication" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/publication", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", + "pref_label": { + "en": "Publication", + "fi": "Julkaisu", + "und": "Julkaisu" + } } } ], @@ -6484,22 +6604,46 @@ "files": [ { "file_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/image" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/image", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", + "pref_label": { + "en": "Image", + "fi": "Kuva", + "und": "Kuva" + } }, "identifier": "pid:urn:11", "title": "File metadata title 11", "use_category": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/publication" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/publication", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", + "pref_label": { + "en": "Publication", + "fi": "Julkaisu", + "und": "Julkaisu" + } } }, { "file_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/source_code" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/source_code", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", + "pref_label": { + "en": "Source code", + "fi": "L\u00e4hdekoodi", + "und": "L\u00e4hdekoodi" + } }, "identifier": "pid:urn:12", "title": "File metadata title 12", "use_category": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/documentation" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/documentation", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", + "pref_label": { + "en": "Documentation", + "fi": "Dokumentaatio", + "und": "Dokumentaatio" + } } } ], @@ -6621,22 +6765,46 @@ "files": [ { "file_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/source_code" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/source_code", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", + "pref_label": { + "en": "Source code", + "fi": "L\u00e4hdekoodi", + "und": "L\u00e4hdekoodi" + } }, "identifier": "pid:urn:13", "title": "File metadata title 13", "use_category": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/documentation" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/documentation", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", + "pref_label": { + "en": "Documentation", + "fi": "Dokumentaatio", + "und": "Dokumentaatio" + } } }, { "file_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/source_code" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/source_code", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", + "pref_label": { + "en": "Source code", + "fi": "L\u00e4hdekoodi", + "und": "L\u00e4hdekoodi" + } }, "identifier": "pid:urn:14", "title": "File metadata title 14", "use_category": { - "identifier": 
"http://uri.suomi.fi/codelist/fairdata/use_category/code/documentation" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/documentation", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", + "pref_label": { + "en": "Documentation", + "fi": "Dokumentaatio", + "und": "Dokumentaatio" + } } } ], @@ -6758,12 +6926,24 @@ "files": [ { "file_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/source_code" + "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/source_code", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", + "pref_label": { + "en": "Source code", + "fi": "L\u00e4hdekoodi", + "und": "L\u00e4hdekoodi" + } }, "identifier": "pid:urn:15", "title": "File metadata title 15", "use_category": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/documentation" + "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/documentation", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", + "pref_label": { + "en": "Documentation", + "fi": "Dokumentaatio", + "und": "Dokumentaatio" + } } }, { @@ -7397,7 +7577,13 @@ "is_output_of": [ { "funder_type": { - "identifier": "tekes" + "identifier": "http://uri.suomi.fi/codelist/fairdata/funder_type/code/tekes", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/funder_type", + "pref_label": { + "en": "Tekes", + "fi": "Tekes", + "und": "Tekes" + } }, "has_funder_identifier": "funderprojectidentifier", "has_funding_agency": [ @@ -7801,11 +7987,22 @@ "fi": "Ulkopuolinen aineisto 1" }, "type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/physical_object" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/physical_object", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Physical object", + "fi": "Fyysinen objekti", + "und": "Fyysinen objekti" + } } }, "relation_type": { - "identifier": "wasDerivedFrom" + "identifier": "http://www.w3.org/ns/prov#wasDerivedFrom", + "pref_label": { + "en": "Was derived from", + "fi": "Johdettu aineistosta", + "und": "Johdettu aineistosta" + } } }, { @@ -7819,11 +8016,22 @@ "fi": "Ulkopuolinen asia 2" }, "type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/collection" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/collection", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Collection", + "fi": "Kokoelma", + "und": "Kokoelma" + } } }, "relation_type": { - "identifier": "cites" + "identifier": "http://purl.org/spar/cito/cites", + "pref_label": { + "en": "Cites", + "fi": "Viittaa", + "und": "Viittaa" + } } }, { @@ -7837,11 +8045,22 @@ "fi": "Ulkopuolinen aineisto 2" }, "type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/service" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/service", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Service", + "fi": "Palvelu", + "und": "Palvelu" + } } }, "relation_type": { - "identifier": "isPartOf" + "identifier": "http://purl.org/dc/terms/isPartOf", + "pref_label": { + "en": "Is part of", + "fi": "Kuuluu aineistoon", + "und": "Kuuluu aineistoon" + } } }, { @@ -7855,11 +8074,22 @@ "fi": "Ulkopuolinen asia 2" }, "type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/ui" + "identifier": 
"http://uri.suomi.fi/codelist/fairdata/resource_type/code/ui", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "User interface", + "fi": "K\u00e4ytt\u00f6liittym\u00e4", + "und": "K\u00e4ytt\u00f6liittym\u00e4" + } } }, "relation_type": { - "identifier": "hasPreviousVersion" + "identifier": "http://www.w3.org/ns/adms#previous", + "pref_label": { + "en": "Has previous version", + "fi": "Edellinen versio", + "und": "Edellinen versio" + } } } ], @@ -8304,7 +8534,13 @@ "is_output_of": [ { "funder_type": { - "identifier": "tekes" + "identifier": "http://uri.suomi.fi/codelist/fairdata/funder_type/code/tekes", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/funder_type", + "pref_label": { + "en": "Tekes", + "fi": "Tekes", + "und": "Tekes" + } }, "has_funder_identifier": "funderprojectidentifier", "has_funding_agency": [ @@ -8708,11 +8944,22 @@ "fi": "Ulkopuolinen aineisto 1" }, "type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/physical_object" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/physical_object", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Physical object", + "fi": "Fyysinen objekti", + "und": "Fyysinen objekti" + } } }, "relation_type": { - "identifier": "wasDerivedFrom" + "identifier": "http://www.w3.org/ns/prov#wasDerivedFrom", + "pref_label": { + "en": "Was derived from", + "fi": "Johdettu aineistosta", + "und": "Johdettu aineistosta" + } } }, { @@ -8726,11 +8973,22 @@ "fi": "Ulkopuolinen asia 2" }, "type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/collection" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/collection", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Collection", + "fi": "Kokoelma", + "und": "Kokoelma" + } } }, "relation_type": { - "identifier": "cites" + "identifier": "http://purl.org/spar/cito/cites", + "pref_label": { + "en": "Cites", + "fi": "Viittaa", + "und": "Viittaa" + } } }, { @@ -8744,11 +9002,22 @@ "fi": "Ulkopuolinen aineisto 2" }, "type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/service" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/service", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Service", + "fi": "Palvelu", + "und": "Palvelu" + } } }, "relation_type": { - "identifier": "isPartOf" + "identifier": "http://purl.org/dc/terms/isPartOf", + "pref_label": { + "en": "Is part of", + "fi": "Kuuluu aineistoon", + "und": "Kuuluu aineistoon" + } } }, { @@ -8762,11 +9031,22 @@ "fi": "Ulkopuolinen asia 2" }, "type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/ui" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/ui", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "User interface", + "fi": "K\u00e4ytt\u00f6liittym\u00e4", + "und": "K\u00e4ytt\u00f6liittym\u00e4" + } } }, "relation_type": { - "identifier": "hasPreviousVersion" + "identifier": "http://www.w3.org/ns/adms#previous", + "pref_label": { + "en": "Has previous version", + "fi": "Edellinen versio", + "und": "Edellinen versio" + } } } ], @@ -9303,7 +9583,13 @@ "is_output_of": [ { "funder_type": { - "identifier": "tekes" + "identifier": "http://uri.suomi.fi/codelist/fairdata/funder_type/code/tekes", + "in_scheme": 
"http://uri.suomi.fi/codelist/fairdata/funder_type", + "pref_label": { + "en": "Tekes", + "fi": "Tekes", + "und": "Tekes" + } }, "has_funder_identifier": "funderprojectidentifier", "has_funding_agency": [ @@ -9707,11 +9993,22 @@ "fi": "Ulkopuolinen aineisto 1" }, "type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/physical_object" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/physical_object", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Physical object", + "fi": "Fyysinen objekti", + "und": "Fyysinen objekti" + } } }, "relation_type": { - "identifier": "wasDerivedFrom" + "identifier": "http://www.w3.org/ns/prov#wasDerivedFrom", + "pref_label": { + "en": "Was derived from", + "fi": "Johdettu aineistosta", + "und": "Johdettu aineistosta" + } } }, { @@ -9725,11 +10022,22 @@ "fi": "Ulkopuolinen asia 2" }, "type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/collection" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/collection", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Collection", + "fi": "Kokoelma", + "und": "Kokoelma" + } } }, "relation_type": { - "identifier": "cites" + "identifier": "http://purl.org/spar/cito/cites", + "pref_label": { + "en": "Cites", + "fi": "Viittaa", + "und": "Viittaa" + } } }, { @@ -9743,11 +10051,22 @@ "fi": "Ulkopuolinen aineisto 2" }, "type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/service" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/service", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Service", + "fi": "Palvelu", + "und": "Palvelu" + } } }, "relation_type": { - "identifier": "isPartOf" + "identifier": "http://purl.org/dc/terms/isPartOf", + "pref_label": { + "en": "Is part of", + "fi": "Kuuluu aineistoon", + "und": "Kuuluu aineistoon" + } } }, { @@ -9761,11 +10080,22 @@ "fi": "Ulkopuolinen asia 2" }, "type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/ui" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/ui", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "User interface", + "fi": "K\u00e4ytt\u00f6liittym\u00e4", + "und": "K\u00e4ytt\u00f6liittym\u00e4" + } } }, "relation_type": { - "identifier": "hasPreviousVersion" + "identifier": "http://www.w3.org/ns/adms#previous", + "pref_label": { + "en": "Has previous version", + "fi": "Edellinen versio", + "und": "Edellinen versio" + } } } ], @@ -11459,7 +11789,13 @@ "is_output_of": [ { "funder_type": { - "identifier": "tekes" + "identifier": "http://uri.suomi.fi/codelist/fairdata/funder_type/code/tekes", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/funder_type", + "pref_label": { + "en": "Tekes", + "fi": "Tekes", + "und": "Tekes" + } }, "has_funder_identifier": "funderprojectidentifier", "has_funding_agency": [ @@ -11863,11 +12199,22 @@ "fi": "Ulkopuolinen aineisto 1" }, "type": { - "identifier": "physical_object" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/physical_object", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Physical object", + "fi": "Fyysinen objekti", + "und": "Fyysinen objekti" + } } }, "relation_type": { - "identifier": "wasDerivedFrom" + "identifier": 
"http://www.w3.org/ns/prov#wasDerivedFrom", + "pref_label": { + "en": "Was derived from", + "fi": "Johdettu aineistosta", + "und": "Johdettu aineistosta" + } } }, { @@ -11881,11 +12228,22 @@ "fi": "Ulkopuolinen asia 2" }, "type": { - "identifier": "collection" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/collection", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Collection", + "fi": "Kokoelma", + "und": "Kokoelma" + } } }, "relation_type": { - "identifier": "cites" + "identifier": "http://purl.org/spar/cito/cites", + "pref_label": { + "en": "Cites", + "fi": "Viittaa", + "und": "Viittaa" + } } }, { @@ -11899,11 +12257,22 @@ "fi": "Ulkopuolinen aineisto 2" }, "type": { - "identifier": "service" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/service", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Service", + "fi": "Palvelu", + "und": "Palvelu" + } } }, "relation_type": { - "identifier": "isPartOf" + "identifier": "http://purl.org/dc/terms/isPartOf", + "pref_label": { + "en": "Is part of", + "fi": "Kuuluu aineistoon", + "und": "Kuuluu aineistoon" + } } }, { @@ -11917,11 +12286,22 @@ "fi": "Ulkopuolinen asia 2" }, "type": { - "identifier": "ui" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/ui", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "User interface", + "fi": "K\u00e4ytt\u00f6liittym\u00e4", + "und": "K\u00e4ytt\u00f6liittym\u00e4" + } } }, "relation_type": { - "identifier": "hasPreviousVersion" + "identifier": "http://www.w3.org/ns/adms#previous", + "pref_label": { + "en": "Has previous version", + "fi": "Edellinen versio", + "und": "Edellinen versio" + } } } ], @@ -12347,7 +12727,13 @@ "is_output_of": [ { "funder_type": { - "identifier": "tekes" + "identifier": "http://uri.suomi.fi/codelist/fairdata/funder_type/code/tekes", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/funder_type", + "pref_label": { + "en": "Tekes", + "fi": "Tekes", + "und": "Tekes" + } }, "has_funder_identifier": "funderprojectidentifier", "has_funding_agency": [ @@ -12751,11 +13137,22 @@ "fi": "Ulkopuolinen aineisto 1" }, "type": { - "identifier": "physical_object" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/physical_object", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Physical object", + "fi": "Fyysinen objekti", + "und": "Fyysinen objekti" + } } }, "relation_type": { - "identifier": "wasDerivedFrom" + "identifier": "http://www.w3.org/ns/prov#wasDerivedFrom", + "pref_label": { + "en": "Was derived from", + "fi": "Johdettu aineistosta", + "und": "Johdettu aineistosta" + } } }, { @@ -12769,11 +13166,22 @@ "fi": "Ulkopuolinen asia 2" }, "type": { - "identifier": "collection" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/collection", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Collection", + "fi": "Kokoelma", + "und": "Kokoelma" + } } }, "relation_type": { - "identifier": "cites" + "identifier": "http://purl.org/spar/cito/cites", + "pref_label": { + "en": "Cites", + "fi": "Viittaa", + "und": "Viittaa" + } } }, { @@ -12787,11 +13195,22 @@ "fi": "Ulkopuolinen aineisto 2" }, "type": { - "identifier": "service" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/service", + "in_scheme": 
"http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Service", + "fi": "Palvelu", + "und": "Palvelu" + } } }, "relation_type": { - "identifier": "isPartOf" + "identifier": "http://purl.org/dc/terms/isPartOf", + "pref_label": { + "en": "Is part of", + "fi": "Kuuluu aineistoon", + "und": "Kuuluu aineistoon" + } } }, { @@ -12805,11 +13224,22 @@ "fi": "Ulkopuolinen asia 2" }, "type": { - "identifier": "ui" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/ui", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "User interface", + "fi": "K\u00e4ytt\u00f6liittym\u00e4", + "und": "K\u00e4ytt\u00f6liittym\u00e4" + } } }, "relation_type": { - "identifier": "hasPreviousVersion" + "identifier": "http://www.w3.org/ns/adms#previous", + "pref_label": { + "en": "Has previous version", + "fi": "Edellinen versio", + "und": "Edellinen versio" + } } } ], @@ -13235,7 +13665,13 @@ "is_output_of": [ { "funder_type": { - "identifier": "tekes" + "identifier": "http://uri.suomi.fi/codelist/fairdata/funder_type/code/tekes", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/funder_type", + "pref_label": { + "en": "Tekes", + "fi": "Tekes", + "und": "Tekes" + } }, "has_funder_identifier": "funderprojectidentifier", "has_funding_agency": [ @@ -13639,11 +14075,22 @@ "fi": "Ulkopuolinen aineisto 1" }, "type": { - "identifier": "physical_object" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/physical_object", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Physical object", + "fi": "Fyysinen objekti", + "und": "Fyysinen objekti" + } } }, "relation_type": { - "identifier": "wasDerivedFrom" + "identifier": "http://www.w3.org/ns/prov#wasDerivedFrom", + "pref_label": { + "en": "Was derived from", + "fi": "Johdettu aineistosta", + "und": "Johdettu aineistosta" + } } }, { @@ -13657,11 +14104,22 @@ "fi": "Ulkopuolinen asia 2" }, "type": { - "identifier": "collection" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/collection", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Collection", + "fi": "Kokoelma", + "und": "Kokoelma" + } } }, "relation_type": { - "identifier": "cites" + "identifier": "http://purl.org/spar/cito/cites", + "pref_label": { + "en": "Cites", + "fi": "Viittaa", + "und": "Viittaa" + } } }, { @@ -13675,11 +14133,22 @@ "fi": "Ulkopuolinen aineisto 2" }, "type": { - "identifier": "service" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/service", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "Service", + "fi": "Palvelu", + "und": "Palvelu" + } } }, "relation_type": { - "identifier": "isPartOf" + "identifier": "http://purl.org/dc/terms/isPartOf", + "pref_label": { + "en": "Is part of", + "fi": "Kuuluu aineistoon", + "und": "Kuuluu aineistoon" + } } }, { @@ -13693,11 +14162,22 @@ "fi": "Ulkopuolinen asia 2" }, "type": { - "identifier": "ui" + "identifier": "http://uri.suomi.fi/codelist/fairdata/resource_type/code/ui", + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/resource_type", + "pref_label": { + "en": "User interface", + "fi": "K\u00e4ytt\u00f6liittym\u00e4", + "und": "K\u00e4ytt\u00f6liittym\u00e4" + } } }, "relation_type": { - "identifier": "hasPreviousVersion" + "identifier": "http://www.w3.org/ns/adms#previous", + "pref_label": { + "en": "Has previous version", + "fi": 
"Edellinen versio", + "und": "Edellinen versio" + } } } ], From 3e4e22811b41f7bf6a67aea1ac4e17be0747ff8b Mon Sep 17 00:00:00 2001 From: aptiaine Date: Mon, 7 Jun 2021 12:14:44 +0300 Subject: [PATCH 026/160] converting yaml files to html on startup --- src/metax_api/onappstart.py | 8 +- src/metax_api/settings/__init__.py | 2 + src/metax_api/settings/components/common.py | 3 + {swagger => src/metax_api/swagger}/README.md | 0 .../swagger}/swagger-yaml-to-html.py | 0 .../metax_api/swagger}/v1/swagger.yaml | 0 .../metax_api/swagger}/v2/swagger.yaml | 0 .../templates/swagger/v1/swagger.html | 10 +++ .../templates/swagger/v2/swagger.html | 10 +++ src/metax_api/utils/convert_yaml_to_html.py | 90 +++++++++++++++++++ 10 files changed, 122 insertions(+), 1 deletion(-) rename {swagger => src/metax_api/swagger}/README.md (100%) rename {swagger => src/metax_api/swagger}/swagger-yaml-to-html.py (100%) rename {swagger => src/metax_api/swagger}/v1/swagger.yaml (100%) rename {swagger => src/metax_api/swagger}/v2/swagger.yaml (100%) create mode 100644 src/metax_api/templates/swagger/v1/swagger.html create mode 100644 src/metax_api/templates/swagger/v2/swagger.html create mode 100644 src/metax_api/utils/convert_yaml_to_html.py diff --git a/src/metax_api/onappstart.py b/src/metax_api/onappstart.py index 24f85914..47816fc7 100755 --- a/src/metax_api/onappstart.py +++ b/src/metax_api/onappstart.py @@ -13,7 +13,7 @@ from django.conf import settings from icecream import ic -from metax_api.utils import ReferenceDataLoader, executing_test_case +from metax_api.utils import ReferenceDataLoader, executing_test_case, convert_yaml_to_html _logger = logging.getLogger(__name__) @@ -101,4 +101,10 @@ def ready(self): # pragma: no cover _logger.error(e) _logger.error("Unable to initialize RabbitMQ exchanges") + try: + convert_yaml_to_html.yaml_to_html_convert() + except Exception as e: + _logger.error(e) + _logger.error("Unable to convert swagger documentation") + _logger.info("Metax API startup tasks finished") diff --git a/src/metax_api/settings/__init__.py b/src/metax_api/settings/__init__.py index fd0410ea..cbf5c837 100755 --- a/src/metax_api/settings/__init__.py +++ b/src/metax_api/settings/__init__.py @@ -59,6 +59,8 @@ SERVER_DOMAIN_NAME=(str, "metax.fd-dev.csc.fi"), VALIDATE_TOKEN_URL=(str, "https://127.0.0.1/secure/validate_token"), WKT_FILENAME=(str, join(REFDATA_INDEXER_PATH, "resources", "uri_to_wkt.json")), + SWAGGER_YAML_PATH=(str, join(BASE_DIR, "metax_api", "swagger")), + SWAGGER_HTML_PATH=(str, join(BASE_DIR, "metax_api", "templates", "swagger")), ) # reading .env file environ.Env.read_env() diff --git a/src/metax_api/settings/components/common.py b/src/metax_api/settings/components/common.py index cbf54286..24529eb2 100755 --- a/src/metax_api/settings/components/common.py +++ b/src/metax_api/settings/components/common.py @@ -188,3 +188,6 @@ {"password": "test-fds", "username": "fds"}, {"password": "test-download", "username": "download"}, ] + +SWAGGER_YAML_PATH = env('SWAGGER_YAML_PATH') +SWAGGER_HTML_PATH = env('SWAGGER_HTML_PATH') diff --git a/swagger/README.md b/src/metax_api/swagger/README.md similarity index 100% rename from swagger/README.md rename to src/metax_api/swagger/README.md diff --git a/swagger/swagger-yaml-to-html.py b/src/metax_api/swagger/swagger-yaml-to-html.py similarity index 100% rename from swagger/swagger-yaml-to-html.py rename to src/metax_api/swagger/swagger-yaml-to-html.py diff --git a/swagger/v1/swagger.yaml b/src/metax_api/swagger/v1/swagger.yaml similarity index 100% rename from 
swagger/v1/swagger.yaml rename to src/metax_api/swagger/v1/swagger.yaml diff --git a/swagger/v2/swagger.yaml b/src/metax_api/swagger/v2/swagger.yaml similarity index 100% rename from swagger/v2/swagger.yaml rename to src/metax_api/swagger/v2/swagger.yaml diff --git a/src/metax_api/templates/swagger/v1/swagger.html b/src/metax_api/templates/swagger/v1/swagger.html new file mode 100644 index 00000000..7e8b0e71 --- /dev/null +++ b/src/metax_api/templates/swagger/v1/swagger.html @@ -0,0 +1,10 @@ +<!DOCTYPE html> +<html> +<head> + <meta charset="UTF-8"> + <title>Swagger file creation failed</title> +</head> +<body> + <div></div> +</body> +</html> \ No newline at end of file diff --git a/src/metax_api/templates/swagger/v2/swagger.html b/src/metax_api/templates/swagger/v2/swagger.html new file mode 100644 index 00000000..7e8b0e71 --- /dev/null +++ b/src/metax_api/templates/swagger/v2/swagger.html @@ -0,0 +1,10 @@ +<!DOCTYPE html> +<html> +<head> + <meta charset="UTF-8"> + <title>Swagger file creation failed</title> +</head> +<body> + <div></div> +</body> +</html> \ No newline at end of file diff --git a/src/metax_api/utils/convert_yaml_to_html.py b/src/metax_api/utils/convert_yaml_to_html.py new file mode 100644 index 00000000..d24afb1a --- /dev/null +++ b/src/metax_api/utils/convert_yaml_to_html.py @@ -0,0 +1,90 @@ +import logging +import json +import yaml +from os import path +from django.conf import settings + +TEMPLATE = """ +<!DOCTYPE html> +<html> +<head> + <title>Swagger UI</title> + <meta charset="UTF-8"> + <meta name="viewport" content="width=device-width, initial-scale=1"> + <link rel="stylesheet" type="text/css" href="https://unpkg.com/swagger-ui-dist@3/swagger-ui.css"> + <style> + html { box-sizing: border-box; overflow-y: scroll; } + *, *:before, *:after { box-sizing: inherit; } + body { margin: 0; background: #fafafa; } + </style> +</head> +<body> +<div id="swagger-ui"></div> +<script src="https://unpkg.com/swagger-ui-dist@3/swagger-ui-bundle.js"></script> +<script src="https://unpkg.com/swagger-ui-dist@3/swagger-ui-standalone-preset.js"></script> +<script>
+window.onload = function() { + var spec = %s; + const ui = SwaggerUIBundle({ + spec: spec, + dom_id: '#swagger-ui', + deepLinking: true, + presets: [ + SwaggerUIBundle.presets.apis, + SwaggerUIStandalonePreset + ], + plugins: [ + SwaggerUIBundle.plugins.DownloadUrl + ], + layout: "StandaloneLayout" + }) + window.ui = ui +} +</script> +</body> +</html> +""" + + +def yaml_to_html_convert(): + inpathv1 = path.join(settings.SWAGGER_YAML_PATH, 'v1', 'swagger.yaml') + inpathv2 = path.join(settings.SWAGGER_YAML_PATH, 'v2', 'swagger.yaml') + outpathv1 = path.join(settings.SWAGGER_HTML_PATH, 'v1', 'swagger.html') + outpathv2 = path.join(settings.SWAGGER_HTML_PATH, 'v2', 'swagger.html') + infile1 = open(inpathv1, 'r') + outfile1 = open(outpathv1, 'w') + spec = yaml.load(infile1, Loader=yaml.FullLoader) + if spec.get("host") =="__METAX_ENV_DOMAIN__": + spec["host"] = settings.SERVER_DOMAIN_NAME + outfile1.write(TEMPLATE % json.dumps(spec)) + infile1.close() + outfile1.close() + infile2 = open(inpathv2, 'r') + outfile2 = open(outpathv2, 'w') + spec = yaml.load(infile2, Loader=yaml.FullLoader) + if spec.get("host") =="__METAX_ENV_DOMAIN__": + spec["host"] = settings.SERVER_DOMAIN_NAME + outfile2.write(TEMPLATE % json.dumps(spec)) + infile2.close() + outfile2.close() From 0eab45c51cfa1d8cc65bf6a462f93be0f1813f75 Mon Sep 17 00:00:00 2001 From: aptiaine Date: Wed, 9 Jun 2021 15:59:52 +0300 Subject: [PATCH 027/160] CSCFAIRMETA-1056 move swagger to Metax Api --- .../templates/swagger/v1/swagger.html | 10 --- .../templates/swagger/v2/swagger.html | 10 --- src/metax_api/utils/convert_yaml_to_html.py | 68 +++++++++++++------ src/metax_api/views/router.py | 3 + 4 files changed, 51 insertions(+), 40 deletions(-) delete mode 100644 src/metax_api/templates/swagger/v1/swagger.html delete mode 100644 src/metax_api/templates/swagger/v2/swagger.html diff --git a/src/metax_api/templates/swagger/v1/swagger.html b/src/metax_api/templates/swagger/v1/swagger.html deleted file mode 100644 index 7e8b0e71..00000000 --- a/src/metax_api/templates/swagger/v1/swagger.html +++ /dev/null @@ -1,10 +0,0 @@ -<!DOCTYPE html> -<html> -<head> - <meta charset="UTF-8"> - <title>Swagger file creation failed</title> -</head> -<body> - <div></div> -</body> -</html> \ No newline at end of file diff --git a/src/metax_api/templates/swagger/v2/swagger.html b/src/metax_api/templates/swagger/v2/swagger.html deleted file mode 100644 index 7e8b0e71..00000000 --- a/src/metax_api/templates/swagger/v2/swagger.html +++ /dev/null @@ -1,10 +0,0 @@ -<!DOCTYPE html> -<html> -<head> - <meta charset="UTF-8"> - <title>Swagger file creation failed</title> -</head> -<body> - <div></div> -</body> -</html> \ No newline at end of file diff --git a/src/metax_api/utils/convert_yaml_to_html.py b/src/metax_api/utils/convert_yaml_to_html.py index d24afb1a..ab4edc15 100644 --- a/src/metax_api/utils/convert_yaml_to_html.py +++ b/src/metax_api/utils/convert_yaml_to_html.py @@ -4,6 +4,8 @@ from os import path from django.conf import settings +_logger = logging.getLogger(__name__) + TEMPLATE = """ @@ -66,25 +68,51 @@ """ +FAIL_TEMPLATE = """ +<!DOCTYPE html> +<html> +<head> + <meta charset="UTF-8"> + <title>Swagger file creation failed</title> +</head> +<body>
+ <div>
+ Swagger file creation failed!
+ </div>
+</body> +</html> +""" def yaml_to_html_convert(): + try: - inpathv1 = path.join(settings.SWAGGER_YAML_PATH, 'v1', 'swagger.yaml') - inpathv2 = path.join(settings.SWAGGER_YAML_PATH, 'v2', 'swagger.yaml') - outpathv1 = path.join(settings.SWAGGER_HTML_PATH, 'v1', 'swagger.html') - outpathv2 = path.join(settings.SWAGGER_HTML_PATH, 'v2', 'swagger.html') - infile1 = open(inpathv1, 'r') - outfile1 = open(outpathv1, 'w') - spec = yaml.load(infile1, Loader=yaml.FullLoader) - if spec.get("host") =="__METAX_ENV_DOMAIN__": - spec["host"] = settings.SERVER_DOMAIN_NAME - outfile1.write(TEMPLATE % json.dumps(spec)) - infile1.close() - outfile1.close() - infile2 = open(inpathv2, 'r') - outfile2 = open(outpathv2, 'w') - spec = yaml.load(infile2, Loader=yaml.FullLoader) - if spec.get("host") =="__METAX_ENV_DOMAIN__": - spec["host"] = settings.SERVER_DOMAIN_NAME - outfile2.write(TEMPLATE % json.dumps(spec)) - infile2.close() - outfile2.close() + try: + inpathv1 = path.join(settings.SWAGGER_YAML_PATH, 'v1', 'swagger.yaml') + inpathv2 = path.join(settings.SWAGGER_YAML_PATH, 'v2', 'swagger.yaml') + outpathv1 = path.join(settings.SWAGGER_HTML_PATH, 'v1', 'swagger.html') + outpathv2 = path.join(settings.SWAGGER_HTML_PATH, 'v2', 'swagger.html') + outfile1 = open(outpathv1, 'w') + outfile2 = open(outpathv2, 'w') + try: + infile1 = open(inpathv1, 'r') + spec = yaml.load(infile1, Loader=yaml.FullLoader) + if spec.get("host") =="__METAX_ENV_DOMAIN__": + spec["host"] = settings.SERVER_DOMAIN_NAME + outfile1.write(TEMPLATE % json.dumps(spec)) + infile1.close() + except FileNotFoundError: + outfile1.write(FAIL_TEMPLATE) + + try: + infile2 = open(inpathv2, 'r') + spec = yaml.load(infile2, Loader=yaml.FullLoader) + if spec.get("host") =="__METAX_ENV_DOMAIN__": + spec["host"] = settings.SERVER_DOMAIN_NAME + outfile2.write(TEMPLATE % json.dumps(spec)) + infile2.close() + except FileNotFoundError: + outfile2.write(FAIL_TEMPLATE) + outfile1.close() + outfile2.close() + except yaml.YAMLError as exc: + _logger.error("YAML loading failed") + _logger.error(exc) + except json.decoder.JSONDecodeError as exc: + _logger.error("JSON loading failed") + _logger.error(exc) diff --git a/src/metax_api/views/router.py b/src/metax_api/views/router.py index 9f820219..deaaa082 100755 --- a/src/metax_api/views/router.py +++ b/src/metax_api/views/router.py @@ -6,10 +6,13 @@ # :license: MIT from django.conf.urls import url +from django.views.generic import TemplateView from metax_api.views.secure import secure_view view_urlpatterns = [ url(r"^logout?", secure_view.SecureLogoutView.as_view()), url(r"^secure/login?", secure_view.SecureLoginView.as_view()), + url(r"^swagger/v1", TemplateView.as_view(template_name='swagger/v1/swagger.html')), + url(r"^swagger/v2", TemplateView.as_view(template_name='swagger/v2/swagger.html')), ] From 52acf7def57f2aae41c1cb37ef7a91bee841cb5f Mon Sep 17 00:00:00 2001 From: aptiaine Date: Tue, 15 Jun 2021 15:20:14 +0300 Subject: [PATCH 028/160] Added more error handling and removed duplicate code block --- src/metax_api/utils/convert_yaml_to_html.py | 68 ++++++++++----------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/src/metax_api/utils/convert_yaml_to_html.py b/src/metax_api/utils/convert_yaml_to_html.py index ab4edc15..a1f3ff50 100644 --- a/src/metax_api/utils/convert_yaml_to_html.py +++ b/src/metax_api/utils/convert_yaml_to_html.py @@ -1,7 +1,7 @@ import logging import json import yaml -from os import path +from os import path, makedirs from django.conf import settings @@ -81,38 
+81,38 @@ """ + def yaml_to_html_convert(): + try: - inpathv1 = path.join(settings.SWAGGER_YAML_PATH, 'v1', 'swagger.yaml') - inpathv2 = path.join(settings.SWAGGER_YAML_PATH, 'v2', 'swagger.yaml') - outpathv1 = path.join(settings.SWAGGER_HTML_PATH, 'v1', 'swagger.html') - outpathv2 = path.join(settings.SWAGGER_HTML_PATH, 'v2', 'swagger.html') - outfile1 = open(outpathv1, 'w') - outfile2 = open(outpathv2, 'w') - try: - infile1 = open(inpathv1, 'r') - spec = yaml.load(infile1, Loader=yaml.FullLoader) - if spec.get("host") =="__METAX_ENV_DOMAIN__": - spec["host"] = settings.SERVER_DOMAIN_NAME - outfile1.write(TEMPLATE % json.dumps(spec)) - infile1.close() - except FileNotFoundError: - outfile1.write(FAIL_TEMPLATE) - - try: - infile2 = open(inpathv2, 'r') - spec = yaml.load(infile2, Loader=yaml.FullLoader) - if spec.get("host") =="__METAX_ENV_DOMAIN__": - spec["host"] = settings.SERVER_DOMAIN_NAME - outfile2.write(TEMPLATE % json.dumps(spec)) - infile2.close() - except FileNotFoundError: - outfile2.write(FAIL_TEMPLATE) - outfile1.close() - outfile2.close() - except yaml.YAMLError as exc: - _logger.error("YAML loading failed") - _logger.error(exc) - except json.decoder.JSONDecodeError as exc: - _logger.error("JSON loading failed") - _logger.error(exc) + for api_version in ["v1", "v2"]: + inpath = path.join(settings.SWAGGER_YAML_PATH, api_version, 'swagger.yaml') + outpath = path.join(settings.SWAGGER_HTML_PATH, api_version, 'swagger.html') + + if not path.exists(path.dirname(outpath)): + try: + makedirs(path.dirname(outpath)) + except OSError as exc: + _logger.error(exc) + raise + + with open(outpath, 'w') as outfile: + try: + with open(inpath, 'r') as infile: + spec = yaml.load(infile, Loader=yaml.FullLoader) + if spec.get("host") == "__METAX_ENV_DOMAIN__": + spec["host"] = settings.SERVER_DOMAIN_NAME + outfile.write(TEMPLATE % json.dumps(spec)) + + except FileNotFoundError: + outfile.write(FAIL_TEMPLATE) + except yaml.YAMLError as exc: + _logger.error("YAML loading failed") + _logger.error(exc) + outfile.write(FAIL_TEMPLATE) + except json.decoder.JSONDecodeError as exc: + _logger.error("JSON loading failed") + _logger.error(exc) + + except PermissionError: + _logger.error("Permission error") From d4ec0959dcad76714499ddc0dadd20a3bbbd9dd5 Mon Sep 17 00:00:00 2001 From: aptiaine Date: Fri, 18 Jun 2021 16:27:35 +0300 Subject: [PATCH 029/160] All host names changed in the generated swagger.html file --- .gitignore | 1 + src/metax_api/utils/convert_yaml_to_html.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 54fa40cb..553f38d8 100755 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,7 @@ ubuntu-xenial-16.04-cloudimg-console.log !.env.template *.pem /src/log +/src/metax_api/templates/swagger # Unit test / coverage reports htmlcov/ diff --git a/src/metax_api/utils/convert_yaml_to_html.py b/src/metax_api/utils/convert_yaml_to_html.py index a1f3ff50..2b7bb7e5 100644 --- a/src/metax_api/utils/convert_yaml_to_html.py +++ b/src/metax_api/utils/convert_yaml_to_html.py @@ -99,9 +99,9 @@ def yaml_to_html_convert(): with open(outpath, 'w') as outfile: try: with open(inpath, 'r') as infile: - spec = yaml.load(infile, Loader=yaml.FullLoader) - if spec.get("host") == "__METAX_ENV_DOMAIN__": - spec["host"] = settings.SERVER_DOMAIN_NAME + readdata = infile.read() + indata = readdata.replace("__METAX_ENV_DOMAIN__", settings.SERVER_DOMAIN_NAME) + spec = yaml.load(indata, Loader=yaml.FullLoader) outfile.write(TEMPLATE % json.dumps(spec)) except FileNotFoundError: 
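
Taken together, the two patches above leave the converter doing a plain text substitution before the YAML is parsed, so every occurrence of __METAX_ENV_DOMAIN__ in the spec is rewritten, not only the top-level host key. A minimal standalone sketch of that flow follows; the template string and the render_swagger_html helper are illustrative stand-ins, since the real code reads its template and SERVER_DOMAIN_NAME from the module being patched:

    import json
    import yaml

    # Illustrative stand-in; the real TEMPLATE is a full Swagger UI page.
    TEMPLATE = "<html><body><script>var spec = %s;</script></body></html>"

    def render_swagger_html(yaml_text, domain):
        # Replace the placeholder in the raw text so that every occurrence
        # changes (URLs in descriptions etc.), not just the "host" key.
        indata = yaml_text.replace("__METAX_ENV_DOMAIN__", domain)
        spec = yaml.load(indata, Loader=yaml.FullLoader)
        return TEMPLATE % json.dumps(spec)

    print(render_swagger_html("host: __METAX_ENV_DOMAIN__\nbasePath: /rest\n", "metax.fairdata.fi"))
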
From fbd728444daeb0559015e2dce2320254c9c4adc7 Mon Sep 17 00:00:00 2001 From: aptiaine Date: Wed, 30 Jun 2021 09:54:38 +0300 Subject: [PATCH 030/160] Swagger guidance added --- docs/api/README.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 docs/api/README.md diff --git a/docs/api/README.md b/docs/api/README.md new file mode 100644 index 00000000..b396c6af --- /dev/null +++ b/docs/api/README.md @@ -0,0 +1,13 @@ +The openapi/swagger specification version 2 is used to document the Metax REST API. +Rest api descriptions are stored in the repository in /metax_api/swagger/v1/swagger.yaml + and /metax_api/swagger/v2/swagger.yaml, depending on the interface version. +From both yaml files, the corresponding html files are generated in the /metax_api/templates/swagger/ directory when Metax starts. + +Swagger documentation can be edited directly in PyCharm and VS Code. There are good openapi plugins for both. One good plugin for both is the OpenAPI (Swagger) Editor. Another option is to use [Swagger editor](https://editor.swagger.io). + +[VS Code plugin](https://marketplace.visualstudio.com/items?itemName=42Crunch.vscode-openapi&ssr=false#review-details) + +[PyCharm plugin](https://plugins.jetbrains.com/plugin/14837-openapi-swagger-editor) + +Although OpenApi is allowed to edit in json and yaml formats, in the case of Metax it has to be done in yaml format because otherwise the conversion to html format will not be possible. +A good starting point for studying OpenApi is [OpenApi specification V2](https://swagger.io/specification/v2/) From 7ed2e03415f0bfc978fbe8fafcdc3028228b9e94 Mon Sep 17 00:00:00 2001 From: aptiaine Date: Tue, 6 Jul 2021 14:32:17 +0300 Subject: [PATCH 031/160] Squashed commit of the following: commit fbd728444daeb0559015e2dce2320254c9c4adc7 Author: aptiaine Date: Wed Jun 30 09:54:38 2021 +0300 Swagger guidance added commit d4ec0959dcad76714499ddc0dadd20a3bbbd9dd5 Author: aptiaine Date: Fri Jun 18 16:27:35 2021 +0300 All host names changed in the generated swagger.html file commit 52acf7def57f7c1a40fef61c166dd4895c1476d3 Author: aptiaine Date: Tue Jun 15 15:20:14 2021 +0300 Added more error handling and removed duplicate code block commit 0eab45c51cfa1d8cc65bf6a462f93be0f1813f75 Author: aptiaine Date: Wed Jun 9 15:59:52 2021 +0300 CSCFAIRMETA-1056 move swagger to Metax Api commit 3e4e22811b41f7bf6a67aea1ac4e17be0747ff8b Author: aptiaine Date: Mon Jun 7 12:14:44 2021 +0300 converting yaml files to html on startup --- .gitignore | 1 + docs/api/README.md | 13 ++ src/metax_api/onappstart.py | 8 +- src/metax_api/settings/__init__.py | 2 + src/metax_api/settings/components/common.py | 3 + {swagger => src/metax_api/swagger}/README.md | 0 .../swagger}/swagger-yaml-to-html.py | 0 .../metax_api/swagger}/v1/swagger.yaml | 0 .../metax_api/swagger}/v2/swagger.yaml | 0 src/metax_api/utils/convert_yaml_to_html.py | 118 ++++++++++++++++++ src/metax_api/views/router.py | 3 + 11 files changed, 147 insertions(+), 1 deletion(-) create mode 100644 docs/api/README.md rename {swagger => src/metax_api/swagger}/README.md (100%) rename {swagger => src/metax_api/swagger}/swagger-yaml-to-html.py (100%) rename {swagger => src/metax_api/swagger}/v1/swagger.yaml (100%) rename {swagger => src/metax_api/swagger}/v2/swagger.yaml (100%) create mode 100644 src/metax_api/utils/convert_yaml_to_html.py diff --git a/.gitignore b/.gitignore index 54fa40cb..553f38d8 100755 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,7 @@ ubuntu-xenial-16.04-cloudimg-console.log !.env.template *.pem /src/log 
+/src/metax_api/templates/swagger # Unit test / coverage reports htmlcov/ diff --git a/docs/api/README.md b/docs/api/README.md new file mode 100644 index 00000000..b396c6af --- /dev/null +++ b/docs/api/README.md @@ -0,0 +1,13 @@ +The openapi/swagger specification version 2 is used to document the Metax REST API. +Rest api descriptions are stored in the repository in /metax_api/swagger/v1/swagger.yaml + and /metax_api/swagger/v2/swagger.yaml, depending on the interface version. +From both yaml files, the corresponding html files are generated in the /metax_api/templates/swagger/ directory when Metax starts. + +Swagger documentation can be edited directly in PyCharm and VS Code. There are good openapi plugins for both. One good plugin for both is the OpenAPI (Swagger) Editor. Another option is to use [Swagger editor](https://editor.swagger.io). + +[VS Code plugin](https://marketplace.visualstudio.com/items?itemName=42Crunch.vscode-openapi&ssr=false#review-details) + +[PyCharm plugin](https://plugins.jetbrains.com/plugin/14837-openapi-swagger-editor) + +Although OpenApi is allowed to edit in json and yaml formats, in the case of Metax it has to be done in yaml format because otherwise the conversion to html format will not be possible. +A good starting point for studying OpenApi is [OpenApi specification V2](https://swagger.io/specification/v2/) diff --git a/src/metax_api/onappstart.py b/src/metax_api/onappstart.py index 24f85914..47816fc7 100755 --- a/src/metax_api/onappstart.py +++ b/src/metax_api/onappstart.py @@ -13,7 +13,7 @@ from django.conf import settings from icecream import ic -from metax_api.utils import ReferenceDataLoader, executing_test_case +from metax_api.utils import ReferenceDataLoader, executing_test_case, convert_yaml_to_html _logger = logging.getLogger(__name__) @@ -101,4 +101,10 @@ def ready(self): # pragma: no cover _logger.error(e) _logger.error("Unable to initialize RabbitMQ exchanges") + try: + convert_yaml_to_html.yaml_to_html_convert() + except Exception as e: + _logger.error(e) + _logger.error("Unable to convert swagger documentation") + _logger.info("Metax API startup tasks finished") diff --git a/src/metax_api/settings/__init__.py b/src/metax_api/settings/__init__.py index fd0410ea..cbf5c837 100755 --- a/src/metax_api/settings/__init__.py +++ b/src/metax_api/settings/__init__.py @@ -59,6 +59,8 @@ SERVER_DOMAIN_NAME=(str, "metax.fd-dev.csc.fi"), VALIDATE_TOKEN_URL=(str, "https://127.0.0.1/secure/validate_token"), WKT_FILENAME=(str, join(REFDATA_INDEXER_PATH, "resources", "uri_to_wkt.json")), + SWAGGER_YAML_PATH=(str, join(BASE_DIR, "metax_api", "swagger")), + SWAGGER_HTML_PATH=(str, join(BASE_DIR, "metax_api", "templates", "swagger")), ) # reading .env file environ.Env.read_env() diff --git a/src/metax_api/settings/components/common.py b/src/metax_api/settings/components/common.py index cbf54286..24529eb2 100755 --- a/src/metax_api/settings/components/common.py +++ b/src/metax_api/settings/components/common.py @@ -188,3 +188,6 @@ {"password": "test-fds", "username": "fds"}, {"password": "test-download", "username": "download"}, ] + +SWAGGER_YAML_PATH = env('SWAGGER_YAML_PATH') +SWAGGER_HTML_PATH = env('SWAGGER_HTML_PATH') diff --git a/swagger/README.md b/src/metax_api/swagger/README.md similarity index 100% rename from swagger/README.md rename to src/metax_api/swagger/README.md diff --git a/swagger/swagger-yaml-to-html.py b/src/metax_api/swagger/swagger-yaml-to-html.py similarity index 100% rename from swagger/swagger-yaml-to-html.py rename to 
src/metax_api/swagger/swagger-yaml-to-html.py diff --git a/swagger/v1/swagger.yaml b/src/metax_api/swagger/v1/swagger.yaml similarity index 100% rename from swagger/v1/swagger.yaml rename to src/metax_api/swagger/v1/swagger.yaml diff --git a/swagger/v2/swagger.yaml b/src/metax_api/swagger/v2/swagger.yaml similarity index 100% rename from swagger/v2/swagger.yaml rename to src/metax_api/swagger/v2/swagger.yaml diff --git a/src/metax_api/utils/convert_yaml_to_html.py b/src/metax_api/utils/convert_yaml_to_html.py new file mode 100644 index 00000000..2b7bb7e5 --- /dev/null +++ b/src/metax_api/utils/convert_yaml_to_html.py @@ -0,0 +1,118 @@ +import logging +import json +import yaml +from os import path, makedirs +from django.conf import settings + +_logger = logging.getLogger(__name__) + +TEMPLATE = """ + + + + + Swagger UI + + + + + + +
+    </head>
+    <body>
+        <div id="swagger-ui"></div>
+        <script>
+        var spec = %s;
+        SwaggerUIBundle({spec: spec, dom_id: "#swagger-ui"});
+        </script>
+    </body>
+</html>
+"""
+
+FAIL_TEMPLATE = """
+<!DOCTYPE html>
+<html>
+    <head>
+        <title>Swagger file creation failed</title>
+    </head>
+    <body>
+        <p>Swagger file creation failed!</p>
+ + +""" + + +def yaml_to_html_convert(): + + try: + for api_version in ["v1", "v2"]: + inpath = path.join(settings.SWAGGER_YAML_PATH, api_version, 'swagger.yaml') + outpath = path.join(settings.SWAGGER_HTML_PATH, api_version, 'swagger.html') + + if not path.exists(path.dirname(outpath)): + try: + makedirs(path.dirname(outpath)) + except OSError as exc: + _logger.error(exc) + raise + + with open(outpath, 'w') as outfile: + try: + with open(inpath, 'r') as infile: + readdata = infile.read() + indata = readdata.replace("__METAX_ENV_DOMAIN__", settings.SERVER_DOMAIN_NAME) + spec = yaml.load(indata, Loader=yaml.FullLoader) + outfile.write(TEMPLATE % json.dumps(spec)) + + except FileNotFoundError: + outfile.write(FAIL_TEMPLATE) + except yaml.YAMLError as exc: + _logger.error("YAML loading failed") + _logger.error(exc) + outfile.write(FAIL_TEMPLATE) + except json.decoder.JSONDecodeError as exc: + _logger.error("JSON loading failed") + _logger.error(exc) + + except PermissionError: + _logger.error("Permission error") diff --git a/src/metax_api/views/router.py b/src/metax_api/views/router.py index 9f820219..deaaa082 100755 --- a/src/metax_api/views/router.py +++ b/src/metax_api/views/router.py @@ -6,10 +6,13 @@ # :license: MIT from django.conf.urls import url +from django.views.generic import TemplateView from metax_api.views.secure import secure_view view_urlpatterns = [ url(r"^logout?", secure_view.SecureLogoutView.as_view()), url(r"^secure/login?", secure_view.SecureLoginView.as_view()), + url(r"^swagger/v1", TemplateView.as_view(template_name='swagger/v1/swagger.html')), + url(r"^swagger/v2", TemplateView.as_view(template_name='swagger/v2/swagger.html')), ] From 36c87a584abee3fda7e86654464a7101f6710a97 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Tue, 6 Jul 2021 09:22:31 +0300 Subject: [PATCH 032/160] fix: total files byte size of dataset is updated correctly --- src/metax_api/models/catalog_record_v2.py | 10 +++++- .../rest/v2/views/datasets/filehandling.py | 35 ++++++++++++++++++- 2 files changed, 43 insertions(+), 2 deletions(-) diff --git a/src/metax_api/models/catalog_record_v2.py b/src/metax_api/models/catalog_record_v2.py index 3ecec3f7..c199bf6d 100755 --- a/src/metax_api/models/catalog_record_v2.py +++ b/src/metax_api/models/catalog_record_v2.py @@ -10,7 +10,7 @@ from copy import deepcopy from django.conf import settings -from django.db.models import Q +from django.db.models import Q, Sum from rest_framework.serializers import ValidationError from metax_api.exceptions import Http400, Http403 @@ -574,6 +574,14 @@ def _pre_update_operations(self, draft_publish=False): else: self.update_datacite = False + def _calculate_total_files_byte_size(self): + rd = self.research_dataset + rd["total_files_byte_size"] = 0 + if self.files.count() > 0: + rd["total_files_byte_size"] = ( + self.files.aggregate(Sum("byte_size"))["byte_size__sum"] or 0 + ) + def _update_dataset_specific_metadata(self, file_changes, operation_is_create=False): """ Take dataset-specific metadata (also knows as "user metadata") from file_changes and persist diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py b/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py index 8cdc088c..59929262 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py @@ -468,7 +468,7 @@ def _set_token_authentication(self): def _create_draft(self): self.cr_test_data["research_dataset"].pop("files", None) - 
self.cr_test_data["research_dataset"].pop("directories", None) + self.cr_test_data["research_dataset"].pop("total_files_byte_size", None) response = self.client.post("/rest/v2/datasets?draft", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) return response.data["id"] @@ -813,6 +813,39 @@ def test_allow_file_changes_only_on_drafts(self): response.data, ) + def test_total_files_byte_size_is_updated_after_adding_files(self): + # create a draft dataset with zero files + cr_id = self._create_draft() + response = self.client.get(f"/rest/v2/datasets/{cr_id}") + cr_id = response.data["id"] + self.assertEqual(response.data.get("total_files_byte_size"), None) + + # add first file + file_changes = {} + + self._add_file(file_changes, "/TestExperiment/Directory_1/Group_1/file_01.txt") + response = self.client.post( + "/rest/v2/datasets/%d/files" % cr_id, file_changes, format="json" + ) + self.assertEqual(response.data.get("files_added"), 1, response.data) + self.assert_file_count(cr_id, 1) + + response = self.client.get(f"/rest/v2/datasets/{cr_id}") + self.assert_total_files_byte_size(response.data, 100) + + # add second file + file_changes = {} + + self._add_file(file_changes, "/TestExperiment/Directory_1/Group_1/file_02.txt") + response = self.client.post( + "/rest/v2/datasets/%d/files" % cr_id, file_changes, format="json" + ) + self.assertEqual(response.data.get("files_added"), 1, response.data) + self.assert_file_count(cr_id, 2) + + response = self.client.get(f"/rest/v2/datasets/{cr_id}") + self.assert_total_files_byte_size(response.data, 200) + class CatalogRecordUserMetadata(CatalogRecordApiWriteAssignFilesCommonV2): From 1fae5aed3e94f35ddcf9e6fd535cc144892f0988 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Tue, 6 Jul 2021 15:24:52 +0300 Subject: [PATCH 033/160] fixed test according to comments on review --- .../api/rest/v2/views/datasets/filehandling.py | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py b/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py index 59929262..2d57d168 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py @@ -468,6 +468,7 @@ def _set_token_authentication(self): def _create_draft(self): self.cr_test_data["research_dataset"].pop("files", None) + self.cr_test_data["research_dataset"].pop("directories", None) self.cr_test_data["research_dataset"].pop("total_files_byte_size", None) response = self.client.post("/rest/v2/datasets?draft", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) @@ -820,12 +821,12 @@ def test_total_files_byte_size_is_updated_after_adding_files(self): cr_id = response.data["id"] self.assertEqual(response.data.get("total_files_byte_size"), None) - # add first file + # add file to dataset file_changes = {} self._add_file(file_changes, "/TestExperiment/Directory_1/Group_1/file_01.txt") response = self.client.post( - "/rest/v2/datasets/%d/files" % cr_id, file_changes, format="json" + f"/rest/v2/datasets/{cr_id}/files", file_changes, format="json" ) self.assertEqual(response.data.get("files_added"), 1, response.data) self.assert_file_count(cr_id, 1) @@ -833,19 +834,6 @@ def test_total_files_byte_size_is_updated_after_adding_files(self): response = self.client.get(f"/rest/v2/datasets/{cr_id}") 
self.assert_total_files_byte_size(response.data, 100) - # add second file - file_changes = {} - - self._add_file(file_changes, "/TestExperiment/Directory_1/Group_1/file_02.txt") - response = self.client.post( - "/rest/v2/datasets/%d/files" % cr_id, file_changes, format="json" - ) - self.assertEqual(response.data.get("files_added"), 1, response.data) - self.assert_file_count(cr_id, 2) - - response = self.client.get(f"/rest/v2/datasets/{cr_id}") - self.assert_total_files_byte_size(response.data, 200) - class CatalogRecordUserMetadata(CatalogRecordApiWriteAssignFilesCommonV2): From 121aed07c156878386cc612a86affc95e18857c2 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Wed, 7 Jul 2021 11:09:42 +0300 Subject: [PATCH 034/160] updated permissions for qvain and qvain light services --- src/metax_api/settings/components/access_control.py | 4 ++++ src/metax_api/settings/environments/stable.py | 3 --- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/metax_api/settings/components/access_control.py b/src/metax_api/settings/components/access_control.py index ff5c3045..202459c2 100755 --- a/src/metax_api/settings/components/access_control.py +++ b/src/metax_api/settings/components/access_control.py @@ -107,6 +107,8 @@ def __lt__(self, other): Role.FDS, Role.TPAS, Role.END_USERS, + Role.QVAIN, + Role.QVAIN_LIGHT, ] api_permissions.rest.files["update"] = [ Role.METAX, @@ -114,6 +116,8 @@ def __lt__(self, other): Role.TPAS, Role.FDS, Role.END_USERS, + Role.QVAIN, + Role.QVAIN_LIGHT, ] api_permissions.rest.files.delete = [Role.METAX, Role.IDA, Role.TPAS] diff --git a/src/metax_api/settings/environments/stable.py b/src/metax_api/settings/environments/stable.py index ae2d77b6..2f59d398 100644 --- a/src/metax_api/settings/environments/stable.py +++ b/src/metax_api/settings/environments/stable.py @@ -7,9 +7,6 @@ api_permissions.rest.directories.read += [Role.IDA, Role.QVAIN_LIGHT] -api_permissions.rest.files.read += [Role.QVAIN, Role.QVAIN_LIGHT] -api_permissions.rest.files["update"] += [Role.QVAIN, Role.QVAIN_LIGHT] - api_permissions.rpc.datasets.change_cumulative_state.use = [ Role.METAX, Role.QVAIN, From 8071c6a4500c69ff24ab4c755dc5b671998f83f6 Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Thu, 3 Jun 2021 10:31:58 +0300 Subject: [PATCH 035/160] CSCFAIRMETA-1047: Refactor docs to have single root --- docs/README.md | 11 -- docs/api/README.md | 21 ++- docs/{v1/source => api}/_static/custom.css | 0 docs/{v2/source => api}/conf.py | 4 +- docs/api/index.rst | 26 +++ docs/{v2/source => api/v1}/_static/custom.css | 0 .../source => api/v1}/_templates/layout.html | 0 docs/{v1/source => api/v1}/datasets.rst | 36 ++-- docs/{v1/source => api/v1}/end_users.rst | 2 +- docs/{v1/source => api/v1}/files.rst | 6 +- docs/{v1/source => api/v1}/general.rst | 2 +- docs/{v1/source => api/v1}/index.rst | 11 +- .../{v1/source => api/v1}/list_operations.rst | 0 docs/{v1/source => api/v1}/metax_api.rst | 0 .../source => api/v1}/metax_high_level.png | Bin .../source => api/v1}/metax_high_level.xml | 0 docs/{v1/source => api/v1}/quick_start.rst | 0 docs/{v2/source => api/v1}/reference_data.rst | 4 +- .../source => api/v2}/_templates/layout.html | 0 docs/{v2/source => api/v2}/datasets.rst | 28 +-- docs/{v2/source => api/v2}/end_users.rst | 2 +- docs/{v2/source => api/v2}/files.rst | 6 +- docs/{v2/source => api/v2}/general.rst | 2 +- docs/{v2/source => api/v2}/index.rst | 11 +- .../{v2/source => api/v2}/list_operations.rst | 0 docs/{v2/source => api/v2}/metax_api.rst | 0 .../source => api/v2}/metax_high_level.png | 
Bin .../source => api/v2}/metax_high_level.xml | 0 docs/{v2/source => api/v2}/quick_start.rst | 0 docs/{v1/source => api/v2}/reference_data.rst | 4 +- docs/v1/source/conf.py | 172 ------------------ docs/v1/source/oai_pmh.rst | 5 - docs/v2/source/oai_pmh.rst | 5 - 33 files changed, 103 insertions(+), 255 deletions(-) delete mode 100755 docs/README.md rename docs/{v1/source => api}/_static/custom.css (100%) rename docs/{v2/source => api}/conf.py (97%) create mode 100755 docs/api/index.rst rename docs/{v2/source => api/v1}/_static/custom.css (100%) rename docs/{v1/source => api/v1}/_templates/layout.html (100%) rename docs/{v1/source => api/v1}/datasets.rst (97%) rename docs/{v1/source => api/v1}/end_users.rst (97%) rename docs/{v1/source => api/v1}/files.rst (98%) rename docs/{v1/source => api/v1}/general.rst (99%) rename docs/{v1/source => api/v1}/index.rst (79%) rename docs/{v1/source => api/v1}/list_operations.rst (100%) rename docs/{v1/source => api/v1}/metax_api.rst (100%) rename docs/{v1/source => api/v1}/metax_high_level.png (100%) rename docs/{v1/source => api/v1}/metax_high_level.xml (100%) rename docs/{v1/source => api/v1}/quick_start.rst (100%) rename docs/{v2/source => api/v1}/reference_data.rst (99%) rename docs/{v2/source => api/v2}/_templates/layout.html (100%) rename docs/{v2/source => api/v2}/datasets.rst (98%) rename docs/{v2/source => api/v2}/end_users.rst (97%) rename docs/{v2/source => api/v2}/files.rst (98%) rename docs/{v2/source => api/v2}/general.rst (99%) rename docs/{v2/source => api/v2}/index.rst (79%) rename docs/{v2/source => api/v2}/list_operations.rst (100%) rename docs/{v2/source => api/v2}/metax_api.rst (100%) rename docs/{v2/source => api/v2}/metax_high_level.png (100%) rename docs/{v2/source => api/v2}/metax_high_level.xml (100%) rename docs/{v2/source => api/v2}/quick_start.rst (100%) rename docs/{v1/source => api/v2}/reference_data.rst (99%) delete mode 100755 docs/v1/source/conf.py delete mode 100755 docs/v1/source/oai_pmh.rst delete mode 100755 docs/v2/source/oai_pmh.rst diff --git a/docs/README.md b/docs/README.md deleted file mode 100755 index 938fd972..00000000 --- a/docs/README.md +++ /dev/null @@ -1,11 +0,0 @@ -## Enabling autobuilds - -To install required dependencies run the following: -``pip install sphinx sphinx-autobuild sphinx_rtd_theme`` - -To start server, run following in metax-api directory: -``sphinx-autobuild -t {envtag} docs/source/ docs/build/``, where {envtag} is one of local_development, test, stable or production. Envtag determines the target environment of the documentation. - -Note that the server should be run on the host machine since virtual machine does not build docs automatically. - -To conditionally add parts of the documentation, use only -directive. See [This](https://github.com/sphinx-doc/sphinx/issues/1115) for known issue with this directive and headings. \ No newline at end of file diff --git a/docs/api/README.md b/docs/api/README.md index b396c6af..335c3c76 100644 --- a/docs/api/README.md +++ b/docs/api/README.md @@ -1,3 +1,9 @@ +# Developing documentation + +Metax has API documentation implemented with Swagger and Sphinx. Below are instructions how they can be improved. + +## Swagger + The openapi/swagger specification version 2 is used to document the Metax REST API. Rest api descriptions are stored in the repository in /metax_api/swagger/v1/swagger.yaml and /metax_api/swagger/v2/swagger.yaml, depending on the interface version. 
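
A minimal sketch of regenerating the pages by hand, for example after editing one of the yaml files (this assumes DJANGO_SETTINGS_MODULE points at a valid settings module; normally the conversion is triggered from onappstart.py, as shown in the squashed commit above):

    import django

    # Settings must be loadable, because the converter reads SWAGGER_YAML_PATH,
    # SWAGGER_HTML_PATH and SERVER_DOMAIN_NAME from django.conf.settings.
    django.setup()

    from metax_api.utils import convert_yaml_to_html

    # Writes templates/swagger/v1/swagger.html and templates/swagger/v2/swagger.html.
    convert_yaml_to_html.yaml_to_html_convert()
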
@@ -10,4 +16,17 @@ Swagger documentation can be edited directly in PyCharm and VS Code. There are g [PyCharm plugin](https://plugins.jetbrains.com/plugin/14837-openapi-swagger-editor) Although OpenApi is allowed to edit in json and yaml formats, in the case of Metax it has to be done in yaml format because otherwise the conversion to html format will not be possible. -A good starting point for studying OpenApi is [OpenApi specification V2](https://swagger.io/specification/v2/) +A good starting point for studying OpenApi is [OpenApi specification V2](https://swagger.io/specification/v2/) + +## Sphinx +### Enabling autobuilds + +To install required dependencies run the following: +``pip install sphinx sphinx-autobuild sphinx_rtd_theme`` + +To start server, run following in metax-api directory: +``sphinx-autobuild -t {envtag} docs/source/ docs/build/``, where {envtag} is one of local_development, test, stable or production. Envtag determines the target environment of the documentation. + +Note that the server should be run on the host machine since virtual machine does not build docs automatically. + +To conditionally add parts of the documentation, use only -directive. See [This](https://github.com/sphinx-doc/sphinx/issues/1115) for known issue with this directive and headings. diff --git a/docs/v1/source/_static/custom.css b/docs/api/_static/custom.css similarity index 100% rename from docs/v1/source/_static/custom.css rename to docs/api/_static/custom.css diff --git a/docs/v2/source/conf.py b/docs/api/conf.py similarity index 97% rename from docs/v2/source/conf.py rename to docs/api/conf.py index 33ac0958..65d2f480 100755 --- a/docs/v2/source/conf.py +++ b/docs/api/conf.py @@ -28,6 +28,7 @@ # The full version, including alpha/beta/rc tags release = "" +fqdn = "metax.fairdata.fi" # -- General configuration --------------------------------------------------- @@ -111,6 +112,7 @@ # # html_sidebars = {} +rst_prolog = f".. |__METAX_ENV_DOMAIN__| replace:: {fqdn}" # -- Options for HTMLHelp output --------------------------------------------- @@ -169,4 +171,4 @@ def setup(app): - app.add_stylesheet("custom.css") + app.add_css_file("custom.css") diff --git a/docs/api/index.rst b/docs/api/index.rst new file mode 100755 index 00000000..84fcea84 --- /dev/null +++ b/docs/api/index.rst @@ -0,0 +1,26 @@ + +.. Metax API documentation master file, created by + sphinx-quickstart on Wed Sep 5 13:21:50 2018. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +======================= +Metax API Documentation +======================= + +.. 
toctree:: + :maxdepth: 5 + :caption: Metax API: + + v1/index + v2/index + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/docs/v2/source/_static/custom.css b/docs/api/v1/_static/custom.css similarity index 100% rename from docs/v2/source/_static/custom.css rename to docs/api/v1/_static/custom.css diff --git a/docs/v1/source/_templates/layout.html b/docs/api/v1/_templates/layout.html similarity index 100% rename from docs/v1/source/_templates/layout.html rename to docs/api/v1/_templates/layout.html diff --git a/docs/v1/source/datasets.rst b/docs/api/v1/datasets.rst similarity index 97% rename from docs/v1/source/datasets.rst rename to docs/api/v1/datasets.rst index 6af98008..4b81e42a 100755 --- a/docs/v1/source/datasets.rst +++ b/docs/api/v1/datasets.rst @@ -20,7 +20,7 @@ Data model visualization The dataset data model visualization can be seen here https://tietomallit.suomi.fi/model/mrd. The data model visualization is very helpful to keep open when creating dataset metadata descriptions, as it shows all the different possible fields and relations, which can be used, which fields are mandatory, and so on. -Additionally, the chosen data catalog may have some additional restrictions to the schema, such as reduced relations, or reduced mandatory fields. Read more about data catalogs and their implications here :ref:`rst-data-catalogs`. +Additionally, the chosen data catalog may have some additional restrictions to the schema, such as reduced relations, or reduced mandatory fields. Read more about data catalogs and their implications here :ref:`rst-data-catalogs-v1`. @@ -74,7 +74,7 @@ Preferred identifier is the "public" identifier of a dataset. When referring to -.. _rst-data-catalogs: +.. _rst-data-catalogs-v1: Data Catalogs ^^^^^^^^^^^^^^ @@ -146,7 +146,7 @@ When creating a new dataset and wishing to use for example the ATT catalog, the assert response.status_code == 201, response.content -For more involving examples, see the :ref:`rst-dataset-examples` section for datasets. +For more involving examples, see the :ref:`examples` section for datasets. @@ -201,7 +201,7 @@ If the requested resource has not been modified after the date specified in the -.. _rst-dataset-versioning: +.. _rst-dataset-versioning-v1: Dataset versioning ------------------- @@ -262,7 +262,7 @@ New metadata versions are not visible in the returned response in any way, excep **How do I know beforehand if a new dataset version is going to be created?** -Take a look at the topic :ref:`rst-describing-and-adding-files`. +Take a look at the topic :ref:`rst-describing-and-adding-files-v1`. @@ -329,12 +329,12 @@ If the field ``alternate_record_set`` is missing from a record, it means there a -.. _rst-describing-and-adding-files: +.. _rst-describing-and-adding-files-v1: Describing files vs. adding and removing files ----------------------------------------------- -A distinction needs to be made between *describing* files in a dataset, and *adding or removing* files. As explained in the topic :ref:`rst-dataset-versioning`, just editing a dataset's metadata (including the dataset-specific file metadata in fields ``research_dataset.files`` and ``research_dataset.directories``) does not produce new dataset versions, while *adding* new files will produce new dataset versions, as will *removing* files. 
Yet, both describing the files, and adding or removing files, happens by inserting objects inside the fields ``research_dataset.files`` and ``research_dataset.directories``, or by removing the same objects when wishing to remove files from a dataset. How to know which is which, and what to expect when updating datasets and dealing with files? +A distinction needs to be made between *describing* files in a dataset, and *adding or removing* files. As explained in the topic :ref:`rst-dataset-versioning-v1`, just editing a dataset's metadata (including the dataset-specific file metadata in fields ``research_dataset.files`` and ``research_dataset.directories``) does not produce new dataset versions, while *adding* new files will produce new dataset versions, as will *removing* files. Yet, both describing the files, and adding or removing files, happens by inserting objects inside the fields ``research_dataset.files`` and ``research_dataset.directories``, or by removing the same objects when wishing to remove files from a dataset. How to know which is which, and what to expect when updating datasets and dealing with files? **Adding and describing single files** @@ -389,7 +389,7 @@ If you want to use an existing dataset as a template for a new dataset, you can -.. _rst-datasets-reference-data: +.. _rst-datasets-reference-data-v1: Reference data guide --------------------- @@ -448,7 +448,7 @@ All this needs to be taken into account when looking which reference data to use -.. _rst-datasets-reference-data-table: +.. _rst-datasets-reference-data-table-v1: Reference data table ^^^^^^^^^^^^^^^^^^^^^ @@ -463,7 +463,7 @@ It helps to have the `research_dataset data model visualization `. + More information about updating a dataset can be found in :ref:`Update examples`. **Changing license** @@ -584,7 +584,7 @@ It helps to have the `research_dataset data model visualization ` for more information about update process. + Please refer to :ref:`Update examples` for more information about update process. .. note:: Changing the license for REMS managed dataset closes all existing download accesses to the dataset. @@ -601,7 +601,7 @@ It helps to have the `research_dataset data model visualization `` should be provided, in order to retrieve only those files and directories, which are included in the specified dataset. +The second way is by using the same API as is used to generally browse the files of a project (see :ref:`rst-browsing-files-v1`). Browsing the files of a dataset works the same way, except that an additional query parameter ``cr_identifier=`` should be provided, in order to retrieve only those files and directories, which are included in the specified dataset. Example: @@ -1186,14 +1186,14 @@ Using reference data Modifying ``research_dataset`` to contain data that depends on reference data. -Be sure to also check out :ref:`rst-reference-data-query-examples` for useful examples how to browse reference data in general. +Be sure to also check out :ref:`rst-reference-data-query-examples-v1` for useful examples how to browse reference data in general. Add a directory ~~~~~~~~~~~~~~~~ -Below example assumes an existing bare minimum dataset, to which a directory of files is being added. The directory-object has a mandatory field called ``use_category``, which requires using a value from reference data in its ``identifier`` field. 
In the dataset reference data table on this same page(:ref:`rst-datasets-reference-data-table`), we should be able to find this row: +Below example assumes an existing bare minimum dataset, to which a directory of files is being added. The directory-object has a mandatory field called ``use_category``, which requires using a value from reference data in its ``identifier`` field. In the dataset reference data table on this same page(:ref:`rst-datasets-reference-data-table-v1`), we should be able to find this row: .. code-block:: python diff --git a/docs/v1/source/end_users.rst b/docs/api/v1/end_users.rst similarity index 97% rename from docs/v1/source/end_users.rst rename to docs/api/v1/end_users.rst index 6baeffb0..0fd90f8b 100755 --- a/docs/v1/source/end_users.rst +++ b/docs/api/v1/end_users.rst @@ -11,7 +11,7 @@ Compared to public users, end users have extra access to the following APIs: * ``/rest/directories`` read access for files in user's projects -.. _rst-end-user-authentication: +.. _rst-end-user-authentication-v1: Authentication --------------- diff --git a/docs/v1/source/files.rst b/docs/api/v1/files.rst similarity index 98% rename from docs/v1/source/files.rst rename to docs/api/v1/files.rst index 40c2156b..ee95dca9 100755 --- a/docs/v1/source/files.rst +++ b/docs/api/v1/files.rst @@ -11,7 +11,7 @@ The ``/rest/files`` API supports creating, retrieving, updating, and deleting fi Write-operations to the ``/rest/files`` API is generally limited only to Fairdata services. In practice, new file metadata only appears to Metax as a result of freezing files in the Fairdata IDA service, or during some PAS processes. -End users will only be able to browse file metadata of projects where they are a member, end edit a limited set of metadata fields. Details about browsing files using the Metax API can be found later in this document :ref:`here `, and in swagger. +End users will only be able to browse file metadata of projects where they are a member, end edit a limited set of metadata fields. Details about browsing files using the Metax API can be found later in this document :ref:`here `, and in swagger. @@ -65,7 +65,7 @@ This is just a quick overview, below code examples include some use of them, and In the public browse API's, a dataset's access restrictions or embargoes may apply, and only limited metadata may be returned. Authentication for these public API's is optional, but by authenticating access restrictions may be lifted, for example due to ownership of the published dataset, etc. -.. _rst-files-reference-data: +.. _rst-files-reference-data-v1: Reference data guide --------------------- @@ -130,7 +130,7 @@ Example payload to create a file in Metax (``POST /rest/files``). -.. _rst-browsing-files: +.. _rst-browsing-files-v1: Browsing files ^^^^^^^^^^^^^^^ diff --git a/docs/v1/source/general.rst b/docs/api/v1/general.rst similarity index 99% rename from docs/v1/source/general.rst rename to docs/api/v1/general.rst index 82f81626..527eb980 100755 --- a/docs/v1/source/general.rst +++ b/docs/api/v1/general.rst @@ -55,7 +55,7 @@ An URL of the form https://__METAX_ENV_DOMAIN__/rest/datasets always points to t API Authentication ------------------- -Basic Authentication and Bearer Tokens are used for access control for certain APIs. Basic Authentication credentials are distributed only to known Fairdata services. End Users are able to utilize Bearer tokens in order to interact with certain APIs. Read more about End User authentication and token use at :ref:`rst-end-user-authentication`. 
+Basic Authentication and Bearer Tokens are used for access control for certain APIs. Basic Authentication credentials are distributed only to known Fairdata services. End Users are able to utilize Bearer tokens in order to interact with certain APIs. Read more about End User authentication and token use at :ref:`rst-end-user-authentication-v1`. Write operations (``POST``, ``PUT``, ``PATCH``, ``DELETE``) always require authentication. Some APIs require no authentication when reading (``GET`` operations), while others do. Authentication-related errors will result in a HTTP 401 or 403 error. diff --git a/docs/v1/source/index.rst b/docs/api/v1/index.rst similarity index 79% rename from docs/v1/source/index.rst rename to docs/api/v1/index.rst index 89205247..709e8628 100755 --- a/docs/v1/source/index.rst +++ b/docs/api/v1/index.rst @@ -4,14 +4,14 @@ You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. -======================= -Metax API Documentation -======================= +========================== +Metax API V1 Documentation +========================== .. toctree:: :numbered: :maxdepth: 5 - :caption: Metax API Contents: + :caption: Contents: metax_api quick_start @@ -21,8 +21,6 @@ Metax API Documentation datasets files reference_data - .. oai_pmh - Indices and tables @@ -31,4 +29,3 @@ Indices and tables * :ref:`genindex` * :ref:`modindex` * :ref:`search` - diff --git a/docs/v1/source/list_operations.rst b/docs/api/v1/list_operations.rst similarity index 100% rename from docs/v1/source/list_operations.rst rename to docs/api/v1/list_operations.rst diff --git a/docs/v1/source/metax_api.rst b/docs/api/v1/metax_api.rst similarity index 100% rename from docs/v1/source/metax_api.rst rename to docs/api/v1/metax_api.rst diff --git a/docs/v1/source/metax_high_level.png b/docs/api/v1/metax_high_level.png similarity index 100% rename from docs/v1/source/metax_high_level.png rename to docs/api/v1/metax_high_level.png diff --git a/docs/v1/source/metax_high_level.xml b/docs/api/v1/metax_high_level.xml similarity index 100% rename from docs/v1/source/metax_high_level.xml rename to docs/api/v1/metax_high_level.xml diff --git a/docs/v1/source/quick_start.rst b/docs/api/v1/quick_start.rst similarity index 100% rename from docs/v1/source/quick_start.rst rename to docs/api/v1/quick_start.rst diff --git a/docs/v2/source/reference_data.rst b/docs/api/v1/reference_data.rst similarity index 99% rename from docs/v2/source/reference_data.rst rename to docs/api/v1/reference_data.rst index 4d4bd526..9f7ce98c 100755 --- a/docs/v2/source/reference_data.rst +++ b/docs/api/v1/reference_data.rst @@ -4,7 +4,7 @@ Reference Data "Reference Data" is a fancy name for "a list of known values". When creating new objects in Metax, or updating existing objects, values for some fields are checked against an approriate category in this pre-defined list of known values. For example, when setting the language of a dataset, "my cat" is not a valid value. Instead, a valid value must be chosen from the language reference data listed at https://__METAX_ENV_DOMAIN__/es/reference_data/language/_search?pretty=true (the link only shows a few values, since the list of languages is pretty huge, but it gives you a glimpse of what reference data in practice looks like). 
-Some other examples for reference data use include describing the purpose of associated files in a dataset (`Use Category `_), the manner how contributors were involved in a research (`Contributor Role `_), or telling which fields of science the research fits into (`Field of Science `_). The docs for each API has a section dedicated for reference data, which explains when use of reference data is required. For example for datasets, that section can be found here :ref:`rst-datasets-reference-data`. +Some other examples for reference data use include describing the purpose of associated files in a dataset (`Use Category `_), the manner how contributors were involved in a research (`Contributor Role `_), or telling which fields of science the research fits into (`Field of Science `_). The docs for each API has a section dedicated for reference data, which explains when use of reference data is required. For example for datasets, that section can be found here :ref:`rst-datasets-reference-data-v1`. The Metax API documentation contains some examples for querying the reference data index, but since the index is a ElasticSearch database, it is best to refer to the official ElasticSearch documentation for how to build more complex search queries. @@ -151,7 +151,7 @@ Most often, the field that will get populated from the reference data will be th -.. _rst-reference-data-query-examples: +.. _rst-reference-data-query-examples-v1: Querying Reference Data ------------------------ diff --git a/docs/v2/source/_templates/layout.html b/docs/api/v2/_templates/layout.html similarity index 100% rename from docs/v2/source/_templates/layout.html rename to docs/api/v2/_templates/layout.html diff --git a/docs/v2/source/datasets.rst b/docs/api/v2/datasets.rst similarity index 98% rename from docs/v2/source/datasets.rst rename to docs/api/v2/datasets.rst index ab928343..f4bae3e8 100755 --- a/docs/v2/source/datasets.rst +++ b/docs/api/v2/datasets.rst @@ -22,7 +22,7 @@ Data model visualization The dataset data model visualization can be seen here https://tietomallit.suomi.fi/model/mrd. The data model visualization is very helpful to keep open when creating dataset metadata descriptions, as it shows all the different possible fields and relations, which can be used, which fields are mandatory, and so on. -Additionally, the chosen data catalog may have some additional restrictions to the schema, such as reduced relations, or reduced mandatory fields. Read more about data catalogs and their implications here :ref:`rst-data-catalogs`. +Additionally, the chosen data catalog may have some additional restrictions to the schema, such as reduced relations, or reduced mandatory fields. Read more about data catalogs and their implications here :ref:`rst-data-catalogs-v2`. @@ -80,7 +80,7 @@ Preferred identifier is the "public" identifier of a dataset. When referring to When a user has added some files to a dataset, the user can choose to write additional descriptions to those files. The files already include various automatically generated technical metadata, such as byte sizes, mime types, checksum values and algorithms and such, but any extra metadata that the user wishes to enter about some file is called "user metadata". -.. _rst-data-catalogs: +.. 
_rst-data-catalogs-v2: Data Catalogs ^^^^^^^^^^^^^^ @@ -160,7 +160,7 @@ When creating a new dataset and wishing to use for example the ATT catalog, the assert response.status_code == 201, response.content -For more involving examples, see the :ref:`rst-dataset-examples` section for datasets. +For more involving examples, see the :ref:`examples` section for datasets. @@ -174,7 +174,7 @@ Dataset lifecycle in Metax * The dataset can be premanently deleted at any time by the user. 2) A dataset is published. When the dataset is published: * The dataset's metadata descriptions can still be updated at any time. - * Files can no longer be freely added or removed (a couple of exceptions remain, see :ref:`rst-dataset-versioning`). + * Files can no longer be freely added or removed (a couple of exceptions remain, see :ref:`rst-dataset-versioning-v2`). * The dataset becomes publicly findable (any selected access restrictions, such as embargo, applies). * Dataset receives permanent resolvable identifiers. * Dataset can no longer be premanently deleted. A tombstone page will remain after deletion. @@ -231,7 +231,7 @@ If the requested resource has not been modified after the date specified in the -.. _rst-dataset-versioning: +.. _rst-dataset-versioning-v2: Dataset versioning ------------------- @@ -360,7 +360,7 @@ If you want to use an existing dataset as a template for a new dataset, you can -.. _rst-datasets-reference-data: +.. _rst-datasets-reference-data-v2: Reference data guide --------------------- @@ -419,7 +419,7 @@ All this needs to be taken into account when looking which reference data to use -.. _rst-datasets-reference-data-table: +.. _rst-datasets-reference-data-table-v2: Reference data table ^^^^^^^^^^^^^^^^^^^^^ @@ -434,7 +434,7 @@ It helps to have the `research_dataset data model visualization `. + More information about updating a dataset can be found in :ref:`Update examples`. **Changing license** @@ -555,7 +555,7 @@ It helps to have the `research_dataset data model visualization ` for more information about update process. + Please refer to :ref:`Update examples` for more information about update process. .. note:: Changing the license for REMS managed dataset closes all existing download accesses to the dataset. @@ -572,7 +572,7 @@ It helps to have the `research_dataset data model visualization `` should be provided, in order to retrieve only those files and directories, which are included in the specified dataset. +The second way is by using the same API as is used to generally browse the files of a project (see :ref:`rst-browsing-files-v2`). Browsing the files of a dataset works the same way, except that an additional query parameter ``cr_identifier=`` should be provided, in order to retrieve only those files and directories, which are included in the specified dataset. Example: @@ -1446,14 +1446,14 @@ Using reference data Modifying ``research_dataset`` to contain data that depends on reference data. -Be sure to also check out :ref:`rst-reference-data-query-examples` for useful examples how to browse reference data in general. +Be sure to also check out :ref:`rst-reference-data-query-examples-v2` for useful examples how to browse reference data in general. Add a directory ~~~~~~~~~~~~~~~~ -Below example assumes an existing bare minimum draft dataset, to which some files have already been added. This example adds some user metadata to that directory. 
The directory-object has a mandatory field called ``use_category``, which requires using a value from reference data in its ``identifier`` field. In the dataset reference data table on this same page(:ref:`rst-datasets-reference-data-table`), we should be able to find this row: +Below example assumes an existing bare minimum draft dataset, to which some files have already been added. This example adds some user metadata to that directory. The directory-object has a mandatory field called ``use_category``, which requires using a value from reference data in its ``identifier`` field. In the dataset reference data table on this same page(:ref:`rst-datasets-reference-data-table-v2`), we should be able to find this row: .. code-block:: python diff --git a/docs/v2/source/end_users.rst b/docs/api/v2/end_users.rst similarity index 97% rename from docs/v2/source/end_users.rst rename to docs/api/v2/end_users.rst index 0b766920..37801e63 100755 --- a/docs/v2/source/end_users.rst +++ b/docs/api/v2/end_users.rst @@ -12,7 +12,7 @@ Compared to public users, end users have extra access to the following APIs: * ``/rpc/v2/datasets`` write access to owned datasets -.. _rst-end-user-authentication: +.. _rst-end-user-authentication-v2: Authentication --------------- diff --git a/docs/v2/source/files.rst b/docs/api/v2/files.rst similarity index 98% rename from docs/v2/source/files.rst rename to docs/api/v2/files.rst index db8500f8..024a52e9 100755 --- a/docs/v2/source/files.rst +++ b/docs/api/v2/files.rst @@ -11,7 +11,7 @@ The ``/rest/files`` API supports creating, retrieving, updating, and deleting fi Write-operations to the ``/rest/files`` API is generally limited only to Fairdata services. In practice, new file metadata only appears to Metax as a result of freezing files in the Fairdata IDA service, or during some PAS processes. -End users will only be able to browse file metadata of projects where they are a member, end edit a limited set of metadata fields. Details about browsing files using the Metax API can be found later in this document :ref:`here `, and in swagger. +End users will only be able to browse file metadata of projects where they are a member, end edit a limited set of metadata fields. Details about browsing files using the Metax API can be found later in this document :ref:`here `, and in swagger. @@ -65,7 +65,7 @@ This is just a quick overview, below code examples include some use of them, and In the public browse API's, a dataset's access restrictions or embargoes may apply, and only limited metadata may be returned. Authentication for these public API's is optional, but by authenticating access restrictions may be lifted, for example due to ownership of the published dataset, etc. -.. _rst-files-reference-data: +.. _rst-files-reference-data-v2: Reference data guide --------------------- @@ -129,7 +129,7 @@ Example payload to create a file in Metax (``POST /rest/files``). -.. _rst-browsing-files: +.. _rst-browsing-files-v2: Browsing files ^^^^^^^^^^^^^^^ diff --git a/docs/v2/source/general.rst b/docs/api/v2/general.rst similarity index 99% rename from docs/v2/source/general.rst rename to docs/api/v2/general.rst index 4797e00b..3f82b5d9 100755 --- a/docs/v2/source/general.rst +++ b/docs/api/v2/general.rst @@ -60,7 +60,7 @@ An URL of the form https://__METAX_ENV_DOMAIN__/rest/datasets (no API version sp API Authentication ------------------- -Basic Authentication and Bearer Tokens are used for access control for certain APIs. 
Basic Authentication credentials are distributed only to known Fairdata services. End Users are able to utilize Bearer tokens in order to interact with certain APIs. Read more about End User authentication and token use at :ref:`rst-end-user-authentication`. +Basic Authentication and Bearer Tokens are used for access control for certain APIs. Basic Authentication credentials are distributed only to known Fairdata services. End Users are able to utilize Bearer tokens in order to interact with certain APIs. Read more about End User authentication and token use at :ref:`rst-end-user-authentication-v2`. Write operations (``POST``, ``PUT``, ``PATCH``, ``DELETE``) always require authentication. Some APIs require no authentication when reading (``GET`` operations), while others do. Authentication-related errors will result in a HTTP 401 or 403 error. diff --git a/docs/v2/source/index.rst b/docs/api/v2/index.rst similarity index 79% rename from docs/v2/source/index.rst rename to docs/api/v2/index.rst index 89205247..80851a1e 100755 --- a/docs/v2/source/index.rst +++ b/docs/api/v2/index.rst @@ -4,14 +4,14 @@ You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. -======================= -Metax API Documentation -======================= +========================== +Metax API V2 Documentation +========================== .. toctree:: :numbered: :maxdepth: 5 - :caption: Metax API Contents: + :caption: Contents: metax_api quick_start @@ -21,8 +21,6 @@ Metax API Documentation datasets files reference_data - .. oai_pmh - Indices and tables @@ -31,4 +29,3 @@ Indices and tables * :ref:`genindex` * :ref:`modindex` * :ref:`search` - diff --git a/docs/v2/source/list_operations.rst b/docs/api/v2/list_operations.rst similarity index 100% rename from docs/v2/source/list_operations.rst rename to docs/api/v2/list_operations.rst diff --git a/docs/v2/source/metax_api.rst b/docs/api/v2/metax_api.rst similarity index 100% rename from docs/v2/source/metax_api.rst rename to docs/api/v2/metax_api.rst diff --git a/docs/v2/source/metax_high_level.png b/docs/api/v2/metax_high_level.png similarity index 100% rename from docs/v2/source/metax_high_level.png rename to docs/api/v2/metax_high_level.png diff --git a/docs/v2/source/metax_high_level.xml b/docs/api/v2/metax_high_level.xml similarity index 100% rename from docs/v2/source/metax_high_level.xml rename to docs/api/v2/metax_high_level.xml diff --git a/docs/v2/source/quick_start.rst b/docs/api/v2/quick_start.rst similarity index 100% rename from docs/v2/source/quick_start.rst rename to docs/api/v2/quick_start.rst diff --git a/docs/v1/source/reference_data.rst b/docs/api/v2/reference_data.rst similarity index 99% rename from docs/v1/source/reference_data.rst rename to docs/api/v2/reference_data.rst index 4d4bd526..c70b3836 100755 --- a/docs/v1/source/reference_data.rst +++ b/docs/api/v2/reference_data.rst @@ -4,7 +4,7 @@ Reference Data "Reference Data" is a fancy name for "a list of known values". When creating new objects in Metax, or updating existing objects, values for some fields are checked against an approriate category in this pre-defined list of known values. For example, when setting the language of a dataset, "my cat" is not a valid value. 
Instead, a valid value must be chosen from the language reference data listed at https://__METAX_ENV_DOMAIN__/es/reference_data/language/_search?pretty=true (the link only shows a few values, since the list of languages is pretty huge, but it gives you a glimpse of what reference data in practice looks like). -Some other examples for reference data use include describing the purpose of associated files in a dataset (`Use Category `_), the manner how contributors were involved in a research (`Contributor Role `_), or telling which fields of science the research fits into (`Field of Science `_). The docs for each API has a section dedicated for reference data, which explains when use of reference data is required. For example for datasets, that section can be found here :ref:`rst-datasets-reference-data`. +Some other examples for reference data use include describing the purpose of associated files in a dataset (`Use Category `_), the manner how contributors were involved in a research (`Contributor Role `_), or telling which fields of science the research fits into (`Field of Science `_). The docs for each API has a section dedicated for reference data, which explains when use of reference data is required. For example for datasets, that section can be found here :ref:`rst-datasets-reference-data-v2`. The Metax API documentation contains some examples for querying the reference data index, but since the index is a ElasticSearch database, it is best to refer to the official ElasticSearch documentation for how to build more complex search queries. @@ -151,7 +151,7 @@ Most often, the field that will get populated from the reference data will be th -.. _rst-reference-data-query-examples: +.. _rst-reference-data-query-examples-v2: Querying Reference Data ------------------------ diff --git a/docs/v1/source/conf.py b/docs/v1/source/conf.py deleted file mode 100755 index 9f1010eb..00000000 --- a/docs/v1/source/conf.py +++ /dev/null @@ -1,172 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Configuration file for the Sphinx documentation builder. -# -# This file does only contain a selection of the most common options. For a -# full list see the documentation: -# http://www.sphinx-doc.org/en/master/config - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) - - -# -- Project information ----------------------------------------------------- - -project = "Metax API" -copyright = "2018, csc.fi" -author = "csc.fi" - -# The short X.Y version -version = "" -# The full version, including alpha/beta/rc tags -release = "" - - -# -- General configuration --------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -# -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -# source_suffix = ['.rst', '.md'] -source_suffix = ".rst" - -# The master toctree document. 
-master_doc = "index" - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path . -exclude_patterns = [] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = "sphinx_rtd_theme" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# -# html_theme_options = {} -html_theme_options = { - "canonical_url": "", - "analytics_id": "", - "logo_only": False, - "display_version": True, - "prev_next_buttons_location": "bottom", - "style_external_links": False, - # Toc options - "collapse_navigation": True, - "sticky_navigation": True, - "navigation_depth": -1, - "includehidden": True, - "titles_only": False, -} - - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Custom sidebar templates, must be a dictionary that maps document names -# to template names. -# -# The default sidebars (for documents that don't match any pattern) are -# defined by theme itself. Builtin themes are using these templates by -# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', -# 'searchbox.html']``. -# -# html_sidebars = {} - - -# -- Options for HTMLHelp output --------------------------------------------- - -# Output file base name for HTML help builder. -htmlhelp_basename = "MetaxAPIdoc" - - -# -- Options for LaTeX output ------------------------------------------------ - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # - # 'preamble': '', - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, "MetaxAPI.tex", "Metax API Documentation", "csc.fi", "manual"), -] - - -# -- Options for manual page output ------------------------------------------ - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "metaxapi", "Metax API Documentation", [author], 1)] - - -# -- Options for Texinfo output ---------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "MetaxAPI", - "Metax API Documentation", - author, - "MetaxAPI", - "One line description of project.", - "Miscellaneous", - ), -] - - -def setup(app): - app.add_stylesheet("custom.css") diff --git a/docs/v1/source/oai_pmh.rst b/docs/v1/source/oai_pmh.rst deleted file mode 100755 index 239efefd..00000000 --- a/docs/v1/source/oai_pmh.rst +++ /dev/null @@ -1,5 +0,0 @@ - -OAI-PMH -======== - -This thing also exists. diff --git a/docs/v2/source/oai_pmh.rst b/docs/v2/source/oai_pmh.rst deleted file mode 100755 index 239efefd..00000000 --- a/docs/v2/source/oai_pmh.rst +++ /dev/null @@ -1,5 +0,0 @@ - -OAI-PMH -======== - -This thing also exists. From 3f1cc6c9a5059a063d075bd504504fe62ca4a2ff Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Mon, 7 Jun 2021 13:23:51 +0300 Subject: [PATCH 036/160] CSCFAIRMETA-1047: add configuration to replace text in docs --- docs/api/conf.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/docs/api/conf.py b/docs/api/conf.py index 65d2f480..b892e426 100755 --- a/docs/api/conf.py +++ b/docs/api/conf.py @@ -12,7 +12,7 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # -# import os +import os # import sys # sys.path.insert(0, os.path.abspath('.')) @@ -28,7 +28,9 @@ # The full version, including alpha/beta/rc tags release = "" -fqdn = "metax.fairdata.fi" +domain = os.getenv("DOMAIN", "metax.fairdata.fi") +branch = os.getenv("BRANCH", "master") +etsin_url = os.getenv("ETSIN_URL", "etsin.fairdata.fi") # -- General configuration --------------------------------------------------- @@ -68,6 +70,11 @@ # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" +replacements = { + "__METAX_ENV_DOMAIN__": f"{domain}", + "__METAX_ENV_BRANCH__": f"{branch}", + "__ETSIN_ENV_BASE_URL__": f"{etsin_url}" +} # -- Options for HTML output ------------------------------------------------- @@ -112,7 +119,6 @@ # # html_sidebars = {} -rst_prolog = f".. |__METAX_ENV_DOMAIN__| replace:: {fqdn}" # -- Options for HTMLHelp output --------------------------------------------- @@ -169,6 +175,13 @@ ), ] +# from https://github.com/sphinx-doc/sphinx/issues/4054 +def replace(app, docname, source): + for key, value in app.config.replacements.items(): + source[0] = source[0].replace(key, value) + def setup(app): + app.add_config_value('replacements', {}, True) + app.connect('source-read', replace) app.add_css_file("custom.css") From b85adf4ad520e1d96105fe090dfbdaf41218e541 Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Thu, 3 Jun 2021 15:30:27 +0300 Subject: [PATCH 037/160] CSCFAIRMETA-1047: refactor tables to support variable length width --- docs/api/v1/metax_api.rst | 20 +++++----- docs/api/v1/reference_data.rst | 72 ++++++++++++---------------------- docs/api/v2/metax_api.rst | 21 +++++----- docs/api/v2/reference_data.rst | 71 ++++++++++++--------------------- 4 files changed, 71 insertions(+), 113 deletions(-) diff --git a/docs/api/v1/metax_api.rst b/docs/api/v1/metax_api.rst index aa9585ca..6d10a8f4 100755 --- a/docs/api/v1/metax_api.rst +++ b/docs/api/v1/metax_api.rst @@ -17,12 +17,14 @@ Any Python code examples in the API documentation are written using Python versi Below is a rough outline of what types of APIs Metax currently provides.
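The ``__METAX_ENV_DOMAIN__`` placeholder that appears in the API tables below is filled in at build time by the ``source-read`` hook added in the conf.py patch above. What follows is a standalone sketch of that same pattern, not the project's actual configuration: the placeholder name and default value are illustrative, while ``add_config_value`` and ``connect`` are standard Sphinx APIs.

.. code-block:: python

    import os

    # Mapping of placeholder -> value, resolved from the environment,
    # mirroring the mechanism in the patch above.
    replacements = {
        "__METAX_ENV_DOMAIN__": os.getenv("DOMAIN", "metax.fairdata.fi"),
    }

    def replace(app, docname, source):
        # 'source' is a one-element list holding the raw .rst text of the
        # document being read; mutating source[0] changes what Sphinx parses.
        text = source[0]
        for key, value in app.config.replacements.items():
            text = text.replace(key, value)
        source[0] = text

    def setup(app):
        app.add_config_value("replacements", {}, True)
        app.connect("source-read", replace)

With such a hook in place, a source line containing ``https://__METAX_ENV_DOMAIN__/rest/v1`` is rendered with the real domain of the environment the docs were built for.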
The rest of the documentation will cover how to interact with them. -+-------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------+ -| API | Description | -+-------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/rest | The main API to interact with most resources inside Metax, such as datasets, files, and data catalogs. | -+-------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/rpc | A Remote Procedure Call API, to execute various actions in Metax, or retrieve data that otherwise does not fit into the REST API. | -+-------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/oaipmh | Implements the OAI-PMH specification. The specification defines a way to harvest filtered sets of data (datasets) in an incremental manner. | -+-------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------+ +.. list-table:: + :header-rows: 1 + + * - API + - Description + * - https://__METAX_ENV_DOMAIN__/rest/v1 + - The main API to interact with most resources inside Metax, such as datasets, files, and data catalogs. + * - https://__METAX_ENV_DOMAIN__/rpc/v1 + - A Remote Procedure Call API, to execute various actions in Metax, or retrieve data that otherwise does not fit into the REST API. + * - https://__METAX_ENV_DOMAIN__/oai/ + - Implements the OAI-PMH specification. The specification defines a way to harvest filtered sets of data (datasets) in an incremental manner. diff --git a/docs/api/v1/reference_data.rst b/docs/api/v1/reference_data.rst index 9f7ce98c..303c3089 100755 --- a/docs/api/v1/reference_data.rst +++ b/docs/api/v1/reference_data.rst @@ -15,54 +15,30 @@ Reference Data Indexes The table below contains a full list of the different indexes stored in the reference data.
-+----------------------------------------------------------------------------------------+ -| Url to browse reference data | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/access_type/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/contributor_role/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/contributor_type/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/event_outcome/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/field_of_science/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/file_format_version/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/file_type/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/funder_type/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/identifier_type/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/keyword/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/language/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/license/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/lifecycle_event/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/location/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/mime_type/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/preservation_event/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/relation_type/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/research_infra/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/resource_type/_search?pretty=true | 
-+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/restriction_grounds/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/use_category/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/organization_data/organization/_search?pretty=true | -+----------------------------------------------------------------------------------------+ - +.. list-table:: + + * - https://__METAX_ENV_DOMAIN__/es/reference_data/access_type/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/contributor_role/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/contributor_type/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/event_outcome/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/field_of_science/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/file_format_version/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/file_type/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/funder_type/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/identifier_type/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/keyword/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/language/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/license/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/lifecycle_event/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/location/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/mime_type/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/preservation_event/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/relation_type/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/research_infra/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/resource_type/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/restriction_grounds/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/use_category/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/organization_data/organization/_search?pretty=true

How to use reference data when uploading data to Metax? diff --git a/docs/api/v2/metax_api.rst b/docs/api/v2/metax_api.rst index d4cab41a..0a1aa86c 100755 --- a/docs/api/v2/metax_api.rst +++ b/docs/api/v2/metax_api.rst @@ -15,12 +15,15 @@ Any Python code examples in the API documentation are written using Python versi Below is a rough outline of what types of APIs Metax currently provides. The rest of the documentation will cover how to interact with them.

-+--------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------+ -| API | Description | -+--------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/rest/v2 | The main API to interact with most resources inside Metax, such as datasets, files, and data catalogs. | -+--------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/rpc/v2 | A Remote Procedure Call API, to execute various actions in Metax, or retrieve data that otherwise does not fit into the REST API. | -+--------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/oaipmh | Implements the OAI-PMH specification. The specification defines a way to harvest filtered sets of data (datasets) in an incremental manner. | -+--------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------+ +.. list-table:: + :widths: 50 150 + :header-rows: 1 + + * - API + - Description + * - https://__METAX_ENV_DOMAIN__/rest/v2 + - The main API to interact with most resources inside Metax, such as datasets, files, and data catalogs. + * - https://__METAX_ENV_DOMAIN__/rpc/v2 + - A Remote Procedure Call API, to execute various actions in Metax, or retrieve data that otherwise does not fit into the REST API. + * - https://__METAX_ENV_DOMAIN__/oai/ + - Implements the OAI-PMH specification. The specification defines a way to harvest filtered sets of data (datasets) in an incremental manner. diff --git a/docs/api/v2/reference_data.rst b/docs/api/v2/reference_data.rst index c70b3836..08b0da86 100755 --- a/docs/api/v2/reference_data.rst +++ b/docs/api/v2/reference_data.rst @@ -15,53 +15,30 @@ Reference Data Indexes The table below contains a full list of the different indexes stored in the reference data.
-+----------------------------------------------------------------------------------------+ -| Url to browse reference data | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/access_type/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/contributor_role/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/contributor_type/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/event_outcome/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/field_of_science/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/file_format_version/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/file_type/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/funder_type/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/identifier_type/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/keyword/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/language/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/license/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/lifecycle_event/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/location/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/mime_type/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/preservation_event/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/relation_type/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/research_infra/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/resource_type/_search?pretty=true | 
-+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/restriction_grounds/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/reference_data/use_category/_search?pretty=true | -+----------------------------------------------------------------------------------------+ -| https://__METAX_ENV_DOMAIN__/es/organization_data/organization/_search?pretty=true | -+----------------------------------------------------------------------------------------+ +.. list-table:: + + * - https://__METAX_ENV_DOMAIN__/es/reference_data/access_type/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/contributor_role/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/contributor_type/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/event_outcome/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/field_of_science/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/file_format_version/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/file_type/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/funder_type/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/identifier_type/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/keyword/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/language/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/license/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/lifecycle_event/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/location/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/mime_type/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/preservation_event/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/relation_type/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/research_infra/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/resource_type/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/restriction_grounds/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/reference_data/use_category/_search?pretty=true + * - https://__METAX_ENV_DOMAIN__/es/organization_data/organization/_search?pretty=true From 993a45cc449f30111b54c6b06364206804b129e3 Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Fri, 4 Jun 2021 09:56:03 +0300 Subject: [PATCH 038/160] CSCFAIRMETA-1047: update dependencies --- poetry.lock | 280 +++++++++++++++++++++++++---------------------- pyproject.toml | 4 +- requirements.txt | 10 +- 3 files changed, 155 insertions(+), 139 deletions(-) diff --git a/poetry.lock b/poetry.lock index 6fe4e5ae..3f528702 100644 --- a/poetry.lock +++ b/poetry.lock @@ -24,7 +24,7 @@ python-versions = "*" [[package]] name = "asgiref"
category = "main" optional = false @@ -195,7 +195,7 @@ python-versions = ">=3.5" [[package]] name = "django" -version = "3.1.12" +version = "3.1.13" description = "A high-level Python Web framework that encourages rapid development and clean, pragmatic design." category = "main" optional = false @@ -298,7 +298,7 @@ https = ["urllib3[secure] (>=1.24.1)"] [[package]] name = "elasticsearch" -version = "7.13.1" +version = "7.13.2" description = "Python client for Elasticsearch" category = "main" optional = false @@ -316,7 +316,7 @@ requests = ["requests (>=2.4.0,<3.0.0)"] [[package]] name = "executing" -version = "0.6.0" +version = "0.7.0" description = "Get the currently executing AST node of a frame, and other information" category = "dev" optional = false @@ -338,7 +338,7 @@ tornado = ["tornado (>=0.2)"] [[package]] name = "icecream" -version = "2.1.0" +version = "2.1.1" description = "Never use print() to debug again; inspect variables, expressions, and program execution with a single, simple function call." category = "dev" optional = false @@ -385,7 +385,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "importlib-metadata" -version = "4.5.0" +version = "4.6.1" description = "Read metadata from Python packages" category = "main" optional = false @@ -397,7 +397,8 @@ zipp = ">=0.5" [package.extras] docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] +perf = ["ipython"] +testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] [[package]] name = "ipdb" @@ -414,7 +415,7 @@ toml = {version = ">=0.10.2", markers = "python_version > \"3.6\""} [[package]] name = "ipython" -version = "7.24.1" +version = "7.25.0" description = "IPython: Productive Interactive Computing" category = "dev" optional = false @@ -479,16 +480,17 @@ six = "*" [[package]] name = "isort" -version = "5.8.0" +version = "5.9.1" description = "A Python utility / library to sort Python imports." 
category = "dev" optional = false -python-versions = ">=3.6,<4.0" +python-versions = ">=3.6.1,<4.0" [package.extras] pipfile_deprecated_finder = ["pipreqs", "requirementslib"] requirements_deprecated_finder = ["pipreqs", "pip-api"] colors = ["colorama (>=0.4.3,<0.5.0)"] +plugins = ["setuptools"] [[package]] name = "jedi" @@ -592,11 +594,11 @@ python-versions = "*" [[package]] name = "packaging" -version = "20.9" +version = "21.0" description = "Core utilities for Python packages" category = "main" optional = true -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.6" [package.dependencies] pyparsing = ">=2.0.2" @@ -655,7 +657,7 @@ twisted = ["twisted"] [[package]] name = "prompt-toolkit" -version = "3.0.18" +version = "3.0.19" description = "Library for building powerful interactive command lines in Python" category = "dev" optional = false @@ -666,11 +668,11 @@ wcwidth = "*" [[package]] name = "psycopg2-binary" -version = "2.8.6" +version = "2.9.1" description = "psycopg2 - Python-PostgreSQL Database Adapter" category = "main" optional = false -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" +python-versions = ">=3.6" [[package]] name = "ptyprocess" @@ -731,11 +733,11 @@ python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" [[package]] name = "pyrsistent" -version = "0.17.3" +version = "0.18.0" description = "Persistent/Functional/Immutable data structures" category = "main" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" [[package]] name = "python-box" @@ -819,7 +821,7 @@ hiredis = ["hiredis (>=0.1.3)"] [[package]] name = "regex" -version = "2021.4.4" +version = "2021.7.6" description = "Alternative regular expression module, to replace re." category = "dev" optional = false @@ -877,17 +879,17 @@ python-versions = "*" [[package]] name = "sphinx" -version = "3.5.4" +version = "4.0.3" description = "Python documentation generator" category = "main" optional = true -python-versions = ">=3.5" +python-versions = ">=3.6" [package.dependencies] alabaster = ">=0.7,<0.8" babel = ">=1.3" colorama = {version = ">=0.3.5", markers = "sys_platform == \"win32\""} -docutils = ">=0.12,<0.17" +docutils = ">=0.14,<0.18" imagesize = "*" Jinja2 = ">=2.3" packaging = "*" @@ -903,7 +905,7 @@ sphinxcontrib-serializinghtml = "*" [package.extras] docs = ["sphinxcontrib-websupport"] -lint = ["flake8 (>=3.5.0)", "isort", "mypy (>=0.800)", "docutils-stubs"] +lint = ["flake8 (>=3.5.0)", "isort", "mypy (>=0.900)", "docutils-stubs", "types-typed-ast", "types-pkg-resources", "types-requests"] test = ["pytest", "pytest-cov", "html5lib", "cython", "typed-ast"] [[package]] @@ -1088,7 +1090,7 @@ python-versions = "*" [[package]] name = "urllib3" -version = "1.26.5" +version = "1.26.6" description = "HTTP library with thread-safe connection pooling, file post, and more." 
category = "main" optional = false @@ -1117,7 +1119,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "zipp" -version = "3.4.1" +version = "3.5.0" description = "Backport of pathlib-compatible object wrapper for zip files" category = "main" optional = false @@ -1125,7 +1127,7 @@ python-versions = ">=3.6" [package.extras] docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=4.6)", "pytest-checkdocs (>=1.2.3)", "pytest-flake8", "pytest-cov", "pytest-enabler", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] +testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] [extras] docs = ["Sphinx", "sphinx-autobuild", "sphinx-rtd-theme"] @@ -1135,7 +1137,7 @@ swagger = ["PyYAML"] [metadata] lock-version = "1.1" python-versions = "^3.7" -content-hash = "22ec248442bd368b260536eca33ee3856f3f110a04bea6d61ebee9d14660a378" +content-hash = "f902a1619b9a3a0cdc7233bee55b16b6b3167d7ce087716503df3c9bd39fe484" [metadata.files] alabaster = [ @@ -1151,8 +1153,8 @@ appnope = [ {file = "appnope-0.1.2.tar.gz", hash = "sha256:dd83cd4b5b460958838f6eb3000c660b1f9caf2a5b1de4264e941512f603258a"}, ] asgiref = [ - {file = "asgiref-3.3.4-py3-none-any.whl", hash = "sha256:92906c611ce6c967347bbfea733f13d6313901d54dcca88195eaeb52b2a8e8ee"}, - {file = "asgiref-3.3.4.tar.gz", hash = "sha256:d1216dfbdfb63826470995d31caed36225dcaf34f182e0fa257a4dd9e86f1b78"}, + {file = "asgiref-3.4.1-py3-none-any.whl", hash = "sha256:ffc141aa908e6f175673e7b1b3b7af4fdb0ecb738fc5c8b88f69f055c2415214"}, + {file = "asgiref-3.4.1.tar.gz", hash = "sha256:4ef1ab46b484e3c706329cedeff284a5d40824200638503f5768edb6de7d58e9"}, ] asttokens = [ {file = "asttokens-2.0.5-py2.py3-none-any.whl", hash = "sha256:0844691e88552595a6f4a4281a9f7f79b8dd45ca4ccea82e5e05b4bbdb76705c"}, @@ -1247,16 +1249,16 @@ coverage = [ {file = "coverage-5.5.tar.gz", hash = "sha256:ebe78fe9a0e874362175b02371bdfbee64d8edc42a044253ddf4ee7d3c15212c"}, ] datacite = [ - {file = "datacite-1.1.1-py2.py3-none-any.whl", hash = "sha256:842b52b47380b658c728f4596dcab4fdaf524ebcfb895a337be267b4a2e9b1bf"}, - {file = "datacite-1.1.1.tar.gz", hash = "sha256:4e3d3153d849f0a5f331ba585bf4d2f6fcc87f63738bbf9621f19f3727ae3e4d"}, + {file = "datacite-1.1.2-py2.py3-none-any.whl", hash = "sha256:a9dc41df3413e2d7d854bda5cea38cfdccabc9cd6ce3652cecd47db534c76c29"}, + {file = "datacite-1.1.2.tar.gz", hash = "sha256:0164bc2ff35bba643897201eb359611abb43ff5811a9ac17fb5592cd643b4443"}, ] decorator = [ {file = "decorator-5.0.9-py3-none-any.whl", hash = "sha256:6e5c199c16f7a9f0e3a61a4a54b3d27e7dad0dbdde92b944426cb20914376323"}, {file = "decorator-5.0.9.tar.gz", hash = "sha256:72ecfba4320a893c53f9706bebb2d55c270c1e51a28789361aa93e4a21319ed5"}, ] django = [ - {file = "Django-3.1.12-py3-none-any.whl", hash = "sha256:a523d62b7ab2908f551dabc32b99017a86aa7784e32b761708e52be3dce6d35d"}, - {file = "Django-3.1.12.tar.gz", hash = "sha256:dc41bf07357f1f4810c1c555b685cb51f780b41e37892d6cc92b89789f2847e1"}, + {file = "Django-3.1.13-py3-none-any.whl", hash = "sha256:a6e0d1ff11095b7394c079ade7094c73b2dc3df4a7a373c9b58ed73b77a97feb"}, + {file = "Django-3.1.13.tar.gz", hash = "sha256:9f8be75646f62204320b195062b1d696ba28aa3d45ee72fb7c888ffaebc5bdb2"}, ] django-debug-toolbar = [ {file = "django-debug-toolbar-3.2.1.tar.gz", hash = "sha256:a5ff2a54f24bf88286f9872836081078f4baa843dc3735ee88524e89f8821e33"}, @@ 
-1295,19 +1297,19 @@ dulwich = [ {file = "dulwich-0.19.16.tar.gz", hash = "sha256:f74561c448bfb6f04c07de731c1181ae4280017f759b0bb04fa5770aa84ca850"}, ] elasticsearch = [ - {file = "elasticsearch-7.13.1-py2.py3-none-any.whl", hash = "sha256:a09ae1de8869efa6ef2d9a0a9b9f6d9260b0c2506e83dd32bc1119a23fff49a5"}, - {file = "elasticsearch-7.13.1.tar.gz", hash = "sha256:d6bcca0b2e5665d08e6fe6fadc2d4d321affd76ce483603078fc9d3ccd2bc0f9"}, + {file = "elasticsearch-7.13.2-py2.py3-none-any.whl", hash = "sha256:583459eaa864d0e4c11b4e0314569999fa780748856bfaeb8d714fc9243c26a2"}, + {file = "elasticsearch-7.13.2.tar.gz", hash = "sha256:fd8b3a267da279ae78f7923c549d488403cdbf83a98299bb130feb832d014b8f"}, ] executing = [ - {file = "executing-0.6.0-py2.py3-none-any.whl", hash = "sha256:a2f10f802b4312b92bd256279b43720271b0d9b540a0dbab7be4c28fbc536479"}, - {file = "executing-0.6.0.tar.gz", hash = "sha256:a07046e608c56948a899e1c7dc45327ed84ee67edf245041eb8c6722658c14e3"}, + {file = "executing-0.7.0-py2.py3-none-any.whl", hash = "sha256:1971c98963857f2c03f4b688d93fc4b28ce756bd102955ea8ea7ce0a7fd9a28f"}, + {file = "executing-0.7.0.tar.gz", hash = "sha256:509fe590e9da1c0659a273c42493a25af6f43d61cf36f085fc1b6cf2c6419d1f"}, ] gunicorn = [ {file = "gunicorn-20.1.0.tar.gz", hash = "sha256:e0a968b5ba15f8a328fdfd7ab1fcb5af4470c28aaf7e55df02a99bc13138e6e8"}, ] icecream = [ - {file = "icecream-2.1.0-py2.py3-none-any.whl", hash = "sha256:4c441862751e9a8b52c1a5b551056a3430361230b8c845a7953f8b6e400a4f27"}, - {file = "icecream-2.1.0.tar.gz", hash = "sha256:c2e7b74c1c12caa2cfde050f2e636493ee77a9fb4a494b5593418ab359924a24"}, + {file = "icecream-2.1.1-py2.py3-none-any.whl", hash = "sha256:adc1c48f5a4f83b2171c774a35142f217d317a84fca8cdf3fc65aa1321ff26b6"}, + {file = "icecream-2.1.1.tar.gz", hash = "sha256:47e00e3f4e8477996e7dc420b6fa8ba53f8ced17de65320fedb5b15997b76589"}, ] idna = [ {file = "idna-2.10-py2.py3-none-any.whl", hash = "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0"}, @@ -1322,15 +1324,15 @@ imagesize = [ {file = "imagesize-1.2.0.tar.gz", hash = "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1"}, ] importlib-metadata = [ - {file = "importlib_metadata-4.5.0-py3-none-any.whl", hash = "sha256:833b26fb89d5de469b24a390e9df088d4e52e4ba33b01dc5e0e4f41b81a16c00"}, - {file = "importlib_metadata-4.5.0.tar.gz", hash = "sha256:b142cc1dd1342f31ff04bb7d022492b09920cb64fed867cd3ea6f80fe3ebd139"}, + {file = "importlib_metadata-4.6.1-py3-none-any.whl", hash = "sha256:9f55f560e116f8643ecf2922d9cd3e1c7e8d52e683178fecd9d08f6aa357e11e"}, + {file = "importlib_metadata-4.6.1.tar.gz", hash = "sha256:079ada16b7fc30dfbb5d13399a5113110dab1aa7c2bc62f66af75f0b717c8cac"}, ] ipdb = [ {file = "ipdb-0.13.9.tar.gz", hash = "sha256:951bd9a64731c444fd907a5ce268543020086a697f6be08f7cc2c9a752a278c5"}, ] ipython = [ - {file = "ipython-7.24.1-py3-none-any.whl", hash = "sha256:d513e93327cf8657d6467c81f1f894adc125334ffe0e4ddd1abbb1c78d828703"}, - {file = "ipython-7.24.1.tar.gz", hash = "sha256:9bc24a99f5d19721fb8a2d1408908e9c0520a17fff2233ffe82620847f17f1b6"}, + {file = "ipython-7.25.0-py3-none-any.whl", hash = "sha256:aa21412f2b04ad1a652e30564fff6b4de04726ce875eab222c8430edc6db383a"}, + {file = "ipython-7.25.0.tar.gz", hash = "sha256:54bbd1fe3882457aaf28ae060a5ccdef97f212a741754e420028d4ec5c2291dc"}, ] ipython-genutils = [ {file = "ipython_genutils-0.2.0-py2.py3-none-any.whl", hash = "sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8"}, @@ -1344,8 +1346,8 @@ isodate = [ {file = 
"isodate-0.6.0.tar.gz", hash = "sha256:2e364a3d5759479cdb2d37cce6b9376ea504db2ff90252a2e5b7cc89cc9ff2d8"}, ] isort = [ - {file = "isort-5.8.0-py3-none-any.whl", hash = "sha256:2bb1680aad211e3c9944dbce1d4ba09a989f04e238296c87fe2139faa26d655d"}, - {file = "isort-5.8.0.tar.gz", hash = "sha256:0a943902919f65c5684ac4e0154b1ad4fac6dcaa5d9f3426b732f1c8b5419be6"}, + {file = "isort-5.9.1-py3-none-any.whl", hash = "sha256:8e2c107091cfec7286bc0f68a547d0ba4c094d460b732075b6fba674f1035c0c"}, + {file = "isort-5.9.1.tar.gz", hash = "sha256:83510593e07e433b77bd5bff0f6f607dbafa06d1a89022616f02d8b699cfcd56"}, ] jedi = [ {file = "jedi-0.18.0-py2.py3-none-any.whl", hash = "sha256:18456d83f65f400ab0c2d3319e48520420ef43b23a086fdc05dff34132f0fb93"}, @@ -1445,8 +1447,8 @@ mypy-extensions = [ {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, ] packaging = [ - {file = "packaging-20.9-py2.py3-none-any.whl", hash = "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a"}, - {file = "packaging-20.9.tar.gz", hash = "sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5"}, + {file = "packaging-21.0-py3-none-any.whl", hash = "sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14"}, + {file = "packaging-21.0.tar.gz", hash = "sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7"}, ] parso = [ {file = "parso-0.8.2-py2.py3-none-any.whl", hash = "sha256:a8c4922db71e4fdb90e0d0bc6e50f9b273d3397925e5e60a717e719201778d22"}, @@ -1469,45 +1471,39 @@ pika = [ {file = "pika-1.2.0.tar.gz", hash = "sha256:f023d6ac581086b124190cb3dc81dd581a149d216fa4540ac34f9be1e3970b89"}, ] prompt-toolkit = [ - {file = "prompt_toolkit-3.0.18-py3-none-any.whl", hash = "sha256:bf00f22079f5fadc949f42ae8ff7f05702826a97059ffcc6281036ad40ac6f04"}, - {file = "prompt_toolkit-3.0.18.tar.gz", hash = "sha256:e1b4f11b9336a28fa11810bc623c357420f69dfdb6d2dac41ca2c21a55c033bc"}, + {file = "prompt_toolkit-3.0.19-py3-none-any.whl", hash = "sha256:7089d8d2938043508aa9420ec18ce0922885304cddae87fb96eebca942299f88"}, + {file = "prompt_toolkit-3.0.19.tar.gz", hash = "sha256:08360ee3a3148bdb5163621709ee322ec34fc4375099afa4bbf751e9b7b7fa4f"}, ] psycopg2-binary = [ - {file = "psycopg2-binary-2.8.6.tar.gz", hash = "sha256:11b9c0ebce097180129e422379b824ae21c8f2a6596b159c7659e2e5a00e1aa0"}, - {file = "psycopg2_binary-2.8.6-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:d14b140a4439d816e3b1229a4a525df917d6ea22a0771a2a78332273fd9528a4"}, - {file = "psycopg2_binary-2.8.6-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:1fabed9ea2acc4efe4671b92c669a213db744d2af8a9fc5d69a8e9bc14b7a9db"}, - {file = "psycopg2_binary-2.8.6-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:f5ab93a2cb2d8338b1674be43b442a7f544a0971da062a5da774ed40587f18f5"}, - {file = "psycopg2_binary-2.8.6-cp27-cp27m-win32.whl", hash = "sha256:b4afc542c0ac0db720cf516dd20c0846f71c248d2b3d21013aa0d4ef9c71ca25"}, - {file = "psycopg2_binary-2.8.6-cp27-cp27m-win_amd64.whl", hash = "sha256:e74a55f6bad0e7d3968399deb50f61f4db1926acf4a6d83beaaa7df986f48b1c"}, - {file = "psycopg2_binary-2.8.6-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:0deac2af1a587ae12836aa07970f5cb91964f05a7c6cdb69d8425ff4c15d4e2c"}, - {file = "psycopg2_binary-2.8.6-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ad20d2eb875aaa1ea6d0f2916949f5c08a19c74d05b16ce6ebf6d24f2c9f75d1"}, - {file = "psycopg2_binary-2.8.6-cp34-cp34m-win32.whl", 
hash = "sha256:950bc22bb56ee6ff142a2cb9ee980b571dd0912b0334aa3fe0fe3788d860bea2"}, - {file = "psycopg2_binary-2.8.6-cp34-cp34m-win_amd64.whl", hash = "sha256:b8a3715b3c4e604bcc94c90a825cd7f5635417453b253499664f784fc4da0152"}, - {file = "psycopg2_binary-2.8.6-cp35-cp35m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:d1b4ab59e02d9008efe10ceabd0b31e79519da6fb67f7d8e8977118832d0f449"}, - {file = "psycopg2_binary-2.8.6-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:ac0c682111fbf404525dfc0f18a8b5f11be52657d4f96e9fcb75daf4f3984859"}, - {file = "psycopg2_binary-2.8.6-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:7d92a09b788cbb1aec325af5fcba9fed7203897bbd9269d5691bb1e3bce29550"}, - {file = "psycopg2_binary-2.8.6-cp35-cp35m-win32.whl", hash = "sha256:aaa4213c862f0ef00022751161df35804127b78adf4a2755b9f991a507e425fd"}, - {file = "psycopg2_binary-2.8.6-cp35-cp35m-win_amd64.whl", hash = "sha256:c2507d796fca339c8fb03216364cca68d87e037c1f774977c8fc377627d01c71"}, - {file = "psycopg2_binary-2.8.6-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:ee69dad2c7155756ad114c02db06002f4cded41132cc51378e57aad79cc8e4f4"}, - {file = "psycopg2_binary-2.8.6-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:e82aba2188b9ba309fd8e271702bd0d0fc9148ae3150532bbb474f4590039ffb"}, - {file = "psycopg2_binary-2.8.6-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:d5227b229005a696cc67676e24c214740efd90b148de5733419ac9aaba3773da"}, - {file = "psycopg2_binary-2.8.6-cp36-cp36m-win32.whl", hash = "sha256:a0eb43a07386c3f1f1ebb4dc7aafb13f67188eab896e7397aa1ee95a9c884eb2"}, - {file = "psycopg2_binary-2.8.6-cp36-cp36m-win_amd64.whl", hash = "sha256:e1f57aa70d3f7cc6947fd88636a481638263ba04a742b4a37dd25c373e41491a"}, - {file = "psycopg2_binary-2.8.6-cp37-cp37m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:833709a5c66ca52f1d21d41865a637223b368c0ee76ea54ca5bad6f2526c7679"}, - {file = "psycopg2_binary-2.8.6-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:ba28584e6bca48c59eecbf7efb1576ca214b47f05194646b081717fa628dfddf"}, - {file = "psycopg2_binary-2.8.6-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:6a32f3a4cb2f6e1a0b15215f448e8ce2da192fd4ff35084d80d5e39da683e79b"}, - {file = "psycopg2_binary-2.8.6-cp37-cp37m-win32.whl", hash = "sha256:0e4dc3d5996760104746e6cfcdb519d9d2cd27c738296525d5867ea695774e67"}, - {file = "psycopg2_binary-2.8.6-cp37-cp37m-win_amd64.whl", hash = "sha256:cec7e622ebc545dbb4564e483dd20e4e404da17ae07e06f3e780b2dacd5cee66"}, - {file = "psycopg2_binary-2.8.6-cp38-cp38-macosx_10_9_x86_64.macosx_10_9_intel.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:ba381aec3a5dc29634f20692349d73f2d21f17653bda1decf0b52b11d694541f"}, - {file = "psycopg2_binary-2.8.6-cp38-cp38-manylinux1_i686.whl", hash = "sha256:a0c50db33c32594305b0ef9abc0cb7db13de7621d2cadf8392a1d9b3c437ef77"}, - {file = "psycopg2_binary-2.8.6-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:2dac98e85565d5688e8ab7bdea5446674a83a3945a8f416ad0110018d1501b94"}, - {file = "psycopg2_binary-2.8.6-cp38-cp38-win32.whl", hash = "sha256:bd1be66dde2b82f80afb9459fc618216753f67109b859a361cf7def5c7968729"}, - {file = "psycopg2_binary-2.8.6-cp38-cp38-win_amd64.whl", hash = "sha256:8cd0fb36c7412996859cb4606a35969dd01f4ea34d9812a141cd920c3b18be77"}, - {file = 
"psycopg2_binary-2.8.6-cp39-cp39-macosx_10_9_x86_64.macosx_10_9_intel.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:89705f45ce07b2dfa806ee84439ec67c5d9a0ef20154e0e475e2b2ed392a5b83"}, - {file = "psycopg2_binary-2.8.6-cp39-cp39-manylinux1_i686.whl", hash = "sha256:42ec1035841b389e8cc3692277a0bd81cdfe0b65d575a2c8862cec7a80e62e52"}, - {file = "psycopg2_binary-2.8.6-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7312e931b90fe14f925729cde58022f5d034241918a5c4f9797cac62f6b3a9dd"}, - {file = "psycopg2_binary-2.8.6-cp39-cp39-win32.whl", hash = "sha256:6422f2ff0919fd720195f64ffd8f924c1395d30f9a495f31e2392c2efafb5056"}, - {file = "psycopg2_binary-2.8.6-cp39-cp39-win_amd64.whl", hash = "sha256:15978a1fbd225583dd8cdaf37e67ccc278b5abecb4caf6b2d6b8e2b948e953f6"}, + {file = "psycopg2-binary-2.9.1.tar.gz", hash = "sha256:b0221ca5a9837e040ebf61f48899926b5783668b7807419e4adae8175a31f773"}, + {file = "psycopg2_binary-2.9.1-cp36-cp36m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:c250a7ec489b652c892e4f0a5d122cc14c3780f9f643e1a326754aedf82d9a76"}, + {file = "psycopg2_binary-2.9.1-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aef9aee84ec78af51107181d02fe8773b100b01c5dfde351184ad9223eab3698"}, + {file = "psycopg2_binary-2.9.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:123c3fb684e9abfc47218d3784c7b4c47c8587951ea4dd5bc38b6636ac57f616"}, + {file = "psycopg2_binary-2.9.1-cp36-cp36m-manylinux_2_24_aarch64.whl", hash = "sha256:995fc41ebda5a7a663a254a1dcac52638c3e847f48307b5416ee373da15075d7"}, + {file = "psycopg2_binary-2.9.1-cp36-cp36m-manylinux_2_24_ppc64le.whl", hash = "sha256:fbb42a541b1093385a2d8c7eec94d26d30437d0e77c1d25dae1dcc46741a385e"}, + {file = "psycopg2_binary-2.9.1-cp36-cp36m-win32.whl", hash = "sha256:20f1ab44d8c352074e2d7ca67dc00843067788791be373e67a0911998787ce7d"}, + {file = "psycopg2_binary-2.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f6fac64a38f6768e7bc7b035b9e10d8a538a9fadce06b983fb3e6fa55ac5f5ce"}, + {file = "psycopg2_binary-2.9.1-cp37-cp37m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:1e3a362790edc0a365385b1ac4cc0acc429a0c0d662d829a50b6ce743ae61b5a"}, + {file = "psycopg2_binary-2.9.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f8559617b1fcf59a9aedba2c9838b5b6aa211ffedecabca412b92a1ff75aac1a"}, + {file = "psycopg2_binary-2.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a36c7eb6152ba5467fb264d73844877be8b0847874d4822b7cf2d3c0cb8cdcb0"}, + {file = "psycopg2_binary-2.9.1-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:2f62c207d1740b0bde5c4e949f857b044818f734a3d57f1d0d0edc65050532ed"}, + {file = "psycopg2_binary-2.9.1-cp37-cp37m-manylinux_2_24_ppc64le.whl", hash = "sha256:cfc523edecddaef56f6740d7de1ce24a2fdf94fd5e704091856a201872e37f9f"}, + {file = "psycopg2_binary-2.9.1-cp37-cp37m-win32.whl", hash = "sha256:1e85b74cbbb3056e3656f1cc4781294df03383127a8114cbc6531e8b8367bf1e"}, + {file = "psycopg2_binary-2.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:1473c0215b0613dd938db54a653f68251a45a78b05f6fc21af4326f40e8360a2"}, + {file = "psycopg2_binary-2.9.1-cp38-cp38-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:35c4310f8febe41f442d3c65066ca93cccefd75013df3d8c736c5b93ec288140"}, + {file = 
"psycopg2_binary-2.9.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c13d72ed6af7fd2c8acbd95661cf9477f94e381fce0792c04981a8283b52917"}, + {file = "psycopg2_binary-2.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14db1752acdd2187d99cb2ca0a1a6dfe57fc65c3281e0f20e597aac8d2a5bd90"}, + {file = "psycopg2_binary-2.9.1-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:aed4a9a7e3221b3e252c39d0bf794c438dc5453bc2963e8befe9d4cd324dff72"}, + {file = "psycopg2_binary-2.9.1-cp38-cp38-manylinux_2_24_ppc64le.whl", hash = "sha256:da113b70f6ec40e7d81b43d1b139b9db6a05727ab8be1ee559f3a69854a69d34"}, + {file = "psycopg2_binary-2.9.1-cp38-cp38-win32.whl", hash = "sha256:4235f9d5ddcab0b8dbd723dca56ea2922b485ea00e1dafacf33b0c7e840b3d32"}, + {file = "psycopg2_binary-2.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:988b47ac70d204aed01589ed342303da7c4d84b56c2f4c4b8b00deda123372bf"}, + {file = "psycopg2_binary-2.9.1-cp39-cp39-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:7360647ea04db2e7dff1648d1da825c8cf68dc5fbd80b8fb5b3ee9f068dcd21a"}, + {file = "psycopg2_binary-2.9.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca86db5b561b894f9e5f115d6a159fff2a2570a652e07889d8a383b5fae66eb4"}, + {file = "psycopg2_binary-2.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ced67f1e34e1a450cdb48eb53ca73b60aa0af21c46b9b35ac3e581cf9f00e31"}, + {file = "psycopg2_binary-2.9.1-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:0f2e04bd2a2ab54fa44ee67fe2d002bb90cee1c0f1cc0ebc3148af7b02034cbd"}, + {file = "psycopg2_binary-2.9.1-cp39-cp39-manylinux_2_24_ppc64le.whl", hash = "sha256:3242b9619de955ab44581a03a64bdd7d5e470cc4183e8fcadd85ab9d3756ce7a"}, + {file = "psycopg2_binary-2.9.1-cp39-cp39-win32.whl", hash = "sha256:0b7dae87f0b729922e06f85f667de7bf16455d411971b2043bbd9577af9d1975"}, + {file = "psycopg2_binary-2.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:b4d7679a08fea64573c969f6994a2631908bb2c0e69a7235648642f3d2e39a68"}, ] ptyprocess = [ {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, @@ -1527,7 +1523,27 @@ pyparsing = [ {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"}, ] pyrsistent = [ - {file = "pyrsistent-0.17.3.tar.gz", hash = "sha256:2e636185d9eb976a18a8a8e96efce62f2905fea90041958d8cc2a189756ebf3e"}, + {file = "pyrsistent-0.18.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f4c8cabb46ff8e5d61f56a037974228e978f26bfefce4f61a4b1ac0ba7a2ab72"}, + {file = "pyrsistent-0.18.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:da6e5e818d18459fa46fac0a4a4e543507fe1110e808101277c5a2b5bab0cd2d"}, + {file = "pyrsistent-0.18.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:5e4395bbf841693eaebaa5bb5c8f5cdbb1d139e07c975c682ec4e4f8126e03d2"}, + {file = "pyrsistent-0.18.0-cp36-cp36m-win32.whl", hash = "sha256:527be2bfa8dc80f6f8ddd65242ba476a6c4fb4e3aedbf281dfbac1b1ed4165b1"}, + {file = "pyrsistent-0.18.0-cp36-cp36m-win_amd64.whl", hash = "sha256:2aaf19dc8ce517a8653746d98e962ef480ff34b6bc563fc067be6401ffb457c7"}, + {file = "pyrsistent-0.18.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:58a70d93fb79dc585b21f9d72487b929a6fe58da0754fa4cb9f279bb92369396"}, + {file = "pyrsistent-0.18.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:4916c10896721e472ee12c95cdc2891ce5890898d2f9907b1b4ae0f53588b710"}, + {file = 
"pyrsistent-0.18.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:73ff61b1411e3fb0ba144b8f08d6749749775fe89688093e1efef9839d2dcc35"}, + {file = "pyrsistent-0.18.0-cp37-cp37m-win32.whl", hash = "sha256:b29b869cf58412ca5738d23691e96d8aff535e17390128a1a52717c9a109da4f"}, + {file = "pyrsistent-0.18.0-cp37-cp37m-win_amd64.whl", hash = "sha256:097b96f129dd36a8c9e33594e7ebb151b1515eb52cceb08474c10a5479e799f2"}, + {file = "pyrsistent-0.18.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:772e94c2c6864f2cd2ffbe58bb3bdefbe2a32afa0acb1a77e472aac831f83427"}, + {file = "pyrsistent-0.18.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:c1a9ff320fa699337e05edcaae79ef8c2880b52720bc031b219e5b5008ebbdef"}, + {file = "pyrsistent-0.18.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cd3caef37a415fd0dae6148a1b6957a8c5f275a62cca02e18474608cb263640c"}, + {file = "pyrsistent-0.18.0-cp38-cp38-win32.whl", hash = "sha256:e79d94ca58fcafef6395f6352383fa1a76922268fa02caa2272fff501c2fdc78"}, + {file = "pyrsistent-0.18.0-cp38-cp38-win_amd64.whl", hash = "sha256:a0c772d791c38bbc77be659af29bb14c38ced151433592e326361610250c605b"}, + {file = "pyrsistent-0.18.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d5ec194c9c573aafaceebf05fc400656722793dac57f254cd4741f3c27ae57b4"}, + {file = "pyrsistent-0.18.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:6b5eed00e597b5b5773b4ca30bd48a5774ef1e96f2a45d105db5b4ebb4bca680"}, + {file = "pyrsistent-0.18.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:48578680353f41dca1ca3dc48629fb77dfc745128b56fc01096b2530c13fd426"}, + {file = "pyrsistent-0.18.0-cp39-cp39-win32.whl", hash = "sha256:f3ef98d7b76da5eb19c37fda834d50262ff9167c65658d1d8f974d2e4d90676b"}, + {file = "pyrsistent-0.18.0-cp39-cp39-win_amd64.whl", hash = "sha256:404e1f1d254d314d55adb8d87f4f465c8693d6f902f67eb6ef5b4526dc58e6ea"}, + {file = "pyrsistent-0.18.0.tar.gz", hash = "sha256:773c781216f8c2900b42a7b638d5b517bb134ae1acbebe4d1e8f1f41ea60eb4b"}, ] python-box = [ {file = "python-box-5.3.0.tar.gz", hash = "sha256:4ed4ef5d34de505a65c01e3f1911de8cdb29484fcae0c035141dce535c6c194a"}, @@ -1588,47 +1604,47 @@ redis = [ {file = "redis-3.5.3.tar.gz", hash = "sha256:0e7e0cfca8660dea8b7d5cd8c4f6c5e29e11f31158c0b0ae91a397f00e5a05a2"}, ] regex = [ - {file = "regex-2021.4.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:619d71c59a78b84d7f18891fe914446d07edd48dc8328c8e149cbe0929b4e000"}, - {file = "regex-2021.4.4-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:47bf5bf60cf04d72bf6055ae5927a0bd9016096bf3d742fa50d9bf9f45aa0711"}, - {file = "regex-2021.4.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:281d2fd05555079448537fe108d79eb031b403dac622621c78944c235f3fcf11"}, - {file = "regex-2021.4.4-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:bd28bc2e3a772acbb07787c6308e00d9626ff89e3bfcdebe87fa5afbfdedf968"}, - {file = "regex-2021.4.4-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:7c2a1af393fcc09e898beba5dd59196edaa3116191cc7257f9224beaed3e1aa0"}, - {file = "regex-2021.4.4-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:c38c71df845e2aabb7fb0b920d11a1b5ac8526005e533a8920aea97efb8ec6a4"}, - {file = "regex-2021.4.4-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:96fcd1888ab4d03adfc9303a7b3c0bd78c5412b2bfbe76db5b56d9eae004907a"}, - {file = "regex-2021.4.4-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:ade17eb5d643b7fead300a1641e9f45401c98eee23763e9ed66a43f92f20b4a7"}, - {file = "regex-2021.4.4-cp36-cp36m-win32.whl", hash = "sha256:e8e5b509d5c2ff12f8418006d5a90e9436766133b564db0abaec92fd27fcee29"}, - {file = 
"regex-2021.4.4-cp36-cp36m-win_amd64.whl", hash = "sha256:11d773d75fa650cd36f68d7ca936e3c7afaae41b863b8c387a22aaa78d3c5c79"}, - {file = "regex-2021.4.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d3029c340cfbb3ac0a71798100ccc13b97dddf373a4ae56b6a72cf70dfd53bc8"}, - {file = "regex-2021.4.4-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:18c071c3eb09c30a264879f0d310d37fe5d3a3111662438889ae2eb6fc570c31"}, - {file = "regex-2021.4.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:4c557a7b470908b1712fe27fb1ef20772b78079808c87d20a90d051660b1d69a"}, - {file = "regex-2021.4.4-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:01afaf2ec48e196ba91b37451aa353cb7eda77efe518e481707e0515025f0cd5"}, - {file = "regex-2021.4.4-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:3a9cd17e6e5c7eb328517969e0cb0c3d31fd329298dd0c04af99ebf42e904f82"}, - {file = "regex-2021.4.4-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:90f11ff637fe8798933fb29f5ae1148c978cccb0452005bf4c69e13db951e765"}, - {file = "regex-2021.4.4-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:919859aa909429fb5aa9cf8807f6045592c85ef56fdd30a9a3747e513db2536e"}, - {file = "regex-2021.4.4-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:339456e7d8c06dd36a22e451d58ef72cef293112b559010db3d054d5560ef439"}, - {file = "regex-2021.4.4-cp37-cp37m-win32.whl", hash = "sha256:67bdb9702427ceddc6ef3dc382455e90f785af4c13d495f9626861763ee13f9d"}, - {file = "regex-2021.4.4-cp37-cp37m-win_amd64.whl", hash = "sha256:32e65442138b7b76dd8173ffa2cf67356b7bc1768851dded39a7a13bf9223da3"}, - {file = "regex-2021.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1e1c20e29358165242928c2de1482fb2cf4ea54a6a6dea2bd7a0e0d8ee321500"}, - {file = "regex-2021.4.4-cp38-cp38-manylinux1_i686.whl", hash = "sha256:314d66636c494ed9c148a42731b3834496cc9a2c4251b1661e40936814542b14"}, - {file = "regex-2021.4.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:6d1b01031dedf2503631d0903cb563743f397ccaf6607a5e3b19a3d76fc10480"}, - {file = "regex-2021.4.4-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:741a9647fcf2e45f3a1cf0e24f5e17febf3efe8d4ba1281dcc3aa0459ef424dc"}, - {file = "regex-2021.4.4-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:4c46e22a0933dd783467cf32b3516299fb98cfebd895817d685130cc50cd1093"}, - {file = "regex-2021.4.4-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:e512d8ef5ad7b898cdb2d8ee1cb09a8339e4f8be706d27eaa180c2f177248a10"}, - {file = "regex-2021.4.4-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:980d7be47c84979d9136328d882f67ec5e50008681d94ecc8afa8a65ed1f4a6f"}, - {file = "regex-2021.4.4-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:ce15b6d103daff8e9fee13cf7f0add05245a05d866e73926c358e871221eae87"}, - {file = "regex-2021.4.4-cp38-cp38-win32.whl", hash = "sha256:a91aa8619b23b79bcbeb37abe286f2f408d2f2d6f29a17237afda55bb54e7aac"}, - {file = "regex-2021.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:c0502c0fadef0d23b128605d69b58edb2c681c25d44574fc673b0e52dce71ee2"}, - {file = "regex-2021.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:598585c9f0af8374c28edd609eb291b5726d7cbce16be6a8b95aa074d252ee17"}, - {file = "regex-2021.4.4-cp39-cp39-manylinux1_i686.whl", hash = "sha256:ee54ff27bf0afaf4c3b3a62bcd016c12c3fdb4ec4f413391a90bd38bc3624605"}, - {file = "regex-2021.4.4-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7d9884d86dd4dd489e981d94a65cd30d6f07203d90e98f6f657f05170f6324c9"}, - {file = "regex-2021.4.4-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:bf5824bfac591ddb2c1f0a5f4ab72da28994548c708d2191e3b87dd207eb3ad7"}, - 
{file = "regex-2021.4.4-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:563085e55b0d4fb8f746f6a335893bda5c2cef43b2f0258fe1020ab1dd874df8"}, - {file = "regex-2021.4.4-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b9c3db21af35e3b3c05764461b262d6f05bbca08a71a7849fd79d47ba7bc33ed"}, - {file = "regex-2021.4.4-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:3916d08be28a1149fb97f7728fca1f7c15d309a9f9682d89d79db75d5e52091c"}, - {file = "regex-2021.4.4-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:fd45ff9293d9274c5008a2054ecef86a9bfe819a67c7be1afb65e69b405b3042"}, - {file = "regex-2021.4.4-cp39-cp39-win32.whl", hash = "sha256:fa4537fb4a98fe8fde99626e4681cc644bdcf2a795038533f9f711513a862ae6"}, - {file = "regex-2021.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:97f29f57d5b84e73fbaf99ab3e26134e6687348e95ef6b48cfd2c06807005a07"}, - {file = "regex-2021.4.4.tar.gz", hash = "sha256:52ba3d3f9b942c49d7e4bc105bb28551c44065f139a65062ab7912bef10c9afb"}, + {file = "regex-2021.7.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e6a1e5ca97d411a461041d057348e578dc344ecd2add3555aedba3b408c9f874"}, + {file = "regex-2021.7.6-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:6afe6a627888c9a6cfbb603d1d017ce204cebd589d66e0703309b8048c3b0854"}, + {file = "regex-2021.7.6-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:ccb3d2190476d00414aab36cca453e4596e8f70a206e2aa8db3d495a109153d2"}, + {file = "regex-2021.7.6-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:ed693137a9187052fc46eedfafdcb74e09917166362af4cc4fddc3b31560e93d"}, + {file = "regex-2021.7.6-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:99d8ab206a5270c1002bfcf25c51bf329ca951e5a169f3b43214fdda1f0b5f0d"}, + {file = "regex-2021.7.6-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:b85ac458354165405c8a84725de7bbd07b00d9f72c31a60ffbf96bb38d3e25fa"}, + {file = "regex-2021.7.6-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:3f5716923d3d0bfb27048242a6e0f14eecdb2e2a7fac47eda1d055288595f222"}, + {file = "regex-2021.7.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5983c19d0beb6af88cb4d47afb92d96751fb3fa1784d8785b1cdf14c6519407"}, + {file = "regex-2021.7.6-cp36-cp36m-win32.whl", hash = "sha256:c92831dac113a6e0ab28bc98f33781383fe294df1a2c3dfd1e850114da35fd5b"}, + {file = "regex-2021.7.6-cp36-cp36m-win_amd64.whl", hash = "sha256:791aa1b300e5b6e5d597c37c346fb4d66422178566bbb426dd87eaae475053fb"}, + {file = "regex-2021.7.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:59506c6e8bd9306cd8a41511e32d16d5d1194110b8cfe5a11d102d8b63cf945d"}, + {file = "regex-2021.7.6-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:564a4c8a29435d1f2256ba247a0315325ea63335508ad8ed938a4f14c4116a5d"}, + {file = "regex-2021.7.6-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:59c00bb8dd8775473cbfb967925ad2c3ecc8886b3b2d0c90a8e2707e06c743f0"}, + {file = "regex-2021.7.6-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:9a854b916806c7e3b40e6616ac9e85d3cdb7649d9e6590653deb5b341a736cec"}, + {file = "regex-2021.7.6-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:db2b7df831c3187a37f3bb80ec095f249fa276dbe09abd3d35297fc250385694"}, + {file = "regex-2021.7.6-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:173bc44ff95bc1e96398c38f3629d86fa72e539c79900283afa895694229fe6a"}, + {file = "regex-2021.7.6-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:15dddb19823f5147e7517bb12635b3c82e6f2a3a6b696cc3e321522e8b9308ad"}, + {file = "regex-2021.7.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2ddeabc7652024803666ea09f32dd1ed40a0579b6fbb2a213eba590683025895"}, + {file = "regex-2021.7.6-cp37-cp37m-win32.whl", hash = "sha256:f080248b3e029d052bf74a897b9d74cfb7643537fbde97fe8225a6467fb559b5"}, + {file = "regex-2021.7.6-cp37-cp37m-win_amd64.whl", hash = "sha256:d8bbce0c96462dbceaa7ac4a7dfbbee92745b801b24bce10a98d2f2b1ea9432f"}, + {file = "regex-2021.7.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:edd1a68f79b89b0c57339bce297ad5d5ffcc6ae7e1afdb10f1947706ed066c9c"}, + {file = "regex-2021.7.6-cp38-cp38-manylinux1_i686.whl", hash = "sha256:422dec1e7cbb2efbbe50e3f1de36b82906def93ed48da12d1714cabcd993d7f0"}, + {file = "regex-2021.7.6-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cbe23b323988a04c3e5b0c387fe3f8f363bf06c0680daf775875d979e376bd26"}, + {file = "regex-2021.7.6-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:0eb2c6e0fcec5e0f1d3bcc1133556563222a2ffd2211945d7b1480c1b1a42a6f"}, + {file = "regex-2021.7.6-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:1c78780bf46d620ff4fff40728f98b8afd8b8e35c3efd638c7df67be2d5cddbf"}, + {file = "regex-2021.7.6-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:bc84fb254a875a9f66616ed4538542fb7965db6356f3df571d783f7c8d256edd"}, + {file = "regex-2021.7.6-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:598c0a79b4b851b922f504f9f39a863d83ebdfff787261a5ed061c21e67dd761"}, + {file = "regex-2021.7.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:875c355360d0f8d3d827e462b29ea7682bf52327d500a4f837e934e9e4656068"}, + {file = "regex-2021.7.6-cp38-cp38-win32.whl", hash = "sha256:e586f448df2bbc37dfadccdb7ccd125c62b4348cb90c10840d695592aa1b29e0"}, + {file = "regex-2021.7.6-cp38-cp38-win_amd64.whl", hash = "sha256:2fe5e71e11a54e3355fa272137d521a40aace5d937d08b494bed4529964c19c4"}, + {file = "regex-2021.7.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6110bab7eab6566492618540c70edd4d2a18f40ca1d51d704f1d81c52d245026"}, + {file = "regex-2021.7.6-cp39-cp39-manylinux1_i686.whl", hash = "sha256:4f64fc59fd5b10557f6cd0937e1597af022ad9b27d454e182485f1db3008f417"}, + {file = "regex-2021.7.6-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:89e5528803566af4df368df2d6f503c84fbfb8249e6631c7b025fe23e6bd0cde"}, + {file = "regex-2021.7.6-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:2366fe0479ca0e9afa534174faa2beae87847d208d457d200183f28c74eaea59"}, + {file = "regex-2021.7.6-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:f9392a4555f3e4cb45310a65b403d86b589adc773898c25a39184b1ba4db8985"}, + {file = "regex-2021.7.6-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:2bceeb491b38225b1fee4517107b8491ba54fba77cf22a12e996d96a3c55613d"}, + {file = "regex-2021.7.6-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:f98dc35ab9a749276f1a4a38ab3e0e2ba1662ce710f6530f5b0a6656f1c32b58"}, + {file = "regex-2021.7.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:319eb2a8d0888fa6f1d9177705f341bc9455a2c8aca130016e52c7fe8d6c37a3"}, + {file = "regex-2021.7.6-cp39-cp39-win32.whl", hash = "sha256:eaf58b9e30e0e546cdc3ac06cf9165a1ca5b3de8221e9df679416ca667972035"}, + {file = "regex-2021.7.6-cp39-cp39-win_amd64.whl", hash = "sha256:4c9c3155fe74269f61e27617529b7f09552fbb12e44b1189cebbdb24294e6e1c"}, + {file = "regex-2021.7.6.tar.gz", hash = "sha256:8394e266005f2d8c6f0bc6780001f7afa3ef81a7a2111fa35058ded6fce79e4d"}, ] requests = [ {file = "requests-2.25.1-py2.py3-none-any.whl", hash = "sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e"}, @@ -1647,8 +1663,8 @@ snowballstemmer = [ {file = 
"snowballstemmer-2.1.0.tar.gz", hash = "sha256:e997baa4f2e9139951b6f4c631bad912dfd3c792467e2f03d7239464af90e914"}, ] sphinx = [ - {file = "Sphinx-3.5.4-py3-none-any.whl", hash = "sha256:2320d4e994a191f4b4be27da514e46b3d6b420f2ff895d064f52415d342461e8"}, - {file = "Sphinx-3.5.4.tar.gz", hash = "sha256:19010b7b9fa0dc7756a6e105b2aacd3a80f798af3c25c273be64d7beeb482cb1"}, + {file = "Sphinx-4.0.3-py3-none-any.whl", hash = "sha256:5747f3c855028076fcff1e4df5e75e07c836f0ac11f7df886747231092cfe4ad"}, + {file = "Sphinx-4.0.3.tar.gz", hash = "sha256:dff357e6a208eb7edb2002714733ac21a9fe597e73609ff417ab8cf0c6b4fbb8"}, ] sphinx-autobuild = [ {file = "sphinx-autobuild-2021.3.14.tar.gz", hash = "sha256:de1ca3b66e271d2b5b5140c35034c89e47f263f2cd5db302c9217065f7443f05"}, @@ -1783,8 +1799,8 @@ typing-extensions = [ {file = "typing_extensions-3.10.0.0.tar.gz", hash = "sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342"}, ] urllib3 = [ - {file = "urllib3-1.26.5-py2.py3-none-any.whl", hash = "sha256:753a0374df26658f99d826cfe40394a686d05985786d946fbe4165b5148f5a7c"}, - {file = "urllib3-1.26.5.tar.gz", hash = "sha256:a7acd0977125325f516bda9735fa7142b909a8d01e8b2e4c8108d0984e6e0098"}, + {file = "urllib3-1.26.6-py2.py3-none-any.whl", hash = "sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4"}, + {file = "urllib3-1.26.6.tar.gz", hash = "sha256:f57b4c16c62fa2760b7e3d97c35b255512fb6b59a259730f36ba32ce9f8e342f"}, ] wcwidth = [ {file = "wcwidth-0.2.5-py2.py3-none-any.whl", hash = "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784"}, @@ -1795,6 +1811,6 @@ xmltodict = [ {file = "xmltodict-0.12.0.tar.gz", hash = "sha256:50d8c638ed7ecb88d90561beedbf720c9b4e851a9fa6c47ebd64e99d166d8a21"}, ] zipp = [ - {file = "zipp-3.4.1-py3-none-any.whl", hash = "sha256:51cb66cc54621609dd593d1787f286ee42a5c0adbb4b29abea5a63edc3e03098"}, - {file = "zipp-3.4.1.tar.gz", hash = "sha256:3607921face881ba3e026887d8150cca609d517579abe052ac81fc5aeffdbd76"}, + {file = "zipp-3.5.0-py3-none-any.whl", hash = "sha256:957cfda87797e389580cb8b9e3870841ca991e2125350677b2ca83a0e99390a3"}, + {file = "zipp-3.5.0.tar.gz", hash = "sha256:f5812b1e007e48cff63449a5e9f4e7ebea716b4111f9c4f9a645f91d579bf0c4"}, ] diff --git a/pyproject.toml b/pyproject.toml index d1127088..8742b747 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,12 +23,12 @@ django-split-settings = "^1.0.1" rdflib = "^5.0.0" python-dateutil = "^2.8.1" pyoai = {git = "https://github.com/infrae/pyoai", rev = "5f6eba12"} +python-box = "^5.3.0" python-simplexquery = {version = "*", optional = true} # These are here because of: https://github.com/python-poetry/poetry/issues/1644 -Sphinx = {version = "^3.5.4", optional = true} +Sphinx = {version = "^4.0.2", optional = true} sphinx-autobuild = {version = "^2021.3.14", optional = true} sphinx-rtd-theme = {version = "^0.5.2", optional = true} -python-box = "^5.3.0" PyYAML = { version = "^5.4.1", optional = true} [tool.poetry.dev-dependencies] diff --git a/requirements.txt b/requirements.txt index 5125b524..a180981a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -39,7 +39,7 @@ isbnid-fork==0.5.2 isodate==0.6.0 isort==5.8.0; python_version >= "3.6" and python_version < "4.0" jedi==0.18.0; python_version >= "3.7" -jinja2==2.11.3; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" +jinja2==3.0.1; python_version >= "3.6" jsonschema==3.2.0 livereload==2.6.3; python_version >= "3.6" lxml==4.6.3; (python_version >= "2.7" 
and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") @@ -74,17 +74,17 @@ six==1.16.0; python_version >= "2.7" and python_full_version < "3.0.0" or python snowballstemmer==2.1.0; python_version >= "3.6" sphinx-autobuild==2021.3.14; python_version >= "3.6" sphinx-rtd-theme==0.5.2 -sphinx==3.5.4; python_version >= "3.5" +sphinx==4.0.2; python_version >= "3.6" sphinxcontrib-applehelp==1.0.2; python_version >= "3.6" sphinxcontrib-devhelp==1.0.2; python_version >= "3.6" -sphinxcontrib-htmlhelp==1.0.3; python_version >= "3.6" +sphinxcontrib-htmlhelp==2.0.0; python_version >= "3.6" sphinxcontrib-jsmath==1.0.1; python_version >= "3.6" sphinxcontrib-qthelp==1.0.3; python_version >= "3.6" -sphinxcontrib-serializinghtml==1.1.4; python_version >= "3.6" +sphinxcontrib-serializinghtml==1.1.5; python_version >= "3.6" sqlparse==0.4.1; python_version >= "3.6" structlog==21.1.0; python_version >= "3.6" tblib==1.7.0; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") -toml==0.10.2; python_version > "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" and python_version > "3.6" +toml==0.10.2; python_version > "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version > "3.6" and python_version < "4" tornado==6.1; python_version >= "3.6" traitlets==5.0.5; python_version >= "3.7" typed-ast==1.4.3; python_version >= "3.6" From b27751909a6ee7d75d7e5e32daa10c4500487da3 Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Fri, 4 Jun 2021 09:57:54 +0300 Subject: [PATCH 039/160] CSCFAIRMETA-1047: add docker related stuff and documentation --- config-swap-stack.yml | 10 ++++++++++ docker-compose.yml | 10 ++++++++++ docs/api/README.md | 29 ++++++++++++++++++++++------- docs/docker-stack.md | 14 ++++++++++---- sphinx.dockerfile | 10 ++++++++++ 5 files changed, 62 insertions(+), 11 deletions(-) create mode 100644 sphinx.dockerfile diff --git a/config-swap-stack.yml b/config-swap-stack.yml index 8b4ae404..dbb7a062 100644 --- a/config-swap-stack.yml +++ b/config-swap-stack.yml @@ -51,6 +51,16 @@ services: volumes: - metax-rabbitmq:/var/lib/rabbitmq + metax-docs: + image: fairdata-docker.artifactory.ci.csc.fi/fairdata-metax-sphinx + hostname: metax-docs + environment: + DOMAIN: 0.0.0.0:8008 + ports: + - 8088:8000 + volumes: + - ./docs/api:/sphinx + volumes: metax-rabbitmq: external: true diff --git a/docker-compose.yml b/docker-compose.yml index 5c52aee5..09e1e661 100755 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -61,6 +61,16 @@ services: target: /usr/local/apache2/conf/extra/httpd-metax.conf hostname: metax-auth + metax-docs: + image: fairdata-docker.artifactory.ci.csc.fi/fairdata-metax-sphinx + hostname: metax-docs + environment: + DOMAIN: metax.fd-dev.csc.fi + ports: + - 8088:8000 + volumes: + - ./docs/api:/sphinx + fairdata-nginx: image: nginx:latest configs: diff --git a/docs/api/README.md index 335c3c76..ca4615e7 100644 --- a/docs/api/README.md +++ b/docs/api/README.md @@ -19,14 +19,29 @@ Although OpenApi is allowed to edit in json and yaml formats, in the case of Met A good starting point for studying OpenApi is [OpenApi specification V2](https://swagger.io/specification/v2/) ## Sphinx -### Enabling autobuilds -To install required dependencies run the following: ``pip install sphinx sphinx-autobuild sphinx_rtd_theme`` +The repository provides a Sphinx autobuild server in a Docker container for conveniently writing and developing the API documentation.
+Below are instructions on how to use the server. -To start server, run following in metax-api directory: ``sphinx-autobuild -t {envtag} docs/source/ docs/build/``, where {envtag} is one of local_development, test, stable or production. Envtag determines the target environment of the documentation. -Note that the server should be run on the host machine since virtual machine does not build docs automatically. -To conditionally add parts of the documentation, use only -directive. See [This](https://github.com/sphinx-doc/sphinx/issues/1115) for known issue with this directive and headings. +### Building the image + +The autobuild server can be built with the following command from the repo root: + +`docker build -t fairdata-docker.artifactory.ci.csc.fi/fairdata-metax-sphinx -f sphinx.dockerfile .` + +### Running the server in a standalone container + +The server can be run with the following command, also from the repo root: + +`docker run -it -v $PWD/docs/api:/sphinx/ -p 8088:8000 fairdata-docker.artifactory.ci.csc.fi/fairdata-metax-sphinx` + +### Running the server in the stack + +The autobuild server is also present in both of the stacks provided in the repo. The default dev env domain name `metax.fd-dev.csc.fi` is used in the documentation, +so it should be added to the `/etc/hosts` file to enable correct redirection of documentation links. Once the domain name has been added, the server is available at +`http://metax.fd-dev.csc.fi:8088`. Disabling the browser cache prevents redirection errors. If all else fails, `0.0.0.0:8088` should work. + +### Additional notes + +To conditionally add parts of the documentation, use the `only` directive. See [this](https://github.com/sphinx-doc/sphinx/issues/1115) for a known issue with this +directive and headings. diff --git a/docs/docker-stack.md index e39a8926..7c914e9d 100644 --- a/docs/docker-stack.md +++ b/docs/docker-stack.md @@ -48,29 +48,35 @@ To change existing configuration in metax-web container: ## Building related images -Docker image for metax-web can be built with the following command: +Docker images needed in development can be built with the following commands: `docker build -t fairdata-docker.artifactory.ci.csc.fi/fairdata-metax-web .` -Image for metax-httpd can be build with: - `docker build -t fairdata-docker.artifactory.ci.csc.fi/fairdata-metax-httpd -f httpd.dockerfile .` +`docker build -t fairdata-docker.artifactory.ci.csc.fi/fairdata-metax-httpd -f httpd.dockerfile .` +`docker build -t fairdata-docker.artifactory.ci.csc.fi/fairdata-metax-sphinx -f sphinx.dockerfile .` + ## Pushing images to Artifactory Ensure that you are logged in to Artifactory: `docker login fairdata-docker.artifactory.ci.csc.fi` -Push commands for metax-web and metax-httpd respectively: +Push commands for the Docker images: `docker push fairdata-docker.artifactory.ci.csc.fi/fairdata-metax-web` `docker push fairdata-docker.artifactory.ci.csc.fi/fairdata-metax-httpd` +`docker push fairdata-docker.artifactory.ci.csc.fi/fairdata-metax-sphinx` + ## Running Metax management commands To run Metax management commands, locate the running metax-dev_metax-web container (can be done with `docker container ls`) and open a terminal inside it with: `docker exec -it <container-id> bash` + +## Developing API documentation + +The stack also contains a Sphinx autobuild server for documentation development. More detailed instructions can be found [here](api/README.md).
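[Editor's note] A minimal sketch of the `/etc/hosts` step described in the README hunk above, with a quick smoke test of the autobuild server. The `127.0.0.1` address is an assumption (it applies when the stack's ports are published on the local machine; a VM-hosted swarm would need the VM's IP instead); the domain name and port 8088 come from the compose files in this patch:

```sh
# Map the dev domain name to the machine where the stack's ports are
# published (127.0.0.1 assumed; replace with the swarm node's IP if needed).
echo "127.0.0.1 metax.fd-dev.csc.fi" | sudo tee -a /etc/hosts

# Smoke test: the Sphinx autobuild server should answer on the published port.
curl -I http://metax.fd-dev.csc.fi:8088/
```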
diff --git a/sphinx.dockerfile b/sphinx.dockerfile new file mode 100644 index 00000000..71fd475c --- /dev/null +++ b/sphinx.dockerfile @@ -0,0 +1,10 @@ +FROM python:3.8-alpine + +RUN pip install Sphinx sphinx-autobuild sphinx-rtd-theme +RUN mkdir -p /sphinx/build + +WORKDIR /sphinx + +EXPOSE 8000 + +CMD ["sphinx-autobuild", "--host", "0.0.0.0", "-E", "-j", "auto", "/sphinx/", "/sphinx/build/"] From 646c62a60a1475efeb1a8003bc838b110e4c0363 Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Mon, 7 Jun 2021 14:19:56 +0300 Subject: [PATCH 040/160] Fix requirements.txt file generation command in README --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5878c391..8b6391bc 100755 --- a/README.md +++ b/README.md @@ -16,11 +16,12 @@ The recommended way to run the development setup is to use [Docker-swarm setup]( Install [Poetry](https://python-poetry.org/docs/) for your OS. Navigate to the repository root and run command `poetry install`. This will create and activate a new Python virtualenv, installing all necessary Python packages into it. -You can generate traditional requirements.txt file with `poetry export --dev -E "simplexquery docs swagger" --without-hashes -f requirements.txt --output requirements.txt` + +You can generate a traditional requirements.txt file with `poetry export --dev -E simplexquery -E docs -E swagger --without-hashes -o requirements.txt` ### Managing dependencies -__NOTICE: Please remember to execute `poetry export --dev -E "simplexquery docs swagger" --without-hashes -f requirements.txt --output requirements.txt` after any additions, updates or removals.__ +__NOTICE: Please remember to regenerate the requirements.txt file after any additions, updates or removals, using the command above.__ Developer dependencies can be added with command `poetry add -D <package>` Application dependencies can be added with command `poetry add <package>` From 3cbeb3a3da7dd7091253cf14c3d5a9db7ac31f65 Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Thu, 8 Jul 2021 10:21:30 +0300 Subject: [PATCH 041/160] CSCFAIRMETA-1047: fix bugs with python package dependencies --- poetry.lock | 6 +++--- requirements.txt | 42 +++++++++++++++++++++--------------------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3f528702..05e4a555 100644 --- a/poetry.lock +++ b/poetry.lock @@ -298,7 +298,7 @@ https = ["urllib3[secure] (>=1.24.1)"] [[package]] name = "elasticsearch" -version = "7.13.2" +version = "7.13.3" description = "Python client for Elasticsearch" category = "main" optional = false @@ -1297,8 +1297,8 @@ dulwich = [ {file = "dulwich-0.19.16.tar.gz", hash = "sha256:f74561c448bfb6f04c07de731c1181ae4280017f759b0bb04fa5770aa84ca850"}, ] elasticsearch = [ - {file = "elasticsearch-7.13.2-py2.py3-none-any.whl", hash = "sha256:583459eaa864d0e4c11b4e0314569999fa780748856bfaeb8d714fc9243c26a2"}, - {file = "elasticsearch-7.13.2.tar.gz", hash = "sha256:fd8b3a267da279ae78f7923c549d488403cdbf83a98299bb130feb832d014b8f"}, + {file = "elasticsearch-7.13.3-py2.py3-none-any.whl", hash = "sha256:76cc7670449676138acbab8872eb34867db033701310d9b01fb2ecfba5fb7234"}, + {file = "elasticsearch-7.13.3.tar.gz", hash = "sha256:d539d82552804b3d41033377b9adf11c39c749ee1764af3d74b56f42177e3281"}, ] executing = [ {file = "executing-0.7.0-py2.py3-none-any.whl", hash = "sha256:1971c98963857f2c03f4b688d93fc4b28ce756bd102955ea8ea7ce0a7fd9a28f"}, ] diff --git a/requirements.txt b/requirements.txt index a180981a..f7f3a93c 100644 --- a/requirements.txt +++
b/requirements.txt @@ -1,7 +1,7 @@ alabaster==0.7.12; python_version >= "3.6" appdirs==1.4.4; python_version >= "3.6" appnope==0.1.2; sys_platform == "darwin" and python_version >= "3.7" -asgiref==3.3.4; python_version >= "3.6" +asgiref==3.4.1; python_version >= "3.6" asttokens==2.0.5 attrs==21.2.0; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" autosemver==0.5.5 @@ -11,55 +11,55 @@ black==20.8b1; python_version >= "3.6" certifi==2021.5.30; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version < "4" chardet==4.0.0; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" click==8.0.1; python_version >= "3.6" -colorama==0.4.4; python_version >= "3.7" and python_full_version < "3.0.0" and platform_system == "Windows" and sys_platform == "win32" or platform_system == "Windows" and python_version >= "3.7" and python_full_version >= "3.5.0" and sys_platform == "win32" +colorama==0.4.4; python_version >= "3.7" and python_full_version < "3.0.0" and sys_platform == "win32" and platform_system == "Windows" or sys_platform == "win32" and python_version >= "3.7" and python_full_version >= "3.5.0" and platform_system == "Windows" coverage==5.5; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0" and python_version < "4") -datacite==1.1.1 +datacite==1.1.2 decorator==5.0.9; python_version >= "3.7" django-debug-toolbar==3.2.1; python_version >= "3.6" django-environ==0.4.5 django-rainbowtests==0.6.0 django-split-settings==1.0.1; python_version >= "3.6" and python_version < "4.0" django-watchman==1.2.0 -django==3.1.12; python_version >= "3.6" +django==3.1.13; python_version >= "3.6" djangorestframework==3.12.4; python_version >= "3.5" docutils==0.16; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" dulwich==0.19.16 -elasticsearch==7.13.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0" and python_version < "4") -executing==0.6.0 +elasticsearch==7.13.3; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0" and python_version < "4") +executing==0.7.0 gunicorn==20.1.0; python_version >= "3.5" -icecream==2.1.0 +icecream==2.1.1 idna==2.10; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" idutils==1.1.8 imagesize==1.2.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6" -importlib-metadata==4.5.0; python_version >= "3.6" and python_version < "3.8" +importlib-metadata==4.6.1; python_version >= "3.6" and python_version < "3.8" ipdb==0.13.9; python_version >= "2.7" ipython-genutils==0.2.0; python_version >= "3.7" -ipython==7.24.1; python_version >= "3.7" +ipython==7.25.0; python_version >= "3.7" isbnid-fork==0.5.2 isodate==0.6.0 -isort==5.8.0; python_version >= "3.6" and python_version < "4.0" +isort==5.9.1; python_full_version >= "3.6.1" and python_version < "4.0" jedi==0.18.0; python_version >= "3.7" jinja2==3.0.1; python_version >= "3.6" jsonschema==3.2.0 livereload==2.6.3; python_version >= "3.6" lxml==4.6.3; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") -markupsafe==1.1.1; python_version >= "3.5" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.5" +markupsafe==2.0.1; 
python_version >= "3.6" matplotlib-inline==0.1.2; python_version >= "3.7" mypy-extensions==0.4.3; python_version >= "3.6" -packaging==20.9; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6" +packaging==21.0; python_version >= "3.6" parso==0.8.2; python_version >= "3.7" pathspec==0.8.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" pexpect==4.8.0; sys_platform != "win32" and python_version >= "3.7" pickleshare==0.7.5; python_version >= "3.7" pika==1.2.0 -prompt-toolkit==3.0.18; python_full_version >= "3.6.1" and python_version >= "3.7" -psycopg2-binary==2.8.6; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0") +prompt-toolkit==3.0.19; python_full_version >= "3.6.1" and python_version >= "3.7" +psycopg2-binary==2.9.1; python_version >= "3.6" ptyprocess==0.7.0; sys_platform != "win32" and python_version >= "3.7" pygments==2.9.0; python_version >= "3.7" pyjwt==2.1.0; python_version >= "3.6" pyoai @ git+https://github.com/infrae/pyoai@5f6eba12 -pyparsing==2.4.7; python_version >= "2.6" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" -pyrsistent==0.17.3; python_version >= "3.5" +pyparsing==2.4.7; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" and python_version >= "3.6" +pyrsistent==0.18.0; python_version >= "3.6" python-box==5.3.0; python_version >= "3.6" python-dateutil==2.8.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.3.0") python-simplexquery==1.0.5.3 @@ -67,14 +67,14 @@ pytz==2021.1; python_version >= "3.6" and python_full_version < "3.0.0" or pytho pyyaml==5.4.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.6.0") rdflib==5.0.0 redis==3.5.3; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") -regex==2021.4.4; python_version >= "3.6" +regex==2021.7.6; python_version >= "3.6" requests==2.25.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" responses==0.13.3; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") -six==1.16.0; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" +six==1.16.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" snowballstemmer==2.1.0; python_version >= "3.6" sphinx-autobuild==2021.3.14; python_version >= "3.6" sphinx-rtd-theme==0.5.2 -sphinx==4.0.2; python_version >= "3.6" +sphinx==4.0.3; python_version >= "3.6" sphinxcontrib-applehelp==1.0.2; python_version >= "3.6" sphinxcontrib-devhelp==1.0.2; python_version >= "3.6" sphinxcontrib-htmlhelp==2.0.0; python_version >= "3.6" @@ -89,7 +89,7 @@ tornado==6.1; python_version >= "3.6" traitlets==5.0.5; python_version >= "3.7" typed-ast==1.4.3; python_version >= "3.6" typing-extensions==3.10.0.0; python_version < "3.8" and python_version >= "3.6" -urllib3==1.26.5; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version < "4" +urllib3==1.26.6; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version < "4" wcwidth==0.2.5; python_full_version >= "3.6.1" and python_version >= "3.7" xmltodict==0.12.0; 
(python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0") -zipp==3.4.1; python_version >= "3.6" and python_version < "3.8" +zipp==3.5.0; python_version >= "3.6" and python_version < "3.8" From 294949469378565a314cbb2cd855db00e3e92a2e Mon Sep 17 00:00:00 2001 From: Toni Date: Tue, 13 Jul 2021 09:47:09 +0300 Subject: [PATCH 042/160] fix coverage config and instructions for Docker env --- README.md | 6 +++++- src/.coveragerc | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 8b6391bc..b753029d 100755 --- a/README.md +++ b/README.md @@ -57,7 +57,11 @@ run the tests with command `DJANGO_ENV=unittests python manage.py test --paralle ### Running coverage (Docker) -`docker exec -it -e DJANGO_ENV=unittests $(docker ps -q -f name="metax-web*") coverage run manage.py test --parallel` +Collect data: `docker exec -it -e DJANGO_ENV=unittests $(docker ps -q -f name="metax-web*") coverage run manage.py test --parallel` + +Combine it: `docker exec -it $(docker ps -q -f name="metax-web*") coverage combine` + +Report: `docker exec -it $(docker ps -q -f name="metax-web*") coverage report` ### Generating coverage report diff --git a/src/.coveragerc b/src/.coveragerc index df33cfa7..63a369dc 100755 --- a/src/.coveragerc +++ b/src/.coveragerc @@ -12,6 +12,7 @@ omit = metax_api/onappstart.py branch = True concurrency = multiprocessing +data_file = coverage_data/coverage.db [report] exclude_lines = From 83ae5bb6ae74f511b12eda965fddec6e79be8fad Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Wed, 21 Jul 2021 10:46:50 +0300 Subject: [PATCH 043/160] removed duplicate tests from v2 tests --- .../api/rest/v2/views/common/__init__.py | 3 +- .../tests/api/rest/v2/views/common/auth.py | 119 -- .../tests/api/rest/v2/views/common/read.py | 268 +-- .../tests/api/rest/v2/views/common/write.py | 341 ---- .../api/rest/v2/views/contracts/__init__.py | 8 - .../api/rest/v2/views/contracts/contracts.py | 285 --- .../rest/v2/views/datacatalogs/__init__.py | 1 - .../api/rest/v2/views/datacatalogs/read.py | 34 - .../api/rest/v2/views/datacatalogs/write.py | 150 +- .../api/rest/v2/views/datasets/__init__.py | 2 - .../tests/api/rest/v2/views/datasets/pas.py | 240 +-- .../tests/api/rest/v2/views/datasets/read.py | 1105 +---------- .../rest/v2/views/datasets/referencedata.py | 657 ------- .../tests/api/rest/v2/views/datasets/rems.py | 702 ------- .../tests/api/rest/v2/views/datasets/write.py | 1722 +---------------- .../api/rest/v2/views/directories/read.py | 1519 +-------------- .../tests/api/rest/v2/views/files/__init__.py | 1 - .../tests/api/rest/v2/views/files/read.py | 396 ---- .../tests/api/rest/v2/views/files/write.py | 1557 +-------------- .../rest/v2/views/filestorages/__init__.py | 9 - .../api/rest/v2/views/filestorages/read.py | 43 - .../api/rest/v2/views/filestorages/write.py | 53 - .../api/rest/v2/views/schemas/__init__.py | 8 - .../tests/api/rest/v2/views/schemas/read.py | 35 - .../tests/api/rpc/v2/views/__init__.py | 1 - .../tests/api/rpc/v2/views/common_rpc.py | 22 - .../tests/api/rpc/v2/views/dataset_rpc.py | 108 +- .../tests/api/rpc/v2/views/file_rpc.py | 44 +- .../tests/api/rpc/v2/views/statistic_rpc.py | 753 +------ .../tests/middleware/test_middleware_v2.py | 206 -- 30 files changed, 20 insertions(+), 10372 deletions(-) delete mode 100755 src/metax_api/tests/api/rest/v2/views/common/write.py delete mode 100755 src/metax_api/tests/api/rest/v2/views/contracts/__init__.py delete mode 100755 
src/metax_api/tests/api/rest/v2/views/contracts/contracts.py delete mode 100755 src/metax_api/tests/api/rest/v2/views/datacatalogs/read.py delete mode 100755 src/metax_api/tests/api/rest/v2/views/datasets/referencedata.py delete mode 100755 src/metax_api/tests/api/rest/v2/views/datasets/rems.py delete mode 100755 src/metax_api/tests/api/rest/v2/views/files/read.py delete mode 100755 src/metax_api/tests/api/rest/v2/views/filestorages/__init__.py delete mode 100755 src/metax_api/tests/api/rest/v2/views/filestorages/read.py delete mode 100755 src/metax_api/tests/api/rest/v2/views/filestorages/write.py delete mode 100755 src/metax_api/tests/api/rest/v2/views/schemas/__init__.py delete mode 100755 src/metax_api/tests/api/rest/v2/views/schemas/read.py delete mode 100755 src/metax_api/tests/api/rpc/v2/views/common_rpc.py delete mode 100755 src/metax_api/tests/middleware/test_middleware_v2.py diff --git a/src/metax_api/tests/api/rest/v2/views/common/__init__.py b/src/metax_api/tests/api/rest/v2/views/common/__init__.py index 74a7ae5e..fb7f7576 100755 --- a/src/metax_api/tests/api/rest/v2/views/common/__init__.py +++ b/src/metax_api/tests/api/rest/v2/views/common/__init__.py @@ -6,5 +6,4 @@ # :license: MIT from .auth import * -from .read import * -from .write import * +from .read import * \ No newline at end of file diff --git a/src/metax_api/tests/api/rest/v2/views/common/auth.py b/src/metax_api/tests/api/rest/v2/views/common/auth.py index 125ff3bd..6b986c66 100755 --- a/src/metax_api/tests/api/rest/v2/views/common/auth.py +++ b/src/metax_api/tests/api/rest/v2/views/common/auth.py @@ -7,11 +7,9 @@ import logging -import responses from rest_framework import status from metax_api.tests.api.rest.base.views.datasets.write import CatalogRecordApiWriteCommon -from metax_api.tests.utils import get_test_oidc_token _logger = logging.getLogger(__name__) @@ -27,51 +25,6 @@ def setUp(self): # test user api_auth_user has some custom api permissions set in settings.py self._use_http_authorization(username="api_auth_user") - def test_read_access_ok(self): - """ - User api_auth_user should have read access to files api. - """ - response = self.client.get("/rest/v2/files/1") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - def test_read_access_fail(self): - """ - Anonymous user should not be able to access files and should not have any information - about the existence of requested file. - """ - self.client._credentials = {} - response = self.client.get("/rest/v2/files/1") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - def test_create_access_ok(self): - """ - User api_auth_user should have create access to datasets api. - """ - response = self.client.get("/rest/v2/datasets/1") - cr = response.data - cr["contract"] = 1 - response = self.client.put("/rest/v2/datasets/1", cr, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - def test_update_access_error(self): - """ - User api_auth_user should not have update access to files api. - """ - response = self.client.get("/rest/v2/files/1") - self.assertEqual(response.status_code, status.HTTP_200_OK) - file = response.data - file["file_format"] = "text/html" - - response = self.client.put("/rest/v2/files/1", file, format="json") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - - def test_delete_access_error(self): - """ - User api_auth_user should not have delete access to files api. 
- """ - response = self.client.delete("/rest/v2/files/1") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - def test_read_for_world_ok(self): """ Reading datasets api should be permitted even without any authorization. @@ -79,75 +32,3 @@ def test_read_for_world_ok(self): self.client._credentials = {} response = self.client.get("/rest/v2/datasets/1") self.assertEqual(response.status_code, status.HTTP_200_OK) - - -class ApiEndUserAccessAuthorization(CatalogRecordApiWriteCommon): - - """ - Test End User authentication and authorization. - - Since the End User authnz utilizes OIDC, and there is no legit local OIDC OP, - responses from /secure/validate_token are mocked. The endpoint only returns - 200 OK for successful token validation, or 403 for failed validation. - """ - - def setUp(self): - super().setUp() - self._use_http_authorization(method="bearer", token=get_test_oidc_token()) - - @responses.activate - def test_valid_token(self): - """ - Test api authentication with a valid token. Validation is mocked, ensures code following - valid authentication works. Should return successfully. - """ - self._mock_token_validation_succeeds() - response = self.client.get("/rest/v2/datasets/1") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - @responses.activate - def test_invalid_token(self): - """ - Test api authentication with an invalid token. Validation is mocked, ensures code following - failed authentication works. Should return 403. - - Note: This test basically takes care of a lot of test cases regarding token validation. Since - token validation is executed by apache, it is rather opaque for testing purposes here, returning - only 403 on failures (or possibly 401 on some cases). - - Possible reasons for failures include: - - expired token - - invalid signature - - malformed token - - bad claims (such as intended audience) - - In all cases, metax code execution stops at the middleware where authentication failed. - """ - self._mock_token_validation_fails() - response = self.client.get("/rest/v2/datasets/1") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - - @responses.activate - def test_end_user_read_access(self): - """ - Ensure end users are recognized in api read access permissions. - """ - self._mock_token_validation_succeeds() - - # datasets-api should be allowed for end users - response = self.client.get("/rest/v2/datasets/1") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - # contracts-api should not be allowed for end users - response = self.client.get("/rest/v2/contracts/1") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - - @responses.activate - def test_end_user_create_access_error(self): - """ - Ensure end users are recognized in api create access permissions. - """ - self._mock_token_validation_succeeds() - # end users should not have create access to files api. 
- response = self.client.post("/rest/v2/files", {}, format="json") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) diff --git a/src/metax_api/tests/api/rest/v2/views/common/read.py b/src/metax_api/tests/api/rest/v2/views/common/read.py index 688a376d..757a800a 100755 --- a/src/metax_api/tests/api/rest/v2/views/common/read.py +++ b/src/metax_api/tests/api/rest/v2/views/common/read.py @@ -5,79 +5,11 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT -from datetime import timedelta - -from django.utils import timezone -from pytz import timezone as tz from rest_framework import status -from metax_api.models import CatalogRecord, File from metax_api.tests.api.rest.base.views.datasets.read import CatalogRecordApiReadCommon -class ApiReadGetDeletedObjects(CatalogRecordApiReadCommon): - - """ - Test use of query parameter removed=bool, which is common for all apis - """ - - def test_removed_query_param(self): - obj = CatalogRecord.objects.get(pk=1) - obj.removed = True - obj.force_save() - obj2 = CatalogRecord.objects.get(pk=2) - obj2.removed = True - obj2.force_save() - response = self.client.get("/rest/v2/datasets/1") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - response = self.client.get("/rest/v2/datasets/1?removed=true") - self.assertEqual(response.status_code, status.HTTP_200_OK) - response = self.client.get("/rest/v2/datasets/metadata_version_identifiers") - self.assertEqual(obj.metadata_version_identifier not in response.data, True) - self.assertEqual(obj2.metadata_version_identifier not in response.data, True) - - obj = File.objects.get(pk=1) - obj.removed = True - obj.force_save() - obj2 = File.objects.get(pk=2) - obj2.removed = True - obj2.force_save() - response = self.client.get("/rest/v2/files/1") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - response = self.client.get("/rest/v2/files/1?removed=true") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - def test_removed_parameter_gets_correct_amount_of_objects(self): - path = "/rest/v2/datasets" - objects = CatalogRecord.objects.all().values() - - results = self.client.get("{0}?pagination=false&removed=false".format(path)).json() - initial_amt = len(results) - - results = self.client.get("{0}?pagination=false&removed=true".format(path)).json() - self.assertEqual( - len(results), - 0, - "Without removed objects remove=true should return 0 results", - ) - - self._use_http_authorization() - amt_to_delete = 2 - for i in range(amt_to_delete): - response = self.client.delete("{0}/{1}".format(path, objects[i]["id"])) - self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) - - results = self.client.get("{0}?pagination=false&removed=false".format(path)).json() - self.assertEqual( - len(results), - initial_amt - amt_to_delete, - "Non-removed object amount is incorrect", - ) - - results = self.client.get("{0}?pagination=false&removed=true".format(path)).json() - self.assertEqual(len(results), amt_to_delete, "Removed object amount is incorrect") - - class ApiReadPaginationTests(CatalogRecordApiReadCommon): """ @@ -97,202 +29,4 @@ def test_read_catalog_record_list_pagination_1(self): response.data["results"][0]["id"], 1, "Id of first result should have been 1", - ) - - def test_read_catalog_record_list_pagination_2(self): - for param in ["pagination=true", "pagination", ""]: - response = self.client.get("/rest/datasets?{}&limit=2&offset=2".format(param)) - self.assertEqual(response.status_code, 
status.HTTP_200_OK, response.data) - self.assertEqual( - len(response.data["results"]), - 2, - "There should have been exactly two results", - ) - self.assertEqual( - response.data["results"][0]["id"], - 3, - "Id of first result should have been 3", - ) - - def test_disable_pagination(self): - response = self.client.get("/rest/datasets?pagination=false") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual("next" not in response.data, True) - self.assertEqual("results" not in response.data, True) - - def test_pagination_ordering(self): - limit = 5 - - for order in ("preservation_state", "-preservation_state"): - - # vary offset from 0 to 20, in increments of 5 - for offset in range(0, 20, 5): - - response = self.client.get( - f"/rest/v2/datasets?limit={limit}&offset={offset}&ordering={order}" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - - from_api = [cr["preservation_state"] for cr in response.data["results"]] - - from_db = [ - r - for r in CatalogRecord.objects.filter() - .order_by(order) - .values_list("preservation_state", flat=True)[offset : offset + limit] - ] - - self.assertEqual(from_api, from_db) - - -class ApiReadHTTPHeaderTests(CatalogRecordApiReadCommon): - # - # header if-modified-since tests, single - # - - # If the value of the timestamp given in the header is equal or greater than the value of date_modified field, - # 404 should be returned since nothing has been modified. If the value of the timestamp given in the header is - # less than value of date_modified field, the object should be returned since it means the object has been - # modified after the header timestamp - - def test_get_with_if_modified_since_header_ok(self): - cr = CatalogRecord.objects.get(pk=self.pk) - date_modified = cr.date_modified - date_modified_in_gmt = timezone.localtime(date_modified, timezone=tz("GMT")) - - if_modified_since_header_value = date_modified_in_gmt.strftime("%a, %d %b %Y %H:%M:%S GMT") - headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value} - response = self.client.get("/rest/v2/datasets/%s" % self.identifier, **headers) - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - if_modified_since_header_value = (date_modified_in_gmt + timedelta(seconds=1)).strftime( - "%a, %d %b %Y %H:%M:%S GMT" - ) - headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value} - response = self.client.get("/rest/v2/datasets/%s" % self.identifier, **headers) - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - if_modified_since_header_value = (date_modified_in_gmt - timedelta(seconds=1)).strftime( - "%a, %d %b %Y %H:%M:%S GMT" - ) - headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value} - response = self.client.get("/rest/v2/datasets/%s" % self.identifier, **headers) - self.assertEqual(response.status_code, status.HTTP_200_OK) - - def test_get_with_if_modified_since_header_syntax_error(self): - cr = CatalogRecord.objects.get(pk=self.pk) - date_modified = cr.date_modified - date_modified_in_gmt = timezone.localtime(date_modified, timezone=tz("GMT")) - - if_modified_since_header_value = date_modified_in_gmt.strftime("%a, %d %b %Y %H:%M:%S UTC") - headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value} - response = self.client.get("/rest/v2/datasets/%s" % self.identifier, **headers) - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - # - # header if-modified-since tests, list - # - - # List operation returns always 200 even if no datasets 
match the if-modified-since criterium - - def test_list_get_with_if_modified_since_header_ok(self): - cr = CatalogRecord.objects.get(pk=self.pk) - date_modified = cr.date_modified - date_modified_in_gmt = timezone.localtime(date_modified, timezone=tz("GMT")) - - if_modified_since_header_value = date_modified_in_gmt.strftime("%a, %d %b %Y %H:%M:%S GMT") - headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value} - response = self.client.get("/rest/v2/datasets?limit=100", **headers) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(len(response.data.get("results")) == 6) - - if_modified_since_header_value = (date_modified_in_gmt + timedelta(seconds=1)).strftime( - "%a, %d %b %Y %H:%M:%S GMT" - ) - headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value} - response = self.client.get("/rest/v2/datasets?limit=100", **headers) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(len(response.data.get("results")) == 6) - - # The asserts below may brake if the date_modified timestamps or the amount of test data objects are altered - # in the test data - - if_modified_since_header_value = (date_modified_in_gmt - timedelta(seconds=1)).strftime( - "%a, %d %b %Y %H:%M:%S GMT" - ) - headers = {"HTTP_IF_MODIFIED_SINCE": if_modified_since_header_value} - response = self.client.get("/rest/v2/datasets?limit=100", **headers) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(len(response.data.get("results")) > 6) - self.assertTrue(len(response.data.get("results")) == 28) - - # should also work with records that have been recently created, and date_modified is empty - - cr.date_created = date_modified - cr.date_modified = None - cr.force_save() - response = self.client.get("/rest/v2/datasets?limit=100", **headers) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(len(response.data.get("results")) > 6) - self.assertTrue(len(response.data.get("results")) == 28) - - -class ApiReadQueryParamTests(CatalogRecordApiReadCommon): - - """ - Misc common query params tests - """ - - def test_return_requested_fields_only(self): - """ - While the param ?fields works with write operations too, the primary use case is when GETting. 
- """ - response = self.client.get("/rest/v2/datasets?fields=identifier") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual("identifier" in response.data["results"][0], True) - self.assertEqual(len(response.data["results"][0].keys()), 1) - self.assertEqual(len(response.data["results"][1].keys()), 1) - - response = self.client.get("/rest/v2/datasets/1?fields=identifier") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual("identifier" in response.data, True) - self.assertEqual(len(response.data.keys()), 1) - - response = self.client.get("/rest/v2/datasets/1?fields=identifier,data_catalog") - self.assertEqual("identifier" in response.data, True) - self.assertEqual("data_catalog" in response.data, True) - self.assertEqual(len(response.data.keys()), 2) - - response = self.client.get("/rest/v2/datasets/1?fields=not_found") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - # Anonymous user using fields parameter and not including research_dataset should not cause crashing - self.client._credentials = {} - response = self.client.get("/rest/v2/datasets/1?fields=identifier") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - def test_checksum_field_for_file(self): - """ - Check that checksum field works correctly - """ - - self._use_http_authorization("metax") - response = self.client.get("/rest/v2/files/1?fields=checksum") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(response.data.get("checksum"), "Checksum JSON should be returned") - self.assertTrue(response.data["checksum"].get("algorithm")) - self.assertTrue(response.data["checksum"].get("checked")) - self.assertTrue(response.data["checksum"].get("value")) - - response = self.client.get("/rest/v2/files/1?fields=checksum:value") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(response.data.get("checksum"), "Checksum JSON should be returned") - self.assertTrue(response.data["checksum"].get("value")) - self.assertFalse(response.data["checksum"].get("algorithm")) - - response = self.client.get("/rest/v2/files/1?fields=checksum:badvalue") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue( - "is not part of" in response.data["detail"][0], - "Should complain about field not found", - ) + ) \ No newline at end of file diff --git a/src/metax_api/tests/api/rest/v2/views/common/write.py b/src/metax_api/tests/api/rest/v2/views/common/write.py deleted file mode 100755 index 49ba9769..00000000 --- a/src/metax_api/tests/api/rest/v2/views/common/write.py +++ /dev/null @@ -1,341 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -from copy import deepcopy -from time import sleep - -from django.core.management import call_command -from rest_framework import status -from rest_framework.test import APITestCase - -from metax_api.models import CatalogRecord -from metax_api.tests.api.rest.base.views.datasets.write import CatalogRecordApiWriteCommon -from metax_api.tests.utils import TestClassUtils, test_data_file_path - -""" -Common phenomenas that concern all API's. - -The particular API selected for a test should not matter. However, -choosing to do tests using api /datasets is not a bad choice, since that -API is currently the most complex, where things are most likely to experience -a RUD. 
-""" - - -class ApiWriteCommon(APITestCase, TestClassUtils): - def setUp(self): - call_command("loaddata", test_data_file_path, verbosity=0) - self.test_new_data = self._get_new_test_data() - self._use_http_authorization() - - def _get_new_test_data(self): - record_from_test_data = self._get_object_from_test_data("catalogrecord", requested_index=0) - record_from_test_data.update( - { - "data_catalog": 1, - } - ) - record_from_test_data["research_dataset"].update( - { - "preferred_identifier": None, - } - ) - record_from_test_data.pop("id", None) - record_from_test_data.pop("identifier", None) - record_from_test_data.pop("contract", None) - return record_from_test_data - - -class ApiWriteCommonFieldsTests(ApiWriteCommon): - def test_certain_create_fields_are_read_only_after_create(self): - """ - The following fields should be read-only after initial creation of a resource: - - date_created - - user_created - - service_created - """ - response = self.client.post("/rest/v2/datasets", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - # some of the fields could be empty in test data. that is fine tho, the point is that - # they should not change later. - orig_date_created = response.data.get("date_created", None) - orig_user_created = response.data.get("user_created", None) - orig_service_created = response.data.get("service_created", None) - - altered = response.data - altered["date_created"] = altered["date_created"].replace("2017", "2010") - altered["user_created"] = "changed" - altered["service_created"] = "changed" - - response = self.client.put("/rest/v2/datasets/%d" % altered["id"], altered, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - response = self.client.get("/rest/v2/datasets/%d" % altered["id"], format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(orig_date_created, response.data.get("date_created", None)) - self.assertEqual(orig_user_created, response.data.get("user_created", None)) - self.assertEqual(orig_service_created, response.data.get("service_created", None)) - - def test_deletion_sets_removed_true_and_sets_value_for_date_removed(self): - response = self.client.post("/rest/v2/datasets", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - cr_id = response.data["id"] - response = self.client.delete("/rest/v2/datasets/%d" % cr_id) - self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) - - # Verify date_removed got set - response = self.client.get("/rest/v2/datasets/%d?removed" % cr_id) - self.assertTrue(response.data["removed"] is True) - self.assertTrue(response.data.get("date_removed", "").startswith("2")) - - def test_updating_sets_removed_false_and_empties_date_removed(self): - response = self.client.post("/rest/v2/datasets", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - cr_id = response.data["id"] - response = self.client.delete("/rest/v2/datasets/%d" % cr_id) - self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) - rd = self.client.get("/rest/v2/datasets/%d?removed" % cr_id).data - rd_date_rem = rd["date_removed"] - sleep(1) # ensure that next request happens with different timestamp - response = self.client.put("/rest/v2/datasets/%d?removed" % cr_id, rd, format="json") - 
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - response = self.client.get("/rest/v2/datasets/%d" % cr_id) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertTrue(response.data["removed"] is False) - self.assertTrue(response.data.get("date_removed") is None) - self.assertTrue( - response.data.get("date_modified") != rd_date_rem, - "date_modified should be updated", - ) - - -class ApiWriteHTTPHeaderTests(CatalogRecordApiWriteCommon): - - # - # header if-unmodified-since tests, single - # - - def test_update_with_if_unmodified_since_header_ok(self): - cr = self.client.get("/rest/v2/datasets/1").data - cr["preservation_description"] = "damn this is good coffee" - - cr_obj = CatalogRecord.objects.get(pk=1) - headers = { - "HTTP_IF_UNMODIFIED_SINCE": cr_obj.date_modified.strftime("%a, %d %b %Y %H:%M:%S GMT") - } - - response = self.client.put("/rest/v2/datasets/1", cr, format="json", **headers) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - def test_update_with_if_unmodified_since_header_precondition_failed_error(self): - cr = self.client.get("/rest/v2/datasets/1").data - cr["preservation_description"] = "the owls are not what they seem" - - headers = {"HTTP_IF_UNMODIFIED_SINCE": "Wed, 23 Sep 2009 22:15:29 GMT"} - - response = self.client.put("/rest/v2/datasets/1", cr, format="json", **headers) - self.assertEqual( - response.status_code, 412, "http status should be 412 = precondition failed" - ) - - def test_update_with_if_unmodified_since_header_syntax_error(self): - cr = self.client.get("/rest/v2/datasets/1").data - cr["preservation_description"] = "the owls are not what they seem" - - cr_obj = CatalogRecord.objects.get(pk=1) - headers = { - "HTTP_IF_UNMODIFIED_SINCE": cr_obj.date_modified.strftime("%a, %d %b %Y %H:%M:%S UTC") - } - - response = self.client.put("/rest/v2/datasets/1", cr, format="json", **headers) - self.assertEqual(response.status_code, 400, "http status should be 400") - - # - # header if-unmodified-since tests, list - # - - def test_update_list_with_if_unmodified_since_header_ok(self): - data_1 = self.client.get("/rest/v2/datasets/1", format="json").data - data_2 = self.client.get("/rest/v2/datasets/2", format="json").data - - data_1["preservation_description"] = "damn this is good coffee" - data_2["preservation_description"] = "damn this is good coffee also" - - headers = {"HTTP_IF_UNMODIFIED_SINCE": "value is not checked"} - response = self.client.put("/rest/v2/datasets", [data_1, data_2], format="json", **headers) - - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - def test_update_list_with_if_unmodified_since_header_error_1(self): - """ - One resource being updated was updated in the meantime, resulting in an error - """ - data_1 = self.client.get("/rest/v2/datasets/1", format="json").data - data_2 = self.client.get("/rest/v2/datasets/2", format="json").data - - data_1["preservation_description"] = "damn this is good coffee" - - # should result in error for this record - data_2["date_modified"] = "2002-01-01T10:10:10Z" - - headers = {"HTTP_IF_UNMODIFIED_SINCE": "value is not checked"} - response = self.client.put("/rest/v2/datasets", [data_1, data_2], format="json", **headers) - self.assertEqual( - len(response.data["failed"]) == 1, - True, - "there should be only one failed update", - ) - self.assertEqual( - "modified" in response.data["failed"][0]["errors"]["detail"][0], - True, - "error should indicate resource has been modified", - ) - - 
def test_update_list_with_if_unmodified_since_header_error_2(self): - """ - Field date_modified is missing, while the if-unmodified-since header is set, resulting in an error. - """ - data_1 = self.client.get("/rest/v2/datasets/1", format="json").data - data_2 = self.client.get("/rest/v2/datasets/2", format="json").data - - data_1["preservation_description"] = "damn this is good coffee" - - # should result in error for this record - data_2.pop("date_modified") - - headers = {"HTTP_IF_UNMODIFIED_SINCE": "value is not checked"} - response = self.client.patch( - "/rest/v2/datasets", [data_1, data_2], format="json", **headers - ) - self.assertEqual( - "required" in response.data["failed"][0]["errors"]["detail"][0], - True, - "error should say that field date_modified is required", - ) - - def test_update_list_with_if_unmodified_since_header_error_3(self): - """ - One resource being updated has never been modified before. Make sure that date_modified = None - is an accepted value. The end result should be that the resource has been modified, since the - server version has a timestamp set in date_modified. - """ - data_1 = self.client.get("/rest/v2/datasets/1", format="json").data - data_2 = self.client.get("/rest/v2/datasets/2", format="json").data - - data_1["preservation_description"] = "damn this is good coffee" - data_2["preservation_description"] = "damn this is good coffee also" - data_2["date_modified"] = None - - headers = {"HTTP_IF_UNMODIFIED_SINCE": "value is not checked"} - response = self.client.put("/rest/v2/datasets", [data_1, data_2], format="json", **headers) - self.assertEqual( - "modified" in response.data["failed"][0]["errors"]["detail"][0], - True, - "error should indicate resource has been modified", - ) - - -class ApiWriteAtomicBulkOperations(CatalogRecordApiWriteCommon): - - """ - Test use of the ?atomic=true/false parameter in bulk create and update operations. When the atomic flag - is used, all changes should be rolled back if even one operation fails.
- """ - - def test_atomic_create(self): - cr = self.client.get("/rest/v2/datasets/1", format="json").data - cr.pop("id") - cr.pop("identifier") - cr["research_dataset"].pop("metadata_version_identifier") - cr["research_dataset"].pop("preferred_identifier") - cr2 = deepcopy(cr) - cr3 = deepcopy(cr) - cr3.pop("data_catalog") # causes error - - record_count_before = CatalogRecord.objects.all().count() - - response = self.client.post("/rest/v2/datasets?atomic=true", [cr, cr2, cr3], format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content) - self.assertEqual(len(response.data["success"]) == 0, True, response.data) - self.assertEqual(len(response.data["failed"]) == 1, True, response.data) - self.assertEqual("detail" in response.data, True, response.data) - self.assertEqual("atomic" in response.data["detail"][0], True, response.data) - self.assertEqual( - record_count_before, - CatalogRecord.objects.all().count(), - "shouldnt create new records", - ) - - def test_atomic_update(self): - cr = self.client.get("/rest/v2/datasets/1", format="json").data - cr2 = self.client.get("/rest/v2/datasets/2", format="json").data - cr3 = self.client.get("/rest/v2/datasets/3", format="json").data - cr["research_dataset"]["title"]["en"] = "updated" - cr2["research_dataset"]["title"]["en"] = "updated" - cr3.pop("data_catalog") # causes error - - record_count_before = CatalogRecord.objects.all().count() - - response = self.client.put("/rest/v2/datasets?atomic=true", [cr, cr2, cr3], format="json") - - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual(len(response.data["success"]) == 0, True) - self.assertEqual(len(response.data["failed"]) == 1, True) - self.assertEqual("atomic" in response.data["detail"][0], True) - self.assertEqual( - record_count_before, - CatalogRecord.objects.all().count(), - "shouldnt create new versions", - ) - - cr = self.client.get("/rest/v2/datasets/1", format="json").data - cr2 = self.client.get("/rest/v2/datasets/2", format="json").data - self.assertEqual(cr["research_dataset"]["title"]["en"] == "updated", False) - self.assertEqual(cr2["research_dataset"]["title"]["en"] == "updated", False) - - -class ApiWriteQueryParamTests(ApiWriteCommon): - - """ - Misc common query params tests - """ - - def test_dryrun(self): - """ - Ensure query parameter ?dryrun=true returns same result as they normally would, but - changes made during the request do not get saved in the db. 
- """ - response = self.client.post( - "/rest/v2/datasets?dryrun=true", self.test_new_data, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual("id" in response.data, True) - found = CatalogRecord.objects.filter(pk=response.data["id"]).exists() - self.assertEqual( - found, - False, - "record should not get truly created when using parameter dryrun", - ) - - -class ApiWriteCommonOperations(ApiWriteCommon): - """ - Common write operations - """ - - def test_create_file_with_empty_body_fails(self): - response = self.client.post("/rest/datasets") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue("Request body is required" in response.data["detail"][0]) - - response = self.client.post("/rest/files") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue("Request body is required" in response.data["detail"][0]) diff --git a/src/metax_api/tests/api/rest/v2/views/contracts/__init__.py b/src/metax_api/tests/api/rest/v2/views/contracts/__init__.py deleted file mode 100755 index bd6a6a7f..00000000 --- a/src/metax_api/tests/api/rest/v2/views/contracts/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -from .contracts import ContractApiReadTestV1, ContractApiWriteTestV1 diff --git a/src/metax_api/tests/api/rest/v2/views/contracts/contracts.py b/src/metax_api/tests/api/rest/v2/views/contracts/contracts.py deleted file mode 100755 index cbf133af..00000000 --- a/src/metax_api/tests/api/rest/v2/views/contracts/contracts.py +++ /dev/null @@ -1,285 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -from django.core.management import call_command -from rest_framework import status -from rest_framework.test import APITestCase - -from metax_api.models import CatalogRecord, Contract -from metax_api.tests.utils import TestClassUtils, test_data_file_path - - -class ContractApiReadTestV1(APITestCase, TestClassUtils): - @classmethod - def setUpClass(cls): - """ - Loaded only once for test cases inside this class. 
- """ - call_command("loaddata", test_data_file_path, verbosity=0) - super(ContractApiReadTestV1, cls).setUpClass() - - def setUp(self): - contract_from_test_data = self._get_object_from_test_data("contract", requested_index=0) - self.pk = contract_from_test_data["id"] - self.identifier = contract_from_test_data["contract_json"]["identifier"] - self._use_http_authorization() - - def test_read_contract_list(self): - response = self.client.get("/rest/v2/datasets") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - def test_read_contract_details_by_pk(self): - response = self.client.get("/rest/v2/contracts/%s" % self.pk) - self.assertEqual(response.status_code, status.HTTP_200_OK) - - def test_read_contract_details_by_identifier(self): - response = self.client.get("/rest/v2/contracts/%s" % self.identifier) - self.assertEqual(response.status_code, status.HTTP_200_OK) - - def test_read_contract_details_not_found(self): - response = self.client.get("/rest/v2/contracts/shouldnotexist") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - -class ContractApiWriteTestV1(APITestCase, TestClassUtils): - def setUp(self): - """ - Reloaded for every test case - """ - call_command("loaddata", test_data_file_path, verbosity=0) - self._use_http_authorization() - contract_from_test_data = self._get_object_from_test_data("contract") - self.pk = contract_from_test_data["id"] - - """ - New data that is sent to the server for POST, PUT, PATCH requests. Modified - slightly as approriate for different purposes - """ - self.test_new_data = self._get_new_test_data() - self.second_test_new_data = self._get_second_new_test_data() - self._use_http_authorization() - - def test_create_contract_with_existing_identifier(self): - self.test_new_data["pk"] = self.pk - response = self.client.post("/rest/v2/contracts/", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - response = self.client.post("/rest/v2/contracts/", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue( - "already exists" in response.data["contract_json"][0], - "Error regarding dublicated identifier", - ) - - def test_update_contract(self): - self.test_new_data["pk"] = self.pk - response = self.client.put( - "/rest/v2/contracts/%s" % self.pk, self.test_new_data, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - def test_update_contract_not_found(self): - response = self.client.put( - "/rest/v2/contracts/doesnotexist", self.test_new_data, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - def test_add_catalog_record_to_contract(self): - new_catalog_record = self.client.get("/rest/v2/datasets/1", format="json").data - new_catalog_record.pop("id") - new_catalog_record.pop("identifier") - new_catalog_record["research_dataset"].pop("preferred_identifier") - new_catalog_record["contract"] = self.pk - - response = self.client.post("/rest/v2/datasets", new_catalog_record, format="json") - created_catalog_record = response.data - - try: - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - except Exception: - print(response.data) - raise - self.assertEqual("research_dataset" in created_catalog_record.keys(), True) - self.assertEqual(created_catalog_record["contract"]["id"], self.pk) - - contract = Contract.objects.get(pk=self.pk) - try: - contract.records.get(pk=response.data["id"]) - 
except CatalogRecord.DoesNotExist: - raise Exception( - "The added CatalogRecord should appear in the relation contract.records" - ) - - response = self.client.get("/rest/v2/contracts/%d/datasets" % self.pk) - self.assertIn( - created_catalog_record["id"], - [cr["id"] for cr in response.data], - "The added CatalogRecord should appear in the results of /contracts/id/datasets", - ) - - def test_delete_contract(self): - url = "/rest/v2/contracts/%s" % self.pk - response = self.client.delete(url) - self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) - response = self.client.get(url) - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - deleted_contract = None - - try: - deleted_contract = Contract.objects.get(pk=self.pk) - except Contract.DoesNotExist: - pass - - if deleted_contract: - raise Exception( - "Deleted Contract should not be retrievable from the default objects table" - ) - - try: - deleted_contract = Contract.objects_unfiltered.get(pk=self.pk) - except Contract.DoesNotExist: - raise Exception("Deleted contract should not be deleted from the db") - - self.assertEqual( - deleted_contract.removed, - True, - "Deleted contract should be marked removed in the db", - ) - self.assertEqual( - deleted_contract.date_modified, - deleted_contract.date_removed, - "date_modified should be updated", - ) - - def test_delete_contract_catalog_records_are_marked_removed(self): - # add two new records to contract - new_catalog_record = self._get_new_catalog_record_test_data() - new_catalog_record["contract"] = self.pk - self.client.post("/rest/v2/datasets", new_catalog_record, format="json") - self.client.post("/rest/v2/datasets", new_catalog_record, format="json") - - self.client.delete("/rest/v2/contracts/%s" % self.pk) - contract = Contract.objects_unfiltered.get(pk=self.pk) - related_crs = contract.records(manager="objects_unfiltered").all() - response_get_1 = self.client.get("/rest/v2/datasets/%d" % related_crs[0].id) - self.assertEqual( - response_get_1.status_code, - status.HTTP_404_NOT_FOUND, - "CatalogRecords of deleted contracts should not be retrievable through the api", - ) - response_get_2 = self.client.get("/rest/v2/datasets/%d" % related_crs[1].id) - self.assertEqual( - response_get_2.status_code, - status.HTTP_404_NOT_FOUND, - "CatalogRecords of deleted contracts should not be retrievable through the api", - ) - - for cr in related_crs: - self.assertEqual( - cr.removed, - True, - "Related CatalogRecord objects should be marked as removed", - ) - - def test_deleted_catalog_record_is_not_listed_in_contract_datasets_api(self): - deleted_id = 1 - self.client.delete("/rest/v2/datasets/%d" % deleted_id) - response = self.client.get("/rest/v2/contracts/%d/datasets" % self.pk) - self.assertNotIn( - deleted_id, - [cr["id"] for cr in response.data], - "The deleted CatalogRecord should not appear in the results of /contracts/id/datasets", - ) - - def _get_new_test_data(self): - return { - "contract_json": { - "title": "Title of new contract", - "identifier": "optional-identifier-new", - "quota": 111204, - "created": "2014-01-17T08:19:58Z", - "modified": "2014-01-17T08:19:58Z", - "description": "Description of unknown length", - "contact": [ - { - "name": "Contact Name", - "phone": "+358501231234", - "email": "contact.email@csc.fi", - } - ], - "organization": { - "organization_identifier": "1234567abc", - "name": "Mysterious organization", - }, - "related_service": [{"identifier": "local:service:id", "name": "Name of Service"}], - "validity": {"start_date": 
"2014-01-17"}, - } - } - - def _get_second_new_test_data(self): - return { - "contract_json": { - "title": "Title of second contract", - "identifier": "optional-identifier-for-second", - "quota": 111204, - "created": "2014-01-17T08:19:58Z", - "modified": "2014-01-17T08:19:58Z", - "description": "Description of unknown length", - "contact": [ - { - "name": "Contact Name", - "phone": "+358501231234", - "email": "contact.email@csc.fi", - } - ], - "organization": { - "organization_identifier": "1234567abc", - "name": "Mysterious organization", - }, - "related_service": [{"identifier": "local:service:id", "name": "Name of Service"}], - "validity": {"start_date": "2014-01-17"}, - } - } - - def _get_new_catalog_record_test_data(self): - catalog_record_from_test_data = self._get_object_from_test_data( - "catalogrecord", requested_index=0 - ) - return { - "identifier": "http://urn.fi/urn:nbn:fi:iiidentifier", - "data_catalog": self._get_object_from_test_data("datacatalog", requested_index=0), - "research_dataset": { - "modified": "2014-01-17T08:19:58Z", - "version_notes": ["This version contains changes to x and y."], - "title": {"en": "Wonderful Title"}, - "description": [ - { - "en": "A descriptive description describing the contents of this dataset. Must be descriptive." - } - ], - "creator": [ - { - "@type": "Person", - "name": "Teppo Testaaja", - "member_of": { - "@type": "Organization", - "name": {"fi": "Mysterious Organization"}, - }, - } - ], - "curator": [ - { - "@type": "Organization", - "name": {"en": "Curator org", "fi": "Organisaatio"}, - } - ], - "language": [{"identifier": "http://lexvo.org/id/iso639-3/aar"}], - "total_files_byte_size": 1024, - "files": catalog_record_from_test_data["research_dataset"]["files"], - }, - } diff --git a/src/metax_api/tests/api/rest/v2/views/datacatalogs/__init__.py b/src/metax_api/tests/api/rest/v2/views/datacatalogs/__init__.py index 6adb6e69..d60060fb 100755 --- a/src/metax_api/tests/api/rest/v2/views/datacatalogs/__init__.py +++ b/src/metax_api/tests/api/rest/v2/views/datacatalogs/__init__.py @@ -5,5 +5,4 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT -from .read import * from .write import * diff --git a/src/metax_api/tests/api/rest/v2/views/datacatalogs/read.py b/src/metax_api/tests/api/rest/v2/views/datacatalogs/read.py deleted file mode 100755 index 46f29bcd..00000000 --- a/src/metax_api/tests/api/rest/v2/views/datacatalogs/read.py +++ /dev/null @@ -1,34 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -from django.core.management import call_command -from rest_framework import status -from rest_framework.test import APITestCase - -from metax_api.tests.utils import TestClassUtils, test_data_file_path - - -class DataCatalogApiReadBasicTests(APITestCase, TestClassUtils): - @classmethod - def setUpClass(cls): - """ - Loaded only once for test cases inside this class. 
- """ - call_command("loaddata", test_data_file_path, verbosity=0) - super(DataCatalogApiReadBasicTests, cls).setUpClass() - - def setUp(self): - data_catalog_from_test_data = self._get_object_from_test_data( - "datacatalog", requested_index=0 - ) - self._use_http_authorization() - self.pk = data_catalog_from_test_data["id"] - self.identifier = data_catalog_from_test_data["catalog_json"]["identifier"] - - def test_basic_get(self): - response = self.client.get("/rest/v2/datacatalogs/%s" % self.identifier) - self.assertEqual(response.status_code, status.HTTP_200_OK) diff --git a/src/metax_api/tests/api/rest/v2/views/datacatalogs/write.py b/src/metax_api/tests/api/rest/v2/views/datacatalogs/write.py index 6a0a60fb..c0f1dc99 100755 --- a/src/metax_api/tests/api/rest/v2/views/datacatalogs/write.py +++ b/src/metax_api/tests/api/rest/v2/views/datacatalogs/write.py @@ -9,8 +9,6 @@ from rest_framework import status from rest_framework.test import APITestCase -from metax_api.models import DataCatalog -from metax_api.services.redis_cache_service import RedisClient from metax_api.tests.utils import TestClassUtils, test_data_file_path @@ -31,14 +29,6 @@ def setUp(self): class DataCatalogApiWriteBasicTests(DataCatalogApiWriteCommon): - def test_identifier_is_auto_generated(self): - response = self.client.post("/rest/v2/datacatalogs", self.new_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertNotEqual( - response.data["catalog_json"].get("identifier", None), - None, - "identifier should be created", - ) def test_research_dataset_schema_missing_ok(self): self.new_test_data["catalog_json"].pop("research_dataset_schema", None) @@ -55,142 +45,4 @@ def test_disallow_versioning_in_harvested_catalogs(self): self.new_test_data["catalog_json"]["harvested"] = True response = self.client.post("/rest/v2/datacatalogs", self.new_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual("versioning" in response.data["detail"][0], True, response.data) - - def test_create_identifier_already_exists(self): - response = self.client.post("/rest/v2/datacatalogs", self.new_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED) - response = self.client.post("/rest/v2/datacatalogs", self.new_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual( - "already exists" in response.data["catalog_json"]["identifier"][0], - True, - response.data, - ) - - def test_delete(self): - response = self.client.delete("/rest/v2/datacatalogs/1") - self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) - - dc_deleted = DataCatalog.objects_unfiltered.get(pk=1) - self.assertEqual(dc_deleted.removed, True) - self.assertEqual( - dc_deleted.date_modified, - dc_deleted.date_removed, - "date_modified should be updated", - ) - - -class DataCatalogApiWriteReferenceDataTests(DataCatalogApiWriteCommon): - """ - Tests related to reference_data validation and data catalog fields population - from reference_data, according to given uri or code as the value. 
- """ - - def test_create_data_catalog_with_invalid_reference_data(self): - dc = self.new_test_data["catalog_json"] - dc["field_of_science"][0]["identifier"] = "nonexisting" - dc["language"][0]["identifier"] = "nonexisting" - dc["access_rights"]["access_type"][0]["identifier"] = "nonexisting" - dc["access_rights"]["license"][0]["identifier"] = "nonexisting" - response = self.client.post("/rest/v2/datacatalogs", self.new_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual("catalog_json" in response.data.keys(), True) - self.assertEqual(len(response.data["catalog_json"]), 4) - - def test_create_data_catalog_populate_fields_from_reference_data(self): - """ - 1) Insert codes from cached reference data to data catalog identifier fields - that will be validated, and then populated - 2) Check that that the values in data catalog identifier fields are changed from - codes to uris after a successful create - 3) Check that labels have also been copied to data catalog to their approriate fields - """ - cache = RedisClient() - refdata = cache.get("reference_data")["reference_data"] - orgdata = cache.get("reference_data")["organization_data"] - refs = {} - - data_types = [ - "access_type", - "field_of_science", - "language", - "license", - ] - - # the values in these selected entries will be used throghout the rest of the test case - for dtype in data_types: - entry = refdata[dtype][0] - refs[dtype] = { - "code": entry["code"], - "uri": entry["uri"], - "label": entry.get("label", None), - } - - refs["organization"] = { - "uri": orgdata["organization"][1]["uri"], - "code": orgdata["organization"][1]["code"], - "label": orgdata["organization"][1]["label"], - } - - # replace the relations with objects that have only the identifier set with code as value, - # to easily check that label was populated (= that it appeared in the dataset after create) - # without knowing its original value from the generated test data - dc = self.new_test_data["catalog_json"] - dc["field_of_science"][0] = {"identifier": refs["field_of_science"]["code"]} - dc["language"][0] = {"identifier": refs["language"]["code"]} - dc["access_rights"]["access_type"][0] = {"identifier": refs["access_type"]["code"]} - dc["access_rights"]["license"][0] = {"identifier": refs["license"]["code"]} - - # these have other required fields, so only update the identifier with code - dc["publisher"]["identifier"] = refs["organization"]["code"] - dc["access_rights"]["has_rights_related_agent"][0]["identifier"] = refs["organization"][ - "code" - ] - - response = self.client.post("/rest/v2/datacatalogs", self.new_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual("catalog_json" in response.data.keys(), True) - - new_dc = response.data["catalog_json"] - self._assert_uri_copied_to_identifier(refs, new_dc) - self._assert_label_copied_to_pref_label(refs, new_dc) - self._assert_label_copied_to_title(refs, new_dc) - self._assert_label_copied_to_name(refs, new_dc) - - def _assert_uri_copied_to_identifier(self, refs, new_dc): - self.assertEqual( - refs["field_of_science"]["uri"], new_dc["field_of_science"][0]["identifier"] - ) - self.assertEqual(refs["language"]["uri"], new_dc["language"][0]["identifier"]) - self.assertEqual( - refs["access_type"]["uri"], - new_dc["access_rights"]["access_type"][0]["identifier"], - ) - self.assertEqual( - refs["license"]["uri"], new_dc["access_rights"]["license"][0]["identifier"] - ) - 
self.assertEqual(refs["organization"]["uri"], new_dc["publisher"]["identifier"]) - self.assertEqual( - refs["organization"]["uri"], - new_dc["access_rights"]["has_rights_related_agent"][0]["identifier"], - ) - - def _assert_label_copied_to_pref_label(self, refs, new_dc): - self.assertEqual( - refs["field_of_science"]["label"], - new_dc["field_of_science"][0].get("pref_label", None), - ) - self.assertEqual( - refs["access_type"]["label"], - new_dc["access_rights"]["access_type"][0].get("pref_label", None), - ) - - def _assert_label_copied_to_title(self, refs, new_dc): - self.assertEqual( - refs["license"]["label"], - new_dc["access_rights"]["license"][0].get("title", None), - ) - - def _assert_label_copied_to_name(self, refs, new_dc): - self.assertEqual(refs["organization"]["label"], new_dc["publisher"]["name"]) + self.assertEqual("versioning" in response.data["detail"][0], True, response.data) \ No newline at end of file diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/__init__.py b/src/metax_api/tests/api/rest/v2/views/datasets/__init__.py index 548b55da..95739e3d 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/__init__.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/__init__.py @@ -10,6 +10,4 @@ from .filehandling import * from .pas import * from .read import * -from .referencedata import * -from .rems import * from .write import * diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/pas.py b/src/metax_api/tests/api/rest/v2/views/datasets/pas.py index fbbabc18..365782d9 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/pas.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/pas.py @@ -5,140 +5,15 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT -from datetime import timedelta - from django.conf import settings as django_settings from rest_framework import status -from metax_api.models import CatalogRecordV2, Contract, DataCatalog +from metax_api.models import CatalogRecordV2, DataCatalog from metax_api.utils import get_tz_aware_now_without_micros -from .read import CatalogRecordApiReadCommon from .write import CatalogRecordApiWriteCommon -class CatalogRecordApiReadPreservationStateTests(CatalogRecordApiReadCommon): - - """ - preservation_state filtering - """ - - def test_read_catalog_record_search_by_preservation_state(self): - """ - Various simple filtering requests - """ - response = self.client.get("/rest/v2/datasets?preservation_state=0") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual( - len(response.data["results"]) > 2, - True, - "There should have been multiple results for preservation_state=0 request", - ) - - response = self.client.get("/rest/v2/datasets?preservation_state=10") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 2) - - response = self.client.get("/rest/v2/datasets?preservation_state=40") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 1) - - def test_read_catalog_record_search_by_preservation_state_666(self): - response = self.client.get("/rest/v2/datasets?preservation_state=666") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 0, "should return empty list") - - def test_read_catalog_record_search_by_preservation_state_many(self): - response = self.client.get("/rest/v2/datasets?preservation_state=10,40") - self.assertEqual(response.status_code, status.HTTP_200_OK) - 
self.assertEqual(len(response.data["results"]), 3) - self.assertEqual(response.data["results"][0]["preservation_state"], 10) - self.assertEqual(response.data["results"][1]["preservation_state"], 10) - self.assertEqual(response.data["results"][2]["preservation_state"], 40) - - def test_read_catalog_record_search_by_preservation_state_invalid_value(self): - response = self.client.get("/rest/v2/datasets?preservation_state=1,a") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual( - "is not an integer" in response.data["preservation_state"][0], - True, - "Error should say letter a is not an integer", - ) - - response = self.client.get("/rest/v2/datasets?preservation_state=1,a") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual( - "is not an integer" in response.data["preservation_state"][0], - True, - "Error should say letter a is not an integer", - ) - - -class CatalogRecordApiReadPASFilter(CatalogRecordApiReadCommon): - def test_pas_filter(self): - """ - Test query param pas_filter which should search from various fields using the same search term. - """ - - # set test conditions - cr = CatalogRecordV2.objects.get(pk=1) - cr.preservation_state = 10 - cr.contract_id = 1 - cr.research_dataset["title"]["en"] = "Catch me if you can" - cr.research_dataset["title"]["fi"] = "Ota kiinni jos saat" - cr.research_dataset["curator"] = [] - cr.research_dataset["curator"].append({"name": "Seppo Hovi"}) - cr.research_dataset["curator"].append({"name": "Esa Nieminen"}) - cr.research_dataset["curator"].append({"name": "Aku Ankka"}) - cr.research_dataset["curator"].append({"name": "Jaska Jokunen"}) - cr.force_save() - - contract = Contract.objects.get(pk=1) - contract.contract_json["title"] = "An Important Agreement" - contract.save() - - metax_user = django_settings.API_METAX_USER - self._use_http_authorization( - username=metax_user["username"], password=metax_user["password"] - ) - - # beging testing - - response = self.client.get("/rest/v2/datasets?preservation_state=10&pas_filter=if you") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 1) - - response = self.client.get("/rest/v2/datasets?preservation_state=10&pas_filter=kiinni jos") - self.assertEqual(len(response.data["results"]), 1) - - response = self.client.get("/rest/v2/datasets?preservation_state=10&pas_filter=niemine") - self.assertEqual(len(response.data["results"]), 1) - - # more than 3 curators, requires typing exact case-sensitive name... see comments in related code - response = self.client.get("/rest/v2/datasets?preservation_state=10&pas_filter=jokunen") - self.assertEqual(len(response.data["results"]), 0) - response = self.client.get( - "/rest/v2/datasets?preservation_state=10&pas_filter=Jaska Jokunen" - ) - self.assertEqual(len(response.data["results"]), 1) - - # contract_id 1 has several other associated test datasets - response = self.client.get("/rest/v2/datasets?preservation_state=10&pas_filter=agreement") - self.assertEqual(len(response.data["results"]), 3) - - response = self.client.get( - "/rest/v2/datasets?preservation_state=10&pas_filter=does not exist" - ) - self.assertEqual(len(response.data["results"]), 0) - - def test_pas_filter_is_restricted(self): - """ - Query param is permitted to users metax and tpas. 
- """ - response = self.client.get("/rest/v2/datasets?preservation_state=10&pas_filter=hmmm") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - - class CatalogRecordApiWritePreservationStateTests(CatalogRecordApiWriteCommon): """ @@ -172,20 +47,6 @@ def setUp(self): catalog_record_services_edit="testuser,api_auth_user,metax", ) - def test_update_catalog_record_pas_state_allowed_value(self): - cr = self.client.get("/rest/v2/datasets/1").data - cr["preservation_state"] = 30 - response = self.client.put("/rest/v2/datasets/1", cr, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - cr = CatalogRecordV2.objects.get(pk=1) - self.assertEqual( - cr.preservation_state_modified - >= get_tz_aware_now_without_micros() - timedelta(seconds=5), - True, - "Timestamp should have been updated during object update", - ) - def test_update_pas_state_to_needs_revalidation(self): """ When dataset metadata is updated, and preservation_state in (40, 50, 70), metax should @@ -226,40 +87,6 @@ def test_prevent_file_changes_when_record_in_pas_process(self): response.data, ) - def test_non_pas_dataset_unallowed_preservation_state_values(self): - # update non-pas dataset - cr = self.client.get("/rest/v2/datasets/1").data - - values = [ - 11, # not one of known values - 90, # value not allowed for non-pas datasets - ] - - for invalid_value in values: - cr["preservation_state"] = invalid_value - response = self.client.put("/rest/v2/datasets/1", cr, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - - def test_pas_dataset_unallowed_preservation_state_values(self): - # create pas dataset and update with invalid values - cr = self.client.get("/rest/v2/datasets/1").data - cr["preservation_state"] = 80 - response = self.client.put("/rest/v2/datasets/1", cr, format="json") - cr = self.client.get( - "/rest/v2/datasets/%d" % response.data["preservation_dataset_version"]["id"] - ).data - - values = [ - 70, # value not allowed for non-pas datasets - 111, # not one of known values - 150, # not one of known values - ] - - for invalid_value in values: - cr["preservation_state"] = invalid_value - response = self.client.put("/rest/v2/datasets/1", cr, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - def test_pas_version_is_created_on_preservation_state_80(self): """ When preservation_state is updated to 'accepted to pas', a copy should be created into @@ -318,67 +145,4 @@ def test_pas_version_is_created_on_preservation_state_80(self): ) # note: trying to assert querysets will result in failure. must evaluate the querysets first by iterating them - self.assertEqual([f for f in cr_files], [f for f in cr_pas_files]) - - def test_origin_dataset_cant_have_multiple_pas_versions(self): - """ - If state is update to 'accepted to pas', and relation preservation_dataset_version - is detected, an error should be raised. - """ - self._create_pas_dataset_from_id(1) - - cr_data = {"preservation_state": 80} - response = self.client.patch("/rest/v2/datasets/1", cr_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual( - "already has a PAS version" in response.data["detail"][0], - True, - response.data, - ) - - def test_dataset_can_be_created_directly_into_pas_catalog(self): - """ - Datasets that are created directly into PAS catalog should not have any enforced - rules about changing preservation_state value. 
- """ - self.cr_test_data["data_catalog"] = django_settings.PAS_DATA_CATALOG_IDENTIFIER - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual( - response.data["research_dataset"]["preferred_identifier"].startswith("doi"), - True, - response.data["research_dataset"]["preferred_identifier"], - ) - - # when created directly into pas catalog, preservation_state can be updated - # to whatever, whenever - ps_values = [v[0] for v in CatalogRecordV2.PRESERVATION_STATE_CHOICES] - for ps in ps_values: - cr_data = {"preservation_state": ps} - response = self.client.patch( - "/rest/v2/datasets/%d" % response.data["id"], cr_data, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - cr_data = {"preservation_state": 0} - response = self.client.patch( - "/rest/v2/datasets/%d" % response.data["id"], cr_data, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - def test_unfreezing_files_does_not_deprecate_pas_dataset(self): - """ - Even if the origin dataset is deprecated as a result of unfreezing its files, - the PAS dataset should be safe from being deprecated, as the files have already - been stored in PAS. - """ - cr = self._create_pas_dataset_from_id(1) - response = self.client.delete("/rest/v2/files/1", format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - response = self.client.get( - "/rest/v2/datasets/%d" % cr["preservation_dataset_version"]["id"], - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data["deprecated"], False) + self.assertEqual([f for f in cr_files], [f for f in cr_pas_files]) \ No newline at end of file diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/read.py b/src/metax_api/tests/api/rest/v2/views/datasets/read.py index 8f5ad4a6..7e3bd197 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/read.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/read.py @@ -5,15 +5,10 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT -import urllib.parse from copy import deepcopy -from datetime import timedelta -import responses from django.conf import settings from django.core.management import call_command -from django.utils import timezone -from pytz import timezone as tz from rest_framework import status from rest_framework.test import APITestCase @@ -55,241 +50,6 @@ def create_legacy_dataset(self): return response.data["id"] -class CatalogRecordApiReadBasicTests(CatalogRecordApiReadCommon): - - """ - Basic read operations - """ - - def test_read_catalog_record_list(self): - response = self.client.get("/rest/v2/datasets") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - def test_read_catalog_record_list_by_ids(self): - # Successful tests - - cr_id_lists = [ - [1, 4, 6], - [1, 4, 6, 777], - [ - "cr955e904-e3dd-4d7e-99f1-3fed446f96d7", - "cr955e904-e3dd-4d7e-99f1-3fed446f96d6", - "cr955e904-e3dd-4d7e-99f1-3fed446f96d5", - ], - [ - "cr955e904-e3dd-4d7e-99f1-3fed446f96d7", - "cr955e904-e3dd-4d7e-99f1-3fed446f96d6", - "cr955e904-e3dd-4d7e-99f1-3fed446f96d5", - "something", - ], - ] - - for id_list in cr_id_lists: - response = self.client.post("/rest/v2/datasets/list", id_list, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 3) 
- - # check that fields parameter works - response = self.client.post("/rest/v2/datasets/list?fields=id", id_list, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 3) - self.assertEqual(len(response.data["results"][0].keys()), 1) - self.assertEqual(list(response.data["results"][0].keys()), ["id"]) - - # Failing/empty tests - - cr_bad_lists = [["something"], [999, 777]] - - for bad_list in cr_bad_lists: - response = self.client.post("/rest/v2/datasets/list", bad_list, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data["results"], []) - - response = self.client.post("/rest/v2/datasets/list", [], format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue("Received empty list of identifiers" in response.data["detail"]) - - def test_read_catalog_record_details_by_pk(self): - response = self.client.get("/rest/v2/datasets/%s" % self.pk) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data["identifier"], self.identifier) - self.assertEqual("identifier" in response.data["data_catalog"], True) - - def test_read_catalog_record_details_by_identifier(self): - response = self.client.get("/rest/v2/datasets/%s" % self.identifier) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data["identifier"], self.identifier) - - def test_get_by_preferred_identifier(self): - cr = CatalogRecordV2.objects.get(pk=1) - cr.research_dataset["preferred_identifier"] = ( - "%s-/uhoh/special.chars?all&around" % cr.preferred_identifier - ) - cr.force_save() - response = self.client.get( - "/rest/v2/datasets?preferred_identifier=%s" - % urllib.parse.quote(cr.preferred_identifier) - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual( - response.data["research_dataset"]["preferred_identifier"], - cr.preferred_identifier, - ) - - def test_get_removed_by_preferred_identifier(self): - self._use_http_authorization() - response = self.client.delete("/rest/v2/datasets/%s" % self.identifier) - - self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) - response = self.client.get( - "/rest/v2/datasets?preferred_identifier=%s&removed=true" - % urllib.parse.quote(self.preferred_identifier) - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - - def test_get_by_preferred_identifier_search_prefers_oldest_data_catalog(self): - """ - Search by preferred_identifier should prefer hits from oldest created catalogs - which are assumed to be att/fairdata catalogs. - """ - - # get a cr that has alternate records - cr = self._get_object_from_test_data("catalogrecord", requested_index=9) - pid = cr["research_dataset"]["preferred_identifier"] - - # verify there are more than one record with same pid! 
- count = CatalogRecordV2.objects.filter(research_dataset__preferred_identifier=pid).count() - self.assertEqual(count > 1, True, "makes no sense to test with a pid that exists only once") - - # the retrieved record should be the one that is in catalog 1 - response = self.client.get( - "/rest/v2/datasets?preferred_identifier=%s" % urllib.parse.quote(pid) - ) - self.assertEqual("alternate_record_set" in response.data, True) - self.assertEqual(response.data["data_catalog"]["id"], cr["data_catalog"]) - - def test_read_catalog_record_details_not_found(self): - response = self.client.get("/rest/v2/datasets/shouldnotexist") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - def test_read_catalog_record_metadata_version_identifiers(self): - response = self.client.get("/rest/v2/datasets/metadata_version_identifiers") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(isinstance(response.data, list)) - self.assertTrue(len(response.data) > 0) - - def test_get_unique_preferred_identifiers(self): - """ - Get all unique preferred_identifiers, no matter if they are the latest dataset version or not. - """ - response = self.client.get("/rest/v2/datasets/unique_preferred_identifiers") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(isinstance(response.data, list)) - self.assertTrue(len(response.data) > 0) - - # save the current len, do some more operations, and compare the difference - ids_len = len(response.data) - - self._create_new_ds() - self._create_new_ds() - response = self.client.get("/rest/v2/datasets/unique_preferred_identifiers") - self.assertEqual(len(response.data) - ids_len, 2, "should be two new PIDs") - - def test_get_latest_unique_preferred_identifiers(self): - """ - Get all unique preferred_identifiers, but only from the latest dataset versions. 
- """ - response = self.client.get("/rest/v2/datasets/unique_preferred_identifiers?latest") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(isinstance(response.data, list)) - self.assertTrue(len(response.data) > 0) - - # save the current len, do some more operations, and compare the difference - ids_len = len(response.data) - - # files change - cr = CatalogRecordV2.objects.get(pk=1) - new_file_id = cr.files.all().order_by("-id").first().id + 1 - file_from_testdata = self._get_object_from_test_data("file", requested_index=new_file_id) - # warning, this is actual file metadata, would not pass schema validation if sent through api - cr.research_dataset["files"] = [file_from_testdata] - cr.save() - response = self.client.get("/rest/v2/datasets/unique_preferred_identifiers?latest") - self.assertEqual(ids_len, len(response.data), "count should stay the same") - - # create new - self._create_new_ds() - self._create_new_ds() - response = self.client.get("/rest/v2/datasets/unique_preferred_identifiers?latest") - self.assertEqual(len(response.data) - ids_len, 2, "should be two new PIDs") - - def test_expand_relations(self): - cr = CatalogRecordV2.objects.get(pk=1) - cr.contract_id = 1 - cr.force_save() - - response = self.client.get("/rest/v2/datasets/1?expand_relation=data_catalog,contract") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual( - "catalog_json" in response.data["data_catalog"], - True, - response.data["data_catalog"], - ) - self.assertEqual( - "contract_json" in response.data["contract"], - True, - response.data["contract"], - ) - - def test_strip_sensitive_fields(self): - """ - Strip fields not intended for general public - """ - - def _check_fields(obj): - for sensitive_field in ["email", "telephone", "phone"]: - self.assertEqual( - sensitive_field not in obj["research_dataset"]["curator"][0], - True, - "field %s should have been stripped" % sensitive_field, - ) - - for cr in CatalogRecordV2.objects.filter(pk__in=(1, 2, 3)): - cr.research_dataset["curator"][0].update( - { - "email": "email@mail.com", - "phone": "123124", - "telephone": "123124", - } - ) - cr.force_save() - - self.client._credentials = {} - - response = self.client.get("/rest/v2/datasets/1") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - _check_fields(response.data) - - response = self.client.get("/rest/v2/datasets") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - for obj in response.data["results"]: - _check_fields(obj) - - response = self.client.get("/rest/datasets?pagination=false") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - for obj in response.data: - _check_fields(obj) - - def _create_new_ds(self): - new_cr = self.client.get("/rest/v2/datasets/2").data - new_cr.pop("id") - new_cr["research_dataset"].pop("preferred_identifier") - new_cr.pop("identifier") - self._use_http_authorization() - response = self.client.post("/rest/v2/datasets", new_cr, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - class CatalogRecordApiReadBasicAuthorizationTests(CatalogRecordApiReadCommon): """ @@ -298,94 +58,8 @@ class CatalogRecordApiReadBasicAuthorizationTests(CatalogRecordApiReadCommon): api_version = "v2" - # THE OK TESTS - - def test_returns_all_file_dir_info_for_open_catalog_record_if_no_authorization( - self, - ): - open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() - - # 
Verify all file and dir details info is returned for open cr /rest/v2/datasets/ without - # authorization - self._assert_ok(open_cr_json, "no") - - def test_returns_all_file_dir_info_for_login_catalog_record_if_no_authorization( - self, - ): - login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details( - use_login_access_type=True - ) - - # Verify all file and dir details info is returned for login cr /rest/v2/datasets/ without authorization - self._assert_ok(login_cr_json, "no") - - @responses.activate - def test_returns_all_file_dir_info_for_open_catalog_record_if_owner_authorization( - self, - ): - self.create_end_user_data_catalogs() - open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(True) - - # Verify all file and dir details info is returned for open owner-owned cr /rest/v2/datasets/ with - # owner authorization - self._assert_ok(open_cr_json, "owner") - - @responses.activate - def test_returns_all_file_dir_info_for_login_catalog_record_if_owner_authorization( - self, - ): - self.create_end_user_data_catalogs() - login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details( - set_owner=True, use_login_access_type=True - ) - - # Verify all file and dir details info is returned for login owner-owned cr /rest/v2/datasets/ with - # owner authorization - self._assert_ok(login_cr_json, "owner") - - def test_returns_all_file_dir_info_for_restricted_catalog_record_if_service_authorization( - self, - ): - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() - - # Verify all file and dir details info is returned for restricted cr /rest/v2/datasets/ with - # service authorization - self._assert_ok(restricted_cr_json, "service") - - @responses.activate - def test_returns_all_file_dir_info_for_restricted_catalog_record_if_owner_authorization( - self, - ): - self.create_end_user_data_catalogs() - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details( - True - ) - - # Verify all file and dir details info is returned for restricted owner-owned cr /rest/v2/datasets/ with - # owner authorization - self._assert_ok(restricted_cr_json, "owner") - - def test_returns_all_file_dir_info_for_embargoed_catalog_record_if_available_reached_and_no_authorization( - self, - ): - available_embargoed_cr_json = ( - self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) - ) - - # Verify all file and dir details info is returned for embargoed cr /rest/v2/datasets/ when - # embargo date has been reached without authorization - self._assert_ok(available_embargoed_cr_json, "no") - # THE FORBIDDEN TESTS - def test_returns_limited_file_dir_info_for_restricted_catalog_record_if_no_authorization( - self, - ): - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() - - # Verify limited file and dir info for restricted cr /rest/v2/datasets/ without authorization - self._assert_limited_or_no_file_dir_info(restricted_cr_json, "no") - def test_no_file_dir_info_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization( self, ): @@ -421,509 +95,6 @@ def _assert_limited_or_no_file_dir_info(self, cr_json, credentials_type): # The below assert is a bit arbitrary self.assertFalse("identifier" in d) - def _assert_ok(self, cr_json, credentials_type): - self._set_http_authorization(credentials_type) - - file_amt = len(cr_json["research_dataset"]["files"]) - dir_amt = len(cr_json["research_dataset"]["directories"]) - pk = 
cr_json["id"] - - response = self.client.get( - "/rest/v2/datasets/{0}?include_user_metadata&file_details".format(pk) - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data["research_dataset"]["files"]), file_amt) - self.assertEqual(len(response.data["research_dataset"]["directories"]), dir_amt) - - for f in response.data["research_dataset"]["files"]: - self.assertTrue("details" in f) - # The below assert is a bit arbitrary - self.assertTrue("identifier" in f) - for d in response.data["research_dataset"]["directories"]: - self.assertTrue("details" in d) - # The below assert is a bit arbitrary - self.assertTrue("identifier" in d) - - -class CatalogRecordApiReadActorFilter(CatalogRecordApiReadCommon): - def test_agents_and_actors(self): - # set test conditions - cr = CatalogRecordV2.objects.get(pk=11) - cr.research_dataset["curator"] = [] - cr.research_dataset["curator"].append( - { - "@type": "Person", - "name": "Tarmo Termiitti", - "member_of": { - "identifier": "org_identifier", - "name": {"en": "Unique Organization"}, - }, - } - ) - cr.research_dataset["curator"].append({"@type": "Person", "name": "Keijo Kottarainen"}) - cr.research_dataset["curator"].append({"@type": "Person", "name": "Janus JƤrvinen"}) - cr.research_dataset["curator"].append({"@type": "Person", "name": "Laina Sakkonen"}) - cr.research_dataset["curator"].append( - { - "@type": "Person", - "name": "Kaisa Kuraattori", - "member_of": { - "identifier": "org_identifier", - "name": {"en": "Happy Organization"}, - }, - } - ) - cr.research_dataset["creator"] = [] - cr.research_dataset["creator"].append( - {"@type": "Organization", "name": {"en": "Unique Organization"}} - ) - cr.research_dataset["creator"].append( - {"@type": "Organization", "name": {"en": "Happy Organization"}} - ) - cr.research_dataset["creator"].append( - {"@type": "Organization", "name": {"en": "Sad Organization"}} - ) - cr.research_dataset["creator"].append( - {"@type": "Organization", "name": {"en": "Brilliant Organization"}} - ) - cr.research_dataset["creator"].append( - {"@type": "Organization", "name": {"en": "Wonderful Organization"}} - ) - cr.research_dataset["publisher"]["name"] = {} - cr.research_dataset["publisher"]["name"] = {"fi": "Originaali Organisaatio"} - cr.force_save() - - response = self.client.get("/rest/v2/datasets?creator_organization=happy") - self.assertEqual(len(response.data["results"]), 1, response.data) - - response = self.client.get("/rest/v2/datasets?creator_organization=Brilliant Organization") - self.assertEqual(len(response.data["results"]), 1, response.data) - - response = self.client.get("/rest/v2/datasets?curator_person=termiitti") - self.assertEqual(len(response.data["results"]), 1, response.data) - - response = self.client.get("/rest/v2/datasets?curator_person=Laina Sakkonen") - self.assertEqual(len(response.data["results"]), 1, response.data) - - response = self.client.get("/rest/v2/datasets?curator_organization=uniqu") - self.assertEqual(len(response.data["results"]), 1, response.data) - - response = self.client.get("/rest/v2/datasets?curator_organization=Happy Organization") - self.assertEqual(len(response.data["results"]), 1, response.data) - - response = self.client.get( - "/rest/v2/datasets?publisher_organization=originaali Organisaatio" - ) - self.assertEqual(len(response.data["results"]), 1, response.data) - - query = "curator_person=notfound&creator_organization=sad organ&condition_separator=AND" - response = self.client.get("/rest/v2/datasets?%s" % 
query) - self.assertEqual(len(response.data["results"]), 0, response.data) - - query = "curator_person=notfound&creator_organization=sad organ&condition_separator=OR" - response = self.client.get("/rest/v2/datasets?%s" % query) - self.assertEqual(len(response.data["results"]), 1, response.data) - - # test filter with pas filter - """ - Both organization and pas filters use internally Q-filters which are supposed to be AND'ed together. - """ - metax_user = settings.API_METAX_USER - self._use_http_authorization( - username=metax_user["username"], password=metax_user["password"] - ) - - response = self.client.get( - "/rest/v2/datasets?pas_filter=janus&creator_organization=sad organization" - ) - self.assertEqual(len(response.data["results"]), 1) - - response = self.client.get( - "/rest/v2/datasets?preservation_state=10&pas_filter=kaisa&creator_organization=notfound" - ) - self.assertEqual(len(response.data["results"]), 0) - - -class CatalogRecordApiReadQueryParamsTests(CatalogRecordApiReadCommon): - - """ - query_params filtering - """ - - def test_read_catalog_record_search_by_curator_1(self): - response = self.client.get("/rest/v2/datasets?curator=id:of:curator:rahikainen") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 10) - self.assertEqual( - response.data["results"][0]["research_dataset"]["curator"][0]["name"], - "Rahikainen", - "Curator name is not matching", - ) - self.assertEqual( - response.data["results"][4]["research_dataset"]["curator"][0]["name"], - "Rahikainen", - "Curator name is not matching", - ) - - def test_read_catalog_record_search_by_curator_2(self): - response = self.client.get("/rest/v2/datasets?curator=id:of:curator:jarski") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data), 4) - self.assertEqual( - response.data["results"][0]["research_dataset"]["curator"][0]["name"], - "Jarski", - "Curator name is not matching", - ) - self.assertEqual( - response.data["results"][3]["research_dataset"]["curator"][0]["name"], - "Jarski", - "Curator name is not matching", - ) - - def test_read_catalog_record_search_by_curator_not_found_1(self): - response = self.client.get("/rest/v2/datasets?curator=Not Found") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 0) - - def test_read_catalog_record_search_by_curator_not_found_case_sensitivity(self): - response = self.client.get("/rest/v2/datasets?curator=id:of:curator:Rahikainen") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 0) - - def test_read_catalog_record_search_by_curator_and_state_1(self): - response = self.client.get( - "/rest/v2/datasets?curator=id:of:curator:rahikainen&preservation_state=10" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 2) - self.assertEqual(response.data["results"][0]["id"], 2) - self.assertEqual(response.data["results"][0]["preservation_state"], 10) - self.assertEqual( - response.data["results"][0]["research_dataset"]["curator"][0]["name"], - "Rahikainen", - "Curator name is not matching", - ) - - def test_read_catalog_record_search_by_curator_and_state_2(self): - response = self.client.get( - "/rest/v2/datasets?curator=id:of:curator:rahikainen&preservation_state=40" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 1) - 
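# Illustrative aside, not part of the patch: filters such as curator_person,
# creator_organization and preservation_state are combined as Django Q objects,
# AND'ed by default and OR'ed when ?condition_separator=OR is given, as the
# condition_separator tests above demonstrate. Hypothetical helper only.
from django.db.models import Q

def combine_filters(q_filters, separator="AND"):
    combined = Q()
    for q in q_filters:
        combined = combined | q if separator.upper() == "OR" else combined & q
    return combined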
self.assertEqual(response.data["results"][0]["id"], 4) - self.assertEqual(response.data["results"][0]["preservation_state"], 40) - self.assertEqual( - response.data["results"][0]["research_dataset"]["curator"][0]["name"], - "Rahikainen", - "Curator name is not matching", - ) - - def test_read_catalog_record_search_by_curator_and_state_not_found(self): - response = self.client.get( - "/rest/v2/datasets?curator=id:of:curator:rahikainen&preservation_state=55" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 0) - - def test_read_catalog_record_search_by_creator_id(self): - cr = CatalogRecordV2.objects.get(pk=1) - cr.user_created = "123" - cr.force_save() - response = self.client.get("/rest/v2/datasets?user_created=123") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 1) - self.assertEqual(response.data["results"][0]["user_created"], "123") - - def test_read_catalog_record_search_by_metadata_provider_user(self): - response = self.client.get("/rest/v2/datasets?metadata_provider_user=123") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data["count"], 0) - - cr = CatalogRecordV2.objects.get(pk=1) - cr.metadata_provider_user = "123" - cr.force_save() - - response = self.client.get("/rest/v2/datasets?metadata_provider_user=123") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data["count"], 1) - - def test_read_catalog_record_search_by_metadata_owner_org(self): - owner_org = "org_id" - for cr in CatalogRecordV2.objects.filter(pk__in=[1, 2, 3]): - cr.metadata_owner_org = owner_org - cr.force_save() - - owner_org_2 = "org_id_2" - for cr in CatalogRecordV2.objects.filter(pk__in=[4, 5, 6]): - cr.metadata_owner_org = owner_org_2 - cr.force_save() - - response = self.client.get("/rest/v2/datasets?metadata_owner_org=%s" % owner_org) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 3) - - response = self.client.get( - "/rest/v2/datasets?metadata_owner_org=%s,%s" % (owner_org, owner_org_2) - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 6) - - def test_filter_by_contract_org_identifier(self): - """ - Test filtering by contract_org_identifier, which matches using iregex - """ - metax_user = settings.API_METAX_USER - self._use_http_authorization( - username=metax_user["username"], password=metax_user["password"] - ) - - response = self.client.get("/rest/v2/datasets?contract_org_identifier=2345") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 10) - - response = self.client.get("/rest/v2/datasets?contract_org_identifier=1234567-1") - self.assertEqual(len(response.data["results"]), 10) - - response = self.client.get("/rest/v2/datasets?contract_org_identifier=1234567-123") - self.assertEqual(len(response.data["results"]), 0) - - def test_filter_by_contract_org_identifier_is_restricted(self): - response = self.client.get("/rest/v2/datasets?contract_org_identifier=1234") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - - def test_read_catalog_record_search_by_data_catalog_id(self): - from metax_api.models.data_catalog import DataCatalog - - # Create a new data catalog - dc = self._get_object_from_test_data("datacatalog", requested_index=0) - dc_id = "original_dc_identifier" - 
dc["catalog_json"]["identifier"] = dc_id - self.client.post("/rest/v2/datacatalogs", dc, format="json") - - # Set the new data catalog for a catalog record and store the catalog record - cr = CatalogRecordV2.objects.get(pk=1) - cr.data_catalog = DataCatalog.objects.get(catalog_json__identifier=dc_id) - cr.force_save() - - # Verify - response = self.client.get("/rest/v2/datasets?data_catalog={0}".format(dc_id)) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 1) - self.assertEqual(response.data["results"][0]["data_catalog"]["identifier"], dc_id) - - def test_filter_by_deprecated(self): - cr = CatalogRecordV2.objects.get(pk=1) - cr.deprecated = True - cr.force_save() - - response = self.client.get("/rest/v2/datasets?deprecated=true") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 1, response.data["results"]) - self.assertTrue(response.data["results"][0]["deprecated"], response.data) - - response = self.client.get("/rest/v2/datasets?deprecated=false") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data["results"][0]["id"], 2, response.data) - self.assertFalse(response.data["results"][0]["deprecated"], response.data) - - response = self.client.get("/rest/v2/datasets?deprecated=badbool") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - def test_filter_by_api_version(self): - - # update one dataset to v2 so that we get non-zero values for that as well - response = self.client.put("/rest/v2/datasets", [self.cr_from_test_data], format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - # both models return correct count because v2 model is just a proxy - v1_count = CatalogRecordV2.objects.filter(api_meta__contains={"version": 1}).count() - - response = self.client.get("/rest/v2/datasets?api_version=1") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(v1_count, response.data["count"], response.data) - - v2_count = CatalogRecordV2.objects.filter(api_meta__contains={"version": 2}).count() - - response = self.client.get("/rest/v2/datasets?api_version=2") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(v2_count, response.data["count"], response.data) - - def test_filter_by_legacy(self): - self.create_legacy_data_catalogs() - self.create_legacy_dataset() - - # by default, legacy datasets are excluded - non_legacy_count = CatalogRecordV2.objects.exclude( - data_catalog__catalog_json__identifier__in=settings.LEGACY_CATALOGS - ).count() - response = self.client.get("/rest/v2/datasets") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(non_legacy_count, response.data["count"], response.data) - - # legacy datasets can be included with a parameter - count_all = CatalogRecordV2.objects.count() - response = self.client.get("/rest/v2/datasets?include_legacy") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(count_all, response.data["count"], response.data) - - -class CatalogRecordApiReadXMLTransformationTests(CatalogRecordApiReadCommon): - - """ - dataset xml transformations - """ - - def _create_dataset_with_doi(self): - # Create ida data catalog - dc = self._get_object_from_test_data("datacatalog", requested_index=0) - dc_id = settings.IDA_DATA_CATALOG_IDENTIFIER - dc["catalog_json"]["identifier"] = dc_id - self.client.post("/rest/v2/datacatalogs", 
dc, format="json")
-
-        # Create new cr by requesting a doi identifier
-        cr_json = self.client.get("/rest/v2/datasets/1").data
-        cr_json.pop("preservation_identifier", None)
-        cr_json.pop("identifier")
-        cr_json["research_dataset"].pop("preferred_identifier", None)
-        cr_json["research_dataset"]["publisher"] = {
-            "@type": "Organization",
-            "name": {"und": "Testaaja"},
-        }
-        cr_json["research_dataset"]["issued"] = "2010-01-01"
-        cr_json["data_catalog"] = dc_id
-        response = self.client.post("/rest/v2/datasets?pid_type=doi", cr_json, format="json")
-        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        return response.data
-
-    def test_read_dataset_xml_format_metax(self):
-        response = self.client.get("/rest/v2/datasets/1?dataset_format=metax")
-        self._check_dataset_xml_format_response(response, "<researchdataset")
-
-    def test_read_dataset_format_datacite_doi(self):
-        cr = self._create_dataset_with_doi()
-        doi = cr["preservation_identifier"]
-        response = self.client.get("/rest/v2/datasets/%d?dataset_format=datacite" % cr["id"])
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(
-            '<identifier identifierType="DOI">%s</identifier>' % doi[len("doi:") :] in response.data,
-            True,
-            response.data,
-        )
-
-    def test_read_dataset_format_datacite_odd_lang_abbrevation(self):
-        cr = CatalogRecordV2.objects.get(pk=1)
-        cr.research_dataset["publisher"] = {
-            "@type": "Organization",
-            "name": {"zk": "Testiorganisaatio"},
-        }
-        cr.force_save()
-        response = self.client.get("/rest/v2/datasets/1?dataset_format=fairdata_datacite")
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-    def test_read_dataset_format_dummy_datacite_doi(self):
-        """
-        Ensure query parameter ?dummy_doi=true returns datacite xml with identifierType=DOI
-        and identifier value prefixed with 10.0/. If a real DOI is
-        available in the dataset, then dummy should NOT be returned.
-        """
-        pid = self.client.get("/rest/v2/datasets/12").data["research_dataset"][
-            "preferred_identifier"
-        ]
-        self.assertEqual(pid.startswith("doi:"), False, pid)
-
-        for dataset_format in ["datacite", "fairdata_datacite"]:
-            response = self.client.get(
-                "/rest/v2/datasets/12?dataset_format=%s&dummy_doi=true" % dataset_format
-            )
-            self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-            self.assertEqual(
-                '<identifier identifierType="DOI">' in response.data,
-                True,
-                response.data,
-            )
-            self.assertEqual("10.0/%s" % pid in response.data, True, response.data)
-
-        # ensure if a real doi exists, then dummy should never be returned
-        cr = self._create_dataset_with_doi()
-        response = self.client.get(
-            "/rest/v2/datasets/%d?dataset_format=datacite&dummy_doi=true" % cr["id"]
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual('<identifier identifierType="DOI">' in response.data, True, response.data)
-        self.assertEqual(
-            cr["preservation_identifier"][len("doi:") :] in response.data,
-            True,
-            response.data,
-        )
-        self.assertEqual("10.0/%s" % pid in response.data, False, response.data)
-
-    def _check_dataset_xml_format_response(self, response, element_name):
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual("content-type" in response._headers, True, response._headers)
-        self.assertEqual(
-            "application/xml" in response._headers["content-type"][1],
-            True,
-            response._headers,
-        )
-        self.assertEqual("<?xml version" in response.data[:20], True, response.data)
-        self.assertEqual(element_name in response.data[:60], True, response.data)
-
 
 class CatalogRecordApiReadPopulateFileInfoTests(CatalogRecordApiReadCommon):
@@ -932,67 +103,6 @@ class CatalogRecordApiReadPopulateFileInfoTests(CatalogRecordApiReadCommon):
     corresponding objects from their db tables.
""" - def test_file_details_populated(self): - # without the flag nothing should happen - response = self.client.get("/rest/v2/datasets/1?include_user_metadata") - self.assertEqual( - all("details" not in f for f in response.data["research_dataset"]["files"]), - True, - ) - - response = self.client.get("/rest/v2/datasets/1?include_user_metadata&file_details") - self.assertEqual(response.status_code, status.HTTP_200_OK) - # check all fiels have the extra key 'details', and all details have the key 'identifier'. - # presumably the details were then filled in. - self.assertEqual( - all("details" in f for f in response.data["research_dataset"]["files"]), - True, - ) - self.assertEqual( - all("identifier" in f["details"] for f in response.data["research_dataset"]["files"]), - True, - ) - - def test_directory_details_populated(self): - # id 11 is one of the example datasets with full details. they should have a couple - # of directories attached. - CatalogRecordV2.objects.get(pk=11).calculate_directory_byte_sizes_and_file_counts() - - response = self.client.get("/rest/v2/datasets/11?include_user_metadata&file_details") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - # check all dirs have the extra key 'details', and all details have the key 'identifier'. - # presumably the details were then filled in. - self.assertEqual( - all("details" in f for f in response.data["research_dataset"]["directories"]), - True, - ) - self.assertEqual( - all( - "identifier" in f["details"] - for f in response.data["research_dataset"]["directories"] - ), - True, - ) - - # additionally check that file counts and total byte sizes are as expected - self.assertEqual( - response.data["research_dataset"]["directories"][0]["details"]["byte_size"], - 21000, - ) - self.assertEqual( - response.data["research_dataset"]["directories"][1]["details"]["byte_size"], - 21000, - ) - self.assertEqual( - response.data["research_dataset"]["directories"][0]["details"]["file_count"], - 20, - ) - self.assertEqual( - response.data["research_dataset"]["directories"][1]["details"]["file_count"], - 20, - ) - def test_file_details_for_deprecated_datasets(self): """ When a dataset is deprecated, it is possible that some of its directories no longer exist. @@ -1016,158 +126,12 @@ def test_file_details_for_deprecated_datasets(self): self.assertEqual(response.status_code, status.HTTP_200_OK) -class CatalogRecordApiReadPopulateFileInfoAuthorizationTests(CatalogRecordApiReadCommon): - """ - Test populating individual research_dataset.file and directory objects with their - corresponding objects from their db tables from authorization perspective. 
- """ - - # THE OK TESTS - - def test_returns_all_details_for_open_catalog_record_if_no_authorization(self): - open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() - - # Verify all file and dir details info is returned for open cr /rest/v2/datasets/?file_details without - # authorization - self._assert_ok(open_cr_json, "no") - - def test_returns_all_details_for_login_catalog_record_if_no_authorization(self): - open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() - - # Verify all file and dir details info is returned for open cr /rest/v2/datasets/?file_details without - # authorization - self._assert_ok(open_cr_json, "no") - - def test_returns_all_details_for_open_catalog_record_if_service_authorization(self): - open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() - - # Verify all file and dir details info is returned for open cr /rest/v2/datasets/?file_details with - # service authorization - self._assert_ok(open_cr_json, "service") - - @responses.activate - def test_returns_all_details_for_open_catalog_record_if_owner_authorization(self): - self.create_end_user_data_catalogs() - open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(True) - - # Verify all file and dir details info is returned for open owner-owned - # cr /rest/v2/datasets/?file_details with owner authorization - self._assert_ok(open_cr_json, "owner") - - def test_returns_all_details_for_restricted_catalog_record_if_service_authorization( - self, - ): - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() - - # Verify all file and dir details info is returned for restricted cr /rest/v2/datasets/?file_details with - # service authorization - self._assert_ok(restricted_cr_json, "service") - - @responses.activate - def test_returns_all_details_for_restricted_catalog_record_if_owner_authorization( - self, - ): - self.create_end_user_data_catalogs() - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details( - True - ) - - # Verify all file and dir details info is returned for restricted owner-owned cr - # /rest/v2/datasets/?file_details with owner authorization - self._assert_ok(restricted_cr_json, "owner") - - def test_returns_all_details_for_embargoed_catalog_record_if_available_reached_and_no_authorization( - self, - ): - available_embargoed_cr_json = ( - self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) - ) - - # Verify all file and dir details info is returned for embargoed cr /rest/v2/datasets/?file_details when - # embargo date has been reached without authorization - self._assert_ok(available_embargoed_cr_json, "no") - - # THE FORBIDDEN TESTS - - def test_returns_limited_info_for_restricted_catalog_record_if_no_authorization( - self, - ): - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() - - # Verify limited file and dir info for restricted cr /rest/v2/datasets/?file_details without authorization - self._assert_limited_or_no_file_dir_info(restricted_cr_json, "no") - - def test_returns_limited_info_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization( - self, - ): - not_available_embargoed_cr_json = ( - self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(False) - ) - - # Verify limited file and dir info for embargoed cr /rest/v2/datasets/?file_details when embargo date has - # not been reached without authorization - 
-        self._assert_limited_or_no_file_dir_info(not_available_embargoed_cr_json, "no")
-
-    def _assert_limited_or_no_file_dir_info(self, cr_json, credentials_type):
-        self._set_http_authorization(credentials_type)
-
-        file_amt = len(cr_json["research_dataset"]["files"])
-        dir_amt = len(cr_json["research_dataset"]["directories"])
-        pk = cr_json["id"]
-
-        response = self.client.get(
-            "/rest/v2/datasets/{0}?include_user_metadata&file_details".format(pk)
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-
-        self.assertEqual(len(response.data["research_dataset"]["files"]), file_amt)
-        self.assertEqual(len(response.data["research_dataset"]["directories"]), dir_amt)
-
-        for f in response.data["research_dataset"]["files"]:
-            self.assertTrue("details" in f)
-            # The below assert is a bit arbitrary
-            self.assertTrue(len(f["details"].keys()) < 5)
-        for d in response.data["research_dataset"]["directories"]:
-            self.assertTrue("details" in d)
-            # The below assert is a bit arbitrary
-            self.assertTrue(len(d["details"].keys()) < 5)
-
-    def _assert_ok(self, cr_json, credentials_type):
-        self._set_http_authorization(credentials_type)
-
-        file_amt = len(cr_json["research_dataset"]["files"])
-        dir_amt = len(cr_json["research_dataset"]["directories"])
-        pk = cr_json["id"]
-
-        response = self.client.get(
-            "/rest/v2/datasets/{0}?include_user_metadata&file_details".format(pk)
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data["research_dataset"]["files"]), file_amt)
-        self.assertEqual(len(response.data["research_dataset"]["directories"]), dir_amt)
-
-        for f in response.data["research_dataset"]["files"]:
-            self.assertTrue("details" in f)
-            # The below assert is a bit arbitrary
-            self.assertTrue(len(f["details"].keys()) > 5)
-        for d in response.data["research_dataset"]["directories"]:
-            self.assertTrue("details" in d)
-            # The below assert is a bit arbitrary
-            self.assertTrue(len(d["details"].keys()) > 5)
-
-
 class CatalogRecordApiReadFiles(CatalogRecordApiReadCommon):
     """
     Test /datasets/pid/files api
     """
 
-    def test_get_files(self):
-        file_count = CatalogRecordV2.objects.get(pk=1).files.count()
-        response = self.client.get("/rest/v2/datasets/1/files")
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data), file_count)
-
     def test_get_files_specified_fields_only(self):
         """
         Test use of query parameter ?file_fields=x,y,z
@@ -1208,64 +172,8 @@ class CatalogRecordApiReadFilesAuthorization(CatalogRecordApiReadCommon):
     Test /datasets/pid/files api from authorization perspective
     """
 
-    # THE OK TESTS
-
-    def test_returns_ok_for_open_catalog_record_if_no_authorization(self):
-        open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details()
-
-        # Verify open dataset /rest/v2/datasets/<pk>/files returns all the files even without authorization
-        self._assert_ok(open_cr_json, "no")
-
-    def test_returns_ok_for_open_catalog_record_if_service_authorization(self):
-        open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details()
-
-        # Verify open dataset /rest/v2/datasets/<pk>/files returns all the files with service authorization
-        self._assert_ok(open_cr_json, "service")
-
-    @responses.activate
-    def test_returns_ok_for_open_catalog_record_if_owner_authorization(self):
-        self.create_end_user_data_catalogs()
-        open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(True)
-
-        # Verify open owner-owned dataset /rest/v2/datasets/<pk>/files returns all the files
-        # with owner authorization
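-        # Owner authorization presumably boils down to comparing the authenticated end
-        # user against the record's ownership fields; a rough sketch of the assumed
-        # check (the real logic lives in the permission/service layer):
-        #
-        #     is_owner = request.user.username in (
-        #         cr.user_created,
-        #         cr.metadata_provider_user,
-        #     )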
-        self._assert_ok(open_cr_json, "owner")
-
-    def test_returns_ok_for_restricted_catalog_record_if_service_authorization(self):
-        restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details()
-
-        # Verify restricted dataset /rest/v2/datasets/<pk>/files returns all the files with service authorization
-        self._assert_ok(restricted_cr_json, "service")
-
-    @responses.activate
-    def test_returns_ok_for_restricted_catalog_record_if_owner_authorization(self):
-        self.create_end_user_data_catalogs()
-        restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details(
-            True
-        )
-
-        # Verify restricted owner-owned dataset /rest/v2/datasets/<pk>/files returns all the files with
-        # owner authorization
-        self._assert_ok(restricted_cr_json, "owner")
-
-    def test_returns_ok_for_embargoed_catalog_record_if_available_reached_and_no_authorization(
-        self,
-    ):
-        available_embargoed_cr_json = (
-            self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True)
-        )
-
-        # Verify embargoed dataset /rest/v2/datasets/<pk>/files returns ok when embargo date has been
-        # reached without authorization
-        self._assert_ok(available_embargoed_cr_json, "no")
-
     # THE FORBIDDEN TESTS
 
-    def test_returns_forbidden_for_restricted_catalog_record_if_no_authorization(self):
-        restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details()
-
-        # Verify restricted dataset /rest/v2/datasets/<pk>/files returns forbidden without authorization
-        self._assert_forbidden(restricted_cr_json, "no")
-
     def test_returns_forbidden_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization(
         self,
     ):
@@ -1281,15 +189,4 @@ def _assert_forbidden(self, cr_json, credentials_type):
         pk = cr_json["id"]
         self._set_http_authorization(credentials_type)
         response = self.client.get("/rest/v2/datasets/{0}/files".format(pk))
-        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
-
-    def _assert_ok(self, cr_json, credentials_type):
-        pk = cr_json["id"]
-        self._set_http_authorization(credentials_type)
-        rd = cr_json["research_dataset"]
-        file_amt = len(rd["files"]) + sum(
-            int(d["details"]["file_count"]) for d in rd["directories"]
-        )
-        response = self.client.get("/rest/v2/datasets/{0}/files".format(pk))
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data), file_amt)
+        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
\ No newline at end of file
diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/referencedata.py b/src/metax_api/tests/api/rest/v2/views/datasets/referencedata.py
deleted file mode 100755
index 6233d399..00000000
--- a/src/metax_api/tests/api/rest/v2/views/datasets/referencedata.py
+++ /dev/null
@@ -1,657 +0,0 @@
-# This file is part of the Metax API service
-#
-# Copyright 2017-2020 Ministry of Education and Culture, Finland
-#
-# :author: CSC - IT Center for Science Ltd., Espoo Finland
-# :license: MIT
-
-from copy import deepcopy
-
-from rest_framework import status
-
-from metax_api.services import ReferenceDataMixin as RDM
-from metax_api.services.redis_cache_service import RedisClient
-
-from .write import CatalogRecordApiWriteCommon
-
-
-class CatalogRecordApiWriteReferenceDataTests(CatalogRecordApiWriteCommon):
-    """
-    Tests related to reference_data validation and dataset fields population
-    from reference_data, according to the given uri or code value.
- """ - - def test_organization_name_is_required(self): - """ - Organization 'name' field is not madatory in the schema, but that is only because it does - not make sense to end users when using an identifier from reference data, which will overwrite - the name anyway. - - If an organization identifier is used, which is not found in the reference data, and therefore - does not populate the name automatically, then the user is required to provide the name. - """ - - # simple case - cr = deepcopy(self.cr_full_ida_test_data) - cr["research_dataset"]["curator"] = [ - { - "@type": "Organization", - "identifier": "not found!", - # no name! - } - ] - response = self.client.post("/rest/v2/datasets", cr, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - # a more complex case. ensure organizations are found from deep structures - cr = deepcopy(self.cr_full_ida_test_data) - org = cr["research_dataset"]["provenance"][0]["was_associated_with"][0] - del org["name"] # should cause the error - org["@type"] = "Organization" - org["identifier"] = "not found!" - response = self.client.post("/rest/v2/datasets", cr, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - # try again. should be ok - org["identifier"] = "http://uri.suomi.fi/codelist/fairdata/organization/code/10076" - response = self.client.post("/rest/v2/datasets", cr, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - def test_catalog_record_reference_data_missing_ok(self): - """ - The API should attempt to reload the reference data if it is missing from - cache for whatever reason, and successfully finish the request - """ - cache = RedisClient() - cache.delete("reference_data") - self.assertEqual( - cache.get("reference_data", master=True), - None, - "cache ref data should be missing after cache.delete()", - ) - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - def test_missing_license_identifier_ok(self): - """ - Missing license identifier is ok if url is provided. - Works on att and ida datasets - """ - rd_ida = self.cr_full_ida_test_data["research_dataset"] - rd_ida["access_rights"]["license"] = [{"license": "http://a.very.nice.custom/url"}] - response = self.client.post("/rest/v2/datasets", self.cr_full_ida_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual( - len(response.data["research_dataset"]["access_rights"]["license"][0]), - 1, - response.data, - ) - - rd_att = self.cr_full_att_test_data["research_dataset"] - rd_att["access_rights"]["license"] = [ - { - "license": "http://also.fine.custom/uri", - "description": { - "en": "This is very informative description of this custom license." - }, - } - ] - rd_att["remote_resources"][0]["license"] = [ - { - "license": "http://cool.remote.uri", - "description": { - "en": "Proof that also remote licenses can be used with custom urls." 
-                },
-            }
-        ]
-        response = self.client.post("/rest/v2/datasets", self.cr_full_att_test_data, format="json")
-        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual(
-            len(response.data["research_dataset"]["access_rights"]["license"][0]),
-            2,
-            response.data,
-        )
-        self.assertEqual(
-            len(response.data["research_dataset"]["remote_resources"][0]["license"][0]),
-            2,
-            response.data,
-        )
-
-    def test_create_catalog_record_with_invalid_reference_data(self):
-        rd_ida = self.cr_full_ida_test_data["research_dataset"]
-        rd_ida["theme"][0]["identifier"] = "nonexisting"
-        rd_ida["field_of_science"][0]["identifier"] = "nonexisting"
-        rd_ida["language"][0]["identifier"] = "nonexisting"
-        rd_ida["access_rights"]["access_type"]["identifier"] = "nonexisting"
-        rd_ida["access_rights"]["license"][0]["identifier"] = "nonexisting"
-        rd_ida["other_identifier"][0]["type"]["identifier"] = "nonexisting"
-        rd_ida["spatial"][0]["place_uri"]["identifier"] = "nonexisting"
-        rd_ida["files"][0]["file_type"]["identifier"] = "nonexisting"
-        rd_ida["files"][0]["use_category"]["identifier"] = "nonexisting"
-        rd_ida["infrastructure"][0]["identifier"] = "nonexisting"
-        rd_ida["creator"][0]["contributor_role"][0]["identifier"] = "nonexisting"
-        rd_ida["curator"][0]["contributor_type"][0]["identifier"] = "nonexisting"
-        rd_ida["is_output_of"][0]["funder_type"]["identifier"] = "nonexisting"
-        rd_ida["directories"][0]["use_category"]["identifier"] = "nonexisting"
-        rd_ida["relation"][0]["relation_type"]["identifier"] = "nonexisting"
-        rd_ida["relation"][0]["entity"]["type"]["identifier"] = "nonexisting"
-        rd_ida["provenance"][0]["lifecycle_event"]["identifier"] = "nonexisting"
-        rd_ida["provenance"][1]["preservation_event"]["identifier"] = "nonexisting"
-        rd_ida["provenance"][0]["event_outcome"]["identifier"] = "nonexisting"
-        response = self.client.post("/rest/v2/datasets", self.cr_full_ida_test_data, format="json")
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertEqual("research_dataset" in response.data.keys(), True)
-        self.assertEqual(len(response.data["research_dataset"]), 19)
-
-        rd_att = self.cr_full_att_test_data["research_dataset"]
-        rd_att["remote_resources"][0]["license"][0]["identifier"] = "nonexisting"
-        rd_att["remote_resources"][1]["resource_type"]["identifier"] = "nonexisting"
-        rd_att["remote_resources"][0]["use_category"]["identifier"] = "nonexisting"
-        response = self.client.post("/rest/v2/datasets", self.cr_full_att_test_data, format="json")
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertEqual("research_dataset" in response.data.keys(), True)
-        self.assertEqual(len(response.data["research_dataset"]), 3)
-
-    def test_create_catalog_record_populate_fields_from_reference_data(self):
-        """
-        1) Insert codes from cached reference data to dataset identifier fields
-           that will be validated, and then populated
-        2) Check that the values in dataset identifier fields are changed from
-           codes to uris after a successful create
-        3) Check that labels have also been copied to datasets to their appropriate fields
-        """
-        cache = RedisClient()
-        rf = RDM.get_reference_data(cache)
-        refdata = rf["reference_data"]
-        orgdata = rf["organization_data"]
-        refs = {}
-
-        data_types = [
-            "access_type",
-            "restriction_grounds",
-            "field_of_science",
-            "identifier_type",
-            "keyword",
-            "language",
-            "license",
-            "location",
-            "resource_type",
-            "file_type",
-            "use_category",
-            "research_infra",
-            "contributor_role",
"contributor_type", - "funder_type", - "relation_type", - "lifecycle_event", - "preservation_event", - "event_outcome", - ] - - # the values in these selected entries will be used throghout the rest of the test case - for dtype in data_types: - if dtype == "location": - entry = next((obj for obj in refdata[dtype] if obj.get("wkt", False)), None) - self.assertTrue(entry is not None) - else: - entry = refdata[dtype][1] - refs[dtype] = { - "code": entry["code"], - "uri": entry["uri"], - "label": entry.get("label", None), - "wkt": entry.get("wkt", None), - "scheme": entry.get("scheme", None), - } - - refs["organization"] = { - "uri": orgdata["organization"][0]["uri"], - "code": orgdata["organization"][0]["code"], - "label": orgdata["organization"][0]["label"], - } - - # replace the relations with objects that have only the identifier set with code as value, - # to easily check that label was populated (= that it appeared in the dataset after create) - # without knowing its original value from the generated test data - rd_ida = self.cr_full_ida_test_data["research_dataset"] - rd_ida["theme"][0] = {"identifier": refs["keyword"]["code"]} - rd_ida["field_of_science"][0] = {"identifier": refs["field_of_science"]["code"]} - rd_ida["language"][0] = {"identifier": refs["language"]["code"]} - rd_ida["access_rights"]["access_type"] = {"identifier": refs["access_type"]["code"]} - rd_ida["access_rights"]["restriction_grounds"][0] = { - "identifier": refs["restriction_grounds"]["code"] - } - rd_ida["access_rights"]["license"][0] = {"identifier": refs["license"]["code"]} - rd_ida["other_identifier"][0]["type"] = {"identifier": refs["identifier_type"]["code"]} - rd_ida["spatial"][0]["place_uri"] = {"identifier": refs["location"]["code"]} - rd_ida["files"][0]["file_type"] = {"identifier": refs["file_type"]["code"]} - rd_ida["files"][0]["use_category"] = {"identifier": refs["use_category"]["code"]} - rd_ida["directories"][0]["use_category"] = {"identifier": refs["use_category"]["code"]} - rd_ida["infrastructure"][0] = {"identifier": refs["research_infra"]["code"]} - rd_ida["creator"][0]["contributor_role"][0] = { - "identifier": refs["contributor_role"]["code"] - } - rd_ida["curator"][0]["contributor_type"][0] = { - "identifier": refs["contributor_type"]["code"] - } - rd_ida["is_output_of"][0]["funder_type"] = {"identifier": refs["funder_type"]["code"]} - rd_ida["relation"][0]["relation_type"] = {"identifier": refs["relation_type"]["code"]} - rd_ida["relation"][0]["entity"]["type"] = {"identifier": refs["resource_type"]["code"]} - rd_ida["provenance"][0]["lifecycle_event"] = {"identifier": refs["lifecycle_event"]["code"]} - rd_ida["provenance"][1]["preservation_event"] = { - "identifier": refs["preservation_event"]["code"] - } - rd_ida["provenance"][0]["event_outcome"] = {"identifier": refs["event_outcome"]["code"]} - - # these have other required fields, so only update the identifier with code - rd_ida["is_output_of"][0]["source_organization"][0]["identifier"] = refs["organization"][ - "code" - ] - rd_ida["is_output_of"][0]["has_funding_agency"][0]["identifier"] = refs["organization"][ - "code" - ] - rd_ida["other_identifier"][0]["provider"]["identifier"] = refs["organization"]["code"] - rd_ida["contributor"][0]["member_of"]["identifier"] = refs["organization"]["code"] - rd_ida["creator"][0]["member_of"]["identifier"] = refs["organization"]["code"] - rd_ida["curator"][0]["is_part_of"]["identifier"] = refs["organization"]["code"] - rd_ida["publisher"]["is_part_of"]["identifier"] = refs["organization"]["code"] 
- rd_ida["rights_holder"][0]["is_part_of"]["identifier"] = refs["organization"]["code"] - - # Other type of reference data populations - orig_wkt_value = rd_ida["spatial"][0]["as_wkt"][0] - rd_ida["spatial"][0]["place_uri"]["identifier"] = refs["location"]["code"] - rd_ida["spatial"][1]["as_wkt"] = [] - rd_ida["spatial"][1]["place_uri"]["identifier"] = refs["location"]["code"] - - response = self.client.post( - "/rest/v2/datasets?include_user_metadata", - self.cr_full_ida_test_data, - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual("research_dataset" in response.data.keys(), True) - - new_rd_ida = response.data["research_dataset"] - self._assert_uri_copied_to_identifier(refs, new_rd_ida) - self._assert_label_copied_to_pref_label(refs, new_rd_ida) - self._assert_label_copied_to_title(refs, new_rd_ida) - self._assert_label_copied_to_name(refs, new_rd_ida) - - # Assert if spatial as_wkt field has been populated with a value from ref data which has wkt value having - # condition that the user has not given own coordinates in the as_wkt field - self.assertEqual(orig_wkt_value, new_rd_ida["spatial"][0]["as_wkt"][0]) - self.assertEqual(refs["location"]["wkt"], new_rd_ida["spatial"][1]["as_wkt"][0]) - - # rd from att data catalog - rd_att = self.cr_full_att_test_data["research_dataset"] - rd_att["remote_resources"][1]["resource_type"] = { - "identifier": refs["resource_type"]["code"] - } - rd_att["remote_resources"][0]["use_category"] = {"identifier": refs["use_category"]["code"]} - rd_att["remote_resources"][0]["license"][0] = {"identifier": refs["license"]["code"]} - - # Assert remote resources related reference datas - response = self.client.post("/rest/v2/datasets", self.cr_full_att_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual("research_dataset" in response.data.keys(), True) - new_rd_att = response.data["research_dataset"] - self._assert_att_remote_resource_items(refs, new_rd_att) - - def _assert_att_remote_resource_items(self, refs, new_rd): - self.assertEqual( - refs["resource_type"]["uri"], - new_rd["remote_resources"][1]["resource_type"]["identifier"], - ) - self.assertEqual( - refs["use_category"]["uri"], - new_rd["remote_resources"][0]["use_category"]["identifier"], - ) - self.assertEqual( - refs["license"]["uri"], - new_rd["remote_resources"][0]["license"][0]["identifier"], - ) - self.assertEqual( - refs["resource_type"]["label"], - new_rd["remote_resources"][1]["resource_type"].get("pref_label", None), - ) - self.assertEqual( - refs["use_category"]["label"], - new_rd["remote_resources"][0]["use_category"].get("pref_label", None), - ) - self.assertEqual( - refs["license"]["label"], - new_rd["remote_resources"][0]["license"][0].get("title", None), - ) - - def _assert_uri_copied_to_identifier(self, refs, new_rd): - self.assertEqual(refs["keyword"]["uri"], new_rd["theme"][0]["identifier"]) - self.assertEqual( - refs["field_of_science"]["uri"], new_rd["field_of_science"][0]["identifier"] - ) - self.assertEqual(refs["language"]["uri"], new_rd["language"][0]["identifier"]) - self.assertEqual( - refs["access_type"]["uri"], - new_rd["access_rights"]["access_type"]["identifier"], - ) - self.assertEqual( - refs["restriction_grounds"]["uri"], - new_rd["access_rights"]["restriction_grounds"][0]["identifier"], - ) - self.assertEqual( - refs["license"]["uri"], new_rd["access_rights"]["license"][0]["identifier"] - ) - self.assertEqual( - 
refs["identifier_type"]["uri"], - new_rd["other_identifier"][0]["type"]["identifier"], - ) - self.assertEqual(refs["location"]["uri"], new_rd["spatial"][0]["place_uri"]["identifier"]) - self.assertEqual(refs["file_type"]["uri"], new_rd["files"][0]["file_type"]["identifier"]) - self.assertEqual( - refs["use_category"]["uri"], - new_rd["files"][0]["use_category"]["identifier"], - ) - - self.assertEqual( - refs["use_category"]["uri"], - new_rd["directories"][0]["use_category"]["identifier"], - ) - self.assertEqual( - refs["organization"]["uri"], - new_rd["is_output_of"][0]["source_organization"][0]["identifier"], - ) - self.assertEqual( - refs["organization"]["uri"], - new_rd["is_output_of"][0]["has_funding_agency"][0]["identifier"], - ) - self.assertEqual( - refs["organization"]["uri"], - new_rd["other_identifier"][0]["provider"]["identifier"], - ) - self.assertEqual( - refs["organization"]["uri"], - new_rd["contributor"][0]["member_of"]["identifier"], - ) - self.assertEqual( - refs["organization"]["uri"], new_rd["creator"][0]["member_of"]["identifier"] - ) - self.assertEqual( - refs["organization"]["uri"], - new_rd["curator"][0]["is_part_of"]["identifier"], - ) - self.assertEqual( - refs["organization"]["uri"], new_rd["publisher"]["is_part_of"]["identifier"] - ) - self.assertEqual( - refs["organization"]["uri"], - new_rd["rights_holder"][0]["is_part_of"]["identifier"], - ) - self.assertEqual(refs["research_infra"]["uri"], new_rd["infrastructure"][0]["identifier"]) - self.assertEqual( - refs["contributor_role"]["uri"], - new_rd["creator"][0]["contributor_role"][0]["identifier"], - ) - self.assertEqual( - refs["contributor_type"]["uri"], - new_rd["curator"][0]["contributor_type"][0]["identifier"], - ) - self.assertEqual( - refs["funder_type"]["uri"], - new_rd["is_output_of"][0]["funder_type"]["identifier"], - ) - self.assertEqual( - refs["relation_type"]["uri"], - new_rd["relation"][0]["relation_type"]["identifier"], - ) - self.assertEqual( - refs["resource_type"]["uri"], - new_rd["relation"][0]["entity"]["type"]["identifier"], - ) - self.assertEqual( - refs["lifecycle_event"]["uri"], - new_rd["provenance"][0]["lifecycle_event"]["identifier"], - ) - self.assertEqual( - refs["preservation_event"]["uri"], - new_rd["provenance"][1]["preservation_event"]["identifier"], - ) - self.assertEqual( - refs["event_outcome"]["uri"], - new_rd["provenance"][0]["event_outcome"]["identifier"], - ) - - def _assert_scheme_copied_to_in_scheme(self, refs, new_rd): - self.assertEqual(refs["keyword"]["scheme"], new_rd["theme"][0]["in_scheme"]) - self.assertEqual( - refs["field_of_science"]["scheme"], - new_rd["field_of_science"][0]["in_scheme"], - ) - self.assertEqual(refs["language"]["scheme"], new_rd["language"][0]["in_scheme"]) - self.assertEqual( - refs["access_type"]["scheme"], - new_rd["access_rights"]["access_type"]["in_scheme"], - ) - self.assertEqual( - refs["restriction_grounds"]["scheme"], - new_rd["access_rights"]["restriction_grounds"][0]["in_scheme"], - ) - self.assertEqual( - refs["license"]["scheme"], - new_rd["access_rights"]["license"][0]["in_scheme"], - ) - self.assertEqual( - refs["identifier_type"]["scheme"], - new_rd["other_identifier"][0]["type"]["in_scheme"], - ) - self.assertEqual(refs["location"]["scheme"], new_rd["spatial"][0]["place_uri"]["in_scheme"]) - self.assertEqual(refs["file_type"]["scheme"], new_rd["files"][0]["file_type"]["in_scheme"]) - self.assertEqual( - refs["use_category"]["scheme"], - new_rd["files"][0]["use_category"]["in_scheme"], - ) - - self.assertEqual( - 
refs["use_category"]["scheme"], - new_rd["directories"][0]["use_category"]["in_scheme"], - ) - self.assertEqual(refs["research_infra"]["scheme"], new_rd["infrastructure"][0]["in_scheme"]) - self.assertEqual( - refs["contributor_role"]["scheme"], - new_rd["creator"][0]["contributor_role"]["in_scheme"], - ) - self.assertEqual( - refs["contributor_type"]["scheme"], - new_rd["curator"][0]["contributor_type"]["in_scheme"], - ) - self.assertEqual( - refs["funder_type"]["scheme"], - new_rd["is_output_of"][0]["funder_type"]["in_scheme"], - ) - self.assertEqual( - refs["relation_type"]["scheme"], - new_rd["relation"][0]["relation_type"]["in_scheme"], - ) - self.assertEqual( - refs["resource_type"]["scheme"], - new_rd["relation"][0]["entity"]["type"]["in_scheme"], - ) - self.assertEqual( - refs["lifecycle_event"]["scheme"], - new_rd["provenance"][0]["lifecycle_event"]["in_scheme"], - ) - self.assertEqual( - refs["preservation_event"]["scheme"], - new_rd["provenance"][1]["preservation_event"]["in_scheme"], - ) - self.assertEqual( - refs["event_outcome"]["scheme"], - new_rd["provenance"][0]["event_outcome"]["in_scheme"], - ) - - def _assert_label_copied_to_pref_label(self, refs, new_rd): - self.assertEqual(refs["keyword"]["label"], new_rd["theme"][0].get("pref_label", None)) - self.assertEqual( - refs["field_of_science"]["label"], - new_rd["field_of_science"][0].get("pref_label", None), - ) - self.assertEqual( - refs["access_type"]["label"], - new_rd["access_rights"]["access_type"].get("pref_label", None), - ) - self.assertEqual( - refs["restriction_grounds"]["label"], - new_rd["access_rights"]["restriction_grounds"][0].get("pref_label", None), - ) - self.assertEqual( - refs["identifier_type"]["label"], - new_rd["other_identifier"][0]["type"].get("pref_label", None), - ) - self.assertEqual( - refs["location"]["label"], - new_rd["spatial"][0]["place_uri"].get("pref_label", None), - ) - self.assertEqual( - refs["file_type"]["label"], - new_rd["files"][0]["file_type"].get("pref_label", None), - ) - self.assertEqual( - refs["use_category"]["label"], - new_rd["files"][0]["use_category"].get("pref_label", None), - ) - self.assertEqual( - refs["use_category"]["label"], - new_rd["directories"][0]["use_category"].get("pref_label", None), - ) - - self.assertEqual( - refs["research_infra"]["label"], - new_rd["infrastructure"][0].get("pref_label", None), - ) - self.assertEqual( - refs["contributor_role"]["label"], - new_rd["creator"][0]["contributor_role"][0].get("pref_label", None), - ) - self.assertEqual( - refs["contributor_type"]["label"], - new_rd["curator"][0]["contributor_type"][0].get("pref_label", None), - ) - self.assertEqual( - refs["funder_type"]["label"], - new_rd["is_output_of"][0]["funder_type"].get("pref_label", None), - ) - self.assertEqual( - refs["relation_type"]["label"], - new_rd["relation"][0]["relation_type"].get("pref_label", None), - ) - self.assertEqual( - refs["resource_type"]["label"], - new_rd["relation"][0]["entity"]["type"].get("pref_label", None), - ) - self.assertEqual( - refs["lifecycle_event"]["label"], - new_rd["provenance"][0]["lifecycle_event"].get("pref_label", None), - ) - self.assertEqual( - refs["preservation_event"]["label"], - new_rd["provenance"][1]["preservation_event"].get("pref_label", None), - ) - self.assertEqual( - refs["event_outcome"]["label"], - new_rd["provenance"][0]["event_outcome"].get("pref_label", None), - ) - - def _assert_label_copied_to_title(self, refs, new_rd): - required_langs = dict( - (lang, val) - for lang, val in 
refs["language"]["label"].items() - if lang in ["fi", "sv", "en", "und"] - ) - self.assertEqual(required_langs, new_rd["language"][0].get("title", None)) - self.assertEqual( - refs["license"]["label"], - new_rd["access_rights"]["license"][0].get("title", None), - ) - - def _assert_label_copied_to_name(self, refs, new_rd): - self.assertEqual( - refs["organization"]["label"], - new_rd["is_output_of"][0]["source_organization"][0].get("name", None), - ) - self.assertEqual( - refs["organization"]["label"], - new_rd["is_output_of"][0]["has_funding_agency"][0].get("name", None), - ) - self.assertEqual( - refs["organization"]["label"], - new_rd["other_identifier"][0]["provider"].get("name", None), - ) - self.assertEqual( - refs["organization"]["label"], - new_rd["contributor"][0]["member_of"].get("name", None), - ) - self.assertEqual( - refs["organization"]["label"], - new_rd["creator"][0]["member_of"].get("name", None), - ) - self.assertEqual( - refs["organization"]["label"], - new_rd["curator"][0]["is_part_of"].get("name", None), - ) - self.assertEqual( - refs["organization"]["label"], - new_rd["publisher"]["is_part_of"].get("name", None), - ) - self.assertEqual( - refs["organization"]["label"], - new_rd["rights_holder"][0]["is_part_of"].get("name", None), - ) - - def test_refdata_sub_org_main_org_population(self): - # Test parent org gets populated when sub org is from ref data and user has not provided is_part_of relation - self.cr_test_data["research_dataset"]["publisher"] = { - "@type": "Organization", - "identifier": "10076-A800", - } - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual("is_part_of" in response.data["research_dataset"]["publisher"], True) - self.assertEqual( - "http://uri.suomi.fi/codelist/fairdata/organization/code/10076", - response.data["research_dataset"]["publisher"]["is_part_of"]["identifier"], - ) - self.assertTrue( - response.data["research_dataset"]["publisher"]["is_part_of"].get("name", False) - ) - - # Test parent org does not get populated when sub org is from ref data and user has provided is_part_of relation - self.cr_test_data["research_dataset"]["publisher"] = { - "@type": "Organization", - "identifier": "10076-A800", - "is_part_of": { - "@type": "Organization", - "identifier": "test_id", - "name": {"und": "test_name"}, - }, - } - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual("is_part_of" in response.data["research_dataset"]["publisher"], True) - self.assertEqual( - "test_id", - response.data["research_dataset"]["publisher"]["is_part_of"]["identifier"], - ) - self.assertEqual( - "test_name", - response.data["research_dataset"]["publisher"]["is_part_of"]["name"]["und"], - ) - - # Test nothing happens when org is a parent org - self.cr_test_data["research_dataset"]["publisher"] = { - "@type": "Organization", - "identifier": "10076", - } - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual("is_part_of" not in response.data["research_dataset"]["publisher"], True) diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/rems.py b/src/metax_api/tests/api/rest/v2/views/datasets/rems.py deleted file mode 100755 index 1f293ce6..00000000 --- 
a/src/metax_api/tests/api/rest/v2/views/datasets/rems.py +++ /dev/null @@ -1,702 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2020 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -import unittest -from copy import deepcopy - -import responses -from django.conf import settings as django_settings -from rest_framework import status - -from metax_api.services import RedisCacheService as cache, ReferenceDataMixin as RDM -from metax_api.tests.utils import get_test_oidc_token - -from .write import CatalogRecordApiWriteCommon - -IDA_CATALOG = django_settings.IDA_DATA_CATALOG_IDENTIFIER - - -@unittest.skipIf(django_settings.REMS["ENABLED"] is not True, "Only run if REMS is enabled") -class CatalogRecordApiWriteREMS(CatalogRecordApiWriteCommon): - - rf = RDM.get_reference_data(cache) - # get by code to prevent failures if list ordering changes - access_permit = [ - type for type in rf["reference_data"]["access_type"] if type["code"] == "permit" - ][0] - access_open = [type for type in rf["reference_data"]["access_type"] if type["code"] == "open"][ - 0 - ] - - permit_rights = { - # license type does not matter - "license": [ - { - "title": rf["reference_data"]["license"][0]["label"], - "identifier": rf["reference_data"]["license"][0]["uri"], - } - ], - "access_type": { - "in_scheme": access_permit["scheme"], - "identifier": access_permit["uri"], - "pref_label": access_permit["label"], - }, - } - - open_rights = { - "access_type": { - "in_scheme": access_open["scheme"], - "identifier": access_open["uri"], - "pref_label": access_open["label"], - } - } - - # any other than what is included in permit_rights is sufficient - other_license = rf["reference_data"]["license"][1] - - def setUp(self): - super().setUp() - # Create ida data catalog - dc = self._get_object_from_test_data("datacatalog", requested_index=0) - dc_id = IDA_CATALOG - dc["catalog_json"]["identifier"] = dc_id - self.client.post("/rest/v2/datacatalogs", dc, format="json") - - # token for end user access - self.token = get_test_oidc_token(new_proxy=True) - - # mock successful rems access for creation, add fails later if needed. - # Not using regex to allow individual access failures - for entity in ["user", "workflow", "license", "resource", "catalogue-item"]: - self._mock_rems_write_access_succeeds("POST", entity, "create") - - self._mock_rems_read_access_succeeds("license") - - # mock successful rems access for deletion. 
Add fails later - for entity in ["catalogue-item", "workflow", "resource"]: - self._mock_rems_write_access_succeeds(method="PUT", entity=entity, action="archived") - self._mock_rems_write_access_succeeds(method="PUT", entity=entity, action="enabled") - - self._mock_rems_read_access_succeeds("catalogue-item") - self._mock_rems_read_access_succeeds("application") - self._mock_rems_write_access_succeeds(method="POST", entity="application", action="close") - - responses.add( - responses.GET, - f"{django_settings.REMS['BASE_URL']}/health", - json={"healthy": True}, - status=200, - ) - - def _get_access_granter(self, malformed=False): - """ - Returns user information - """ - access_granter = { - "userid": "testcaseuser" if not malformed else 1234, - "name": "Test User", - "email": "testcase@user.com", - } - - return access_granter - - def _mock_rems_write_access_succeeds(self, method, entity, action): - """ - method: HTTP method to be mocked [PUT, POST] - entity: REMS entity [application, catalogue-item, license, resource, user, workflow] - action: Action taken to entity [archived, close, create, edit, enabled] - """ - req_type = responses.POST if method == "POST" else responses.PUT - - body = {"success": True} - - if method == "POST" and action != "close": - # action condition needed because applications are closed with POST method - body["id"] = 6 - - responses.add( - req_type, - f"{django_settings.REMS['BASE_URL']}/{entity}s/{action}", - json=body, - status=200, - ) - - def _mock_rems_read_access_succeeds(self, entity): - if entity == "license": - resp = [ - { - "id": 7, - "licensetype": "link", - "enabled": True, - "archived": False, - "localizations": { - "fi": { - "title": self.rf["reference_data"]["license"][0]["label"]["fi"], - "textcontent": self.rf["reference_data"]["license"][0]["uri"], - }, - "und": { - "title": self.rf["reference_data"]["license"][0]["label"]["und"], - "textcontent": self.rf["reference_data"]["license"][0]["uri"], - }, - }, - }, - { - "id": 8, - "licensetype": "link", - "enabled": True, - "archived": False, - "localizations": { - "en": { - "title": self.rf["reference_data"]["license"][1]["label"]["en"], - "textcontent": self.rf["reference_data"]["license"][1]["uri"], - } - }, - }, - ] - - elif entity == "catalogue-item": - resp = [ - { - "archived": False, - "localizations": { - "en": { - "id": 18, - "langcode": "en", - "title": "Removal test", - "infourl": "https://url.to.etsin.fi", - } - }, - "resource-id": 19, - "start": "2020-01-02T14:06:13.496Z", - "wfid": 15, - "resid": "preferred identifier", - "formid": 3, - "id": 18, - "expired": False, - "end": None, - "enabled": True, - } - ] - - elif entity == "application": - # only mock relevant data - resp = [ - { - "application/workflow": { - "workflow.dynamic/handlers": [{"userid": "somehandler"}] - }, - "application/id": 3, - "application/applicant": {"userid": "someapplicant"}, - "application/resources": [ - { - "catalogue-item/title": {"en": "Removal test"}, - "resource/ext-id": "some:pref:id", - "catalogue-item/id": 5, - } - ], - "application/state": "application.state/draft", - }, - { - "application/workflow": {"workflow.dynamic/handlers": [{"userid": "someid"}]}, - "application/id": 2, - "application/applicant": {"userid": "someotherapplicant"}, - "application/resources": [ - { - "catalogue-item/title": {"en": "Removal test"}, - "resource/ext-id": "some:pref:id", - "catalogue-item/id": 5, - } - ], - "application/state": "application.state/approved", - }, - { - "application/workflow": 
{"workflow.dynamic/handlers": [{"userid": "remsuid"}]}, - "application/id": 1, - "application/applicant": {"userid": "someapplicant"}, - "application/resources": [ - { - "catalogue-item/title": {"en": "Removal test"}, - "resource/ext-id": "Same:title:with:different:catalogue:item", - "catalogue-item/id": 18, - } - ], - "application/state": "application.state/draft", - }, - ] - - responses.add( - responses.GET, - f"{django_settings.REMS['BASE_URL']}/{entity}s", - json=resp, - status=200, - ) - - def _mock_rems_access_return_403(self, method, entity, action=""): - """ - Works also for GET method since failure responses from rems are identical for write and read operations - """ - req_type = ( - responses.POST - if method == "POST" - else responses.PUT - if method == "PUT" - else responses.GET - ) - - responses.replace( - req_type, - f"{django_settings.REMS['BASE_URL']}/{entity}s/{action}", - status=403, # anything else than 200 is a fail - ) - - def _mock_rems_access_return_error(self, method, entity, action=""): - """ - operation status is defined in the body so 200 response can also be failure. - """ - req_type = ( - responses.POST - if method == "POST" - else responses.PUT - if method == "PUT" - else responses.GET - ) - - errors = [ - { - "type": "some kind of identifier of this error", - "somedetail": "entity identifier the error is conserning", - } - ] - - responses.replace( - req_type, - f"{django_settings.REMS['BASE_URL']}/{entity}s/{action}", - json={"success": False, "errors": errors}, - status=200, - ) - - def _mock_rems_access_crashes(self, method, entity, action=""): - """ - Crash happens for example if there is a network error. Can be used for GET also - """ - req_type = ( - responses.POST - if method == "POST" - else responses.PUT - if method == "PUT" - else responses.GET - ) - - responses.replace( - req_type, - f"{django_settings.REMS['BASE_URL']}/{entity}s/{action}", - body=Exception("REMS_service should catch this one also"), - ) - - def _create_new_rems_dataset(self): - """ - Modifies catalog record to be REMS managed and post it to Metax - """ - self.cr_test_data["research_dataset"]["access_rights"] = self.permit_rights - self.cr_test_data["data_catalog"] = IDA_CATALOG - self.cr_test_data["access_granter"] = self._get_access_granter() - - response = self.client.post( - "/rest/v2/datasets?include_user_metadata", self.cr_test_data, format="json" - ) - - return response - - @responses.activate - def test_creating_permit_dataset_creates_catalogue_item_service_succeeds(self): - """ - Tests that catalogue item in REMS is created correctly on permit dataset creation - """ - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertTrue( - response.data.get("rems_identifier") is not None, - "rems_identifier should be present", - ) - self.assertTrue( - response.data.get("access_granter") is not None, - "access_granter should be present", - ) - - @responses.activate - def test_creating_permit_dataset_creates_catalogue_item_service_fails_1(self): - """ - Test unsuccessful rems access - """ - self._mock_rems_access_return_403("POST", "workflow", "create") - - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data) - self.assertTrue("failed to publish updates" in response.data["detail"][0], response.data) - - @responses.activate - def test_creating_permit_dataset_creates_catalogue_item_service_fails_2(self): - """ - Test 
unsuccessful rems access - """ - self._mock_rems_access_return_error("POST", "catalogue-item", "create") - - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data) - - @responses.activate - def test_creating_permit_dataset_creates_catalogue_item_service_fails_3(self): - """ - Test unsuccessful rems access - """ - self._mock_rems_access_crashes("POST", "resource", "create") - - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data) - self.assertTrue("failed to publish updates" in response.data["detail"][0], response.data) - - @responses.activate - def test_changing_dataset_to_permit_creates_new_catalogue_item_succeeds(self): - """ - Test that changing access type to permit invokes the REMS update - """ - - # create dataset without rems managed access - self.cr_test_data["research_dataset"]["access_rights"] = self.open_rights - self.cr_test_data["data_catalog"] = IDA_CATALOG - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - # change to rems managed - cr = response.data - cr["research_dataset"]["access_rights"] = self.permit_rights - cr["access_granter"] = self._get_access_granter() - - response = self.client.put(f'/rest/v2/datasets/{cr["id"]}', cr, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertTrue( - response.data.get("rems_identifier") is not None, - "rems_identifier should be present", - ) - self.assertTrue( - response.data.get("access_granter") is not None, - "access_granter should be present", - ) - - @responses.activate - def test_changing_dataset_to_permit_creates_new_catalogue_item_fails(self): - """ - Test error handling on metax update operation - """ - self._mock_rems_access_return_error("POST", "user", "create") - - # create dataset without rems managed access - self.cr_test_data["research_dataset"]["access_rights"] = self.open_rights - self.cr_test_data["data_catalog"] = IDA_CATALOG - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - # change to rems managed - cr = response.data - cr["research_dataset"]["access_rights"] = self.permit_rights - cr["access_granter"] = self._get_access_granter() - - response = self.client.put(f'/rest/v2/datasets/{cr["id"]}', cr, format="json") - self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data) - - @responses.activate - def test_changing_access_type_to_other_closes_rems_entities_succeeds(self): - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - cr = response.data - cr["research_dataset"]["access_rights"] = self.open_rights - - response = self.client.put(f'/rest/v2/datasets/{cr["id"]}', cr, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - @responses.activate - def test_changing_access_type_to_other_closes_rems_entities_fails(self): - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - self._mock_rems_access_return_error("POST", "application", "close") - - cr = response.data - cr["research_dataset"]["access_rights"] = self.open_rights - - response = 
- @responses.activate - def test_changing_access_type_to_other_closes_rems_entities_succeeds(self): - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - cr = response.data - cr["research_dataset"]["access_rights"] = self.open_rights - - response = self.client.put(f'/rest/v2/datasets/{cr["id"]}', cr, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - @responses.activate - def test_changing_access_type_to_other_closes_rems_entities_fails(self): - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - self._mock_rems_access_return_error("POST", "application", "close") - - cr = response.data - cr["research_dataset"]["access_rights"] = self.open_rights - - response = self.client.put(f'/rest/v2/datasets/{cr["id"]}', cr, format="json") - self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data) - - @responses.activate - def test_changing_dataset_license_updates_rems(self): - """ - Create a REMS dataset and change its license. Ensure that the - request is successful and that the dataset's rems_identifier is changed. - """ - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - cr_before = response.data - - rems_id_before = cr_before["rems_identifier"] - cr_before["research_dataset"]["access_rights"]["license"] = [ - { - "title": self.other_license["label"], - "identifier": self.other_license["uri"], - } - ] - - response = self.client.put(f'/rest/v2/datasets/{cr_before["id"]}', cr_before, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - cr_after = response.data - self.assertNotEqual( - rems_id_before, - cr_after["rems_identifier"], - "REMS identifier should have been changed", - ) - - @responses.activate - def test_changing_license_dont_allow_access_granter_changes(self): - """ - Create a REMS dataset and change its license. Ensure that the - request is successful and that the dataset's access_granter is not changed. - """ - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - cr_before = response.data - - cr_before["access_granter"]["userid"] = "newid" - cr_before["research_dataset"]["access_rights"]["license"] = [ - {"identifier": self.other_license["uri"]} - ] - - response = self.client.put(f'/rest/v2/datasets/{cr_before["id"]}', cr_before, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - cr_after = response.data - self.assertNotEqual( - "newid", - cr_after["access_granter"]["userid"], - "userid should not have been changed", - ) - - @responses.activate - def test_deleting_license_updates_rems(self): - """ - Create a REMS dataset and delete its license. Ensure that rems_identifier is removed and no failures occur. - """ - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - cr_before = response.data - - cr_before["research_dataset"]["access_rights"].pop("license") - - response = self.client.put(f'/rest/v2/datasets/{cr_before["id"]}', cr_before, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - cr_after = response.data - self.assertTrue( - cr_after.get("rems_identifier") is None, - "REMS identifier should have been deleted", - ) - self.assertTrue( - cr_after.get("access_granter") is None, - "access_granter should have been deleted", - ) - - @responses.activate - def test_creating_permit_dataset_creates_catalogue_item_end_user(self): - """ - Tests that the catalogue item in REMS is created correctly on permit dataset creation. - User information is fetched from the token.
- """ - self._set_http_authorization("owner") - - # modify catalog record - self.cr_test_data["user_created"] = self.token["CSCUserName"] - self.cr_test_data["metadata_provider_user"] = self.token["CSCUserName"] - self.cr_test_data["metadata_provider_org"] = self.token["schacHomeOrganization"] - self.cr_test_data["metadata_owner_org"] = self.token["schacHomeOrganization"] - self.cr_test_data["research_dataset"]["access_rights"] = self.permit_rights - self.cr_test_data["data_catalog"] = IDA_CATALOG - - # end user doesn't have permissions to the files and they are also not needed in this test - del self.cr_test_data["research_dataset"]["files"] - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - @responses.activate - def test_deleting_permit_dataset_removes_catalogue_item_succeeds(self): - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - cr_id = response.data["id"] - - # delete dataset - response = self.client.delete(f"/rest/v2/datasets/{cr_id}") - self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) - - cr = self.client.get(f"/rest/v2/datasets/{cr_id}?removed").data - self.assertTrue(cr.get("rems_identifier") is None, "rems_identifier should not be present") - self.assertTrue(cr.get("access_granter") is None, "access_granter should not be present") - - @responses.activate - def test_deleting_permit_dataset_removes_catalogue_item_fails(self): - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - # delete dataset - self._mock_rems_access_return_error("PUT", "catalogue-item", "enabled") - - response = self.client.delete(f'/rest/v2/datasets/{response.data["id"]}') - self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data) - - @responses.activate - def test_deprecating_permit_dataset_removes_catalogue_item_succeeds(self): - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - cr_before = response.data - # deprecate dataset - response = self.client.delete( - f"/rest/v2/files/{cr_before['research_dataset']['files'][0]['identifier']}" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - cr_after = self.client.get(f'/rest/v2/datasets/{cr_before["id"]}').data - self.assertTrue( - cr_after.get("rems_identifier") is None, - "rems_identifier should not be present", - ) - self.assertTrue( - cr_after.get("access_granter") is None, - "access_granter should not be present", - ) - - @responses.activate - def test_deprecating_permit_dataset_removes_catalogue_item_fails(self): - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - # deprecate dataset - self._mock_rems_access_crashes("PUT", "workflow", "archived") - - response = self.client.delete( - f"/rest/v2/files/{response.data['research_dataset']['files'][0]['identifier']}" - ) - self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE, response.data) - self.assertTrue("failed to publish" in response.data["detail"][0], response.data) - - def test_missing_access_granter(self): - """ - Access_granter field is required when dataset is made REMS managed and - user is service. 
- """ - - # test on create - self.cr_test_data["research_dataset"]["access_rights"] = self.permit_rights - self.cr_test_data["data_catalog"] = IDA_CATALOG - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertTrue("access_granter" in response.data["detail"][0], response.data) - - # test on update - self.cr_test_data["research_dataset"]["access_rights"] = self.open_rights - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - cr = response.data - cr["research_dataset"]["access_rights"] = self.permit_rights - response = self.client.put(f'/rest/v2/datasets/{cr["id"]}', cr, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertTrue("access_granter" in response.data["detail"][0], response.data) - - def test_bad_access_granter_parameter(self): - """ - Access_granter values must be strings - """ - self.cr_test_data["research_dataset"]["access_rights"] = self.permit_rights - self.cr_test_data["data_catalog"] = IDA_CATALOG - self.cr_test_data["access_granter"] = self._get_access_granter(malformed=True) - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertTrue("must be string" in response.data["detail"][0], response.data) - - def test_missing_license_in_dataset(self): - """ - License is required when dataset is REMS managed - """ - self.cr_test_data["research_dataset"]["access_rights"] = deepcopy(self.permit_rights) - del self.cr_test_data["research_dataset"]["access_rights"]["license"] - self.cr_test_data["data_catalog"] = IDA_CATALOG - - response = self.client.post( - f"/rest/v2/datasets?access_granter={self._get_access_granter()}", - self.cr_test_data, - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertTrue("must define license" in response.data["detail"][0], response.data) - - @responses.activate - def test_only_return_rems_info_to_privileged(self): - self._set_http_authorization("service") - - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertTrue( - response.data.get("rems_identifier") is not None, - "rems_identifier should be returned to owner", - ) - self.assertTrue( - response.data.get("access_granter") is not None, - "access_granter should be returned to owner", - ) - - self._set_http_authorization("no") - response = self.client.get(f'/rest/v2/datasets/{response.data["id"]}') - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertTrue( - response.data.get("rems_identifier") is None, - "rems_identifier should not be returned to Anon", - ) - self.assertTrue( - response.data.get("access_granter") is None, - "access_granter should not be returned to Anon", - ) - - @responses.activate - def test_rems_info_cannot_be_changed(self): - response = self._create_new_rems_dataset() - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - cr = response.data - - cr["rems_identifier"] = "some:new:identifier" - cr["access_granter"]["name"] = "New Name" - - response = self.client.put(f'/rest/v2/datasets/{cr["id"]}', cr, format="json") - 
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertNotEqual( - response.data["rems_identifier"], - "some:new:identifier", - "rems_id should not be changed", - ) - self.assertNotEqual( - response.data["access_granter"], - "New Name", - "access_granter should not be changed", - ) diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/write.py b/src/metax_api/tests/api/rest/v2/views/datasets/write.py index ef00e592..43f3d012 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/write.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/write.py @@ -6,18 +6,14 @@ # :license: MIT from copy import deepcopy -from datetime import timedelta -import responses from django.conf import settings as django_settings from django.core.management import call_command from rest_framework import status from rest_framework.test import APITestCase -from metax_api.models import AlternateRecordSet, CatalogRecordV2, Contract, DataCatalog, File -from metax_api.models.catalog_record import ACCESS_TYPES -from metax_api.tests.utils import TestClassUtils, get_test_oidc_token, test_data_file_path -from metax_api.utils import get_tz_aware_now_without_micros +from metax_api.models import CatalogRecordV2, DataCatalog +from metax_api.tests.utils import TestClassUtils, test_data_file_path CR = CatalogRecordV2 @@ -64,17 +60,6 @@ def setUp(self): self._use_http_authorization() - def update_record(self, record): - return self.client.put("/rest/v2/datasets/%d" % record["id"], record, format="json") - - def get_next_version(self, record): - self.assertEqual("next_dataset_version" in record, True) - response = self.client.get( - "/rest/v2/datasets/%d" % record["next_dataset_version"]["id"], format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - return response.data - # # # @@ -201,312 +186,6 @@ class CatalogRecordApiWriteCreateTests(CatalogRecordApiWriteCommon): def setUp(self): super().setUp() - def test_issued_date_is_generated(self): - """ Issued date is generated for all but harvested catalogs if it doesn't exist """ - dc = DataCatalog.objects.get(pk=2) - dc.catalog_json["identifier"] = IDA_CATALOG # Test with IDA catalog - dc.force_save() - - self.cr_test_data["data_catalog"] = dc.catalog_json - self.cr_test_data["research_dataset"].pop("issued", None) - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertTrue("issued" in response.data["research_dataset"], response.data) - - def test_create_catalog_record(self): - self.cr_test_data["research_dataset"]["preferred_identifier"] = "this_should_be_overwritten" - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual("research_dataset" in response.data.keys(), True) - self.assertEqual( - "metadata_version_identifier" in response.data["research_dataset"], - True, - "metadata_version_identifier should have been generated", - ) - self.assertEqual( - "preferred_identifier" in response.data["research_dataset"], - True, - "preferred_identifier should have been generated", - ) - self.assertNotEqual( - self.cr_test_data["research_dataset"]["preferred_identifier"], - response.data["research_dataset"]["preferred_identifier"], - "in fairdata catalogs, user is not allowed to set preferred_identifier", - ) - self.assertNotEqual(
response.data["research_dataset"]["preferred_identifier"], - response.data["research_dataset"]["metadata_version_identifier"], - "preferred_identifier and metadata_version_identifier should be generated separately", - ) - cr = CatalogRecordV2.objects.get(pk=response.data["id"]) - self.assertEqual( - cr.date_created >= get_tz_aware_now_without_micros() - timedelta(seconds=5), - True, - "Timestamp should have been updated during object creation", - ) - - def test_create_catalog_record_as_harvester(self): - self.cr_test_data["research_dataset"]["preferred_identifier"] = "this_should_be_saved" - self.cr_test_data["data_catalog"] = 3 - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual( - self.cr_test_data["research_dataset"]["preferred_identifier"], - response.data["research_dataset"]["preferred_identifier"], - "in harvested catalogs, user (the harvester) is allowed to set preferred_identifier", - ) - - def test_preferred_identifier_is_checked_also_from_deleted_records(self): - """ - If a catalog record having a specific preferred identifier is deleted, and a new catalog - record is created having the same preferred identifier, metax should deny this request - since a catalog record with the same pref id already exists, albeit deleted. - """ - - # dc 3 happens to be harvested catalog, which allows setting pref id - cr = CatalogRecordV2.objects.filter(data_catalog_id=3).first() - response = self.client.delete("/rest/v2/datasets/%d" % cr.id) - self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) - - self.cr_test_data["research_dataset"]["preferred_identifier"] = cr.preferred_identifier - self.cr_test_data["data_catalog"] = 3 - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual( - "already exists" in response.data["research_dataset"][0], - True, - response.data, - ) - - def test_create_catalog_contract_string_identifier(self): - contract_identifier = Contract.objects.first().contract_json["identifier"] - self.cr_test_data["contract"] = contract_identifier - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual( - response.data["contract"]["identifier"], contract_identifier, response.data - ) - - def test_create_catalog_error_contract_string_identifier_not_found(self): - self.cr_test_data["contract"] = "doesnotexist" - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - # self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, 'Should have raised 404 not found') - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual( - "contract" in response.data, - True, - "Error should have been about contract not found", - ) - - def test_create_catalog_record_json_validation_error_1(self): - """ - Ensure the json path of the error is returned along with other details - """ - self.cr_test_data["research_dataset"]["title"] = 1234456 - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual( - len(response.data), - 2, - "there should be two errors (error_identifier is one of them)", - ) - self.assertEqual( - "research_dataset" in response.data.keys(), - True, - "The error should concern the field research_dataset", 
- ) - self.assertEqual( - "1234456 is not of type" in response.data["research_dataset"][0], - True, - response.data, - ) - self.assertEqual( - "Json path: ['title']" in response.data["research_dataset"][0], - True, - response.data, - ) - - def test_create_catalog_record_json_validation_error_2(self): - """ - Ensure the json path of the error is returned along with other details also in - objects that are deeply nested - """ - self.cr_test_data["research_dataset"]["provenance"] = [ - { - "title": {"en": "provenance title"}, - "was_associated_with": [{"@type": "Person", "xname": "seppo"}], - } - ] - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual( - len(response.data), - 2, - "there should be two errors (error_identifier is one of them)", - ) - self.assertEqual( - "research_dataset" in response.data.keys(), - True, - "The error should concern the field research_dataset", - ) - self.assertEqual( - "is not valid" in response.data["research_dataset"][0], True, response.data - ) - self.assertEqual( - "was_associated_with" in response.data["research_dataset"][0], - True, - response.data, - ) - - def test_create_catalog_record_allowed_projects_ok(self): - response = self.client.post( - "/rest/v2/datasets?allowed_projects=project_x", - self.cr_test_data, - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - def test_create_catalog_record_allowed_projects_fail(self): - # dataset file not in allowed projects - response = self.client.post( - "/rest/v2/datasets?allowed_projects=no,permission", - self.cr_test_data, - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - - # ensure list is properly handled (separated by comma, end result should be list) - response = self.client.post( - "/rest/v2/datasets?allowed_projects=no_good_project_x,another", - self.cr_test_data, - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - - # handle empty value - response = self.client.post( - "/rest/v2/datasets?allowed_projects=", self.cr_test_data, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - - # Other trickery - response = self.client.post( - "/rest/v2/datasets?allowed_projects=,", self.cr_test_data, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - - # - # create list operations - # - - def test_create_catalog_record_list(self): - response = self.client.post( - "/rest/v2/datasets", - [self.cr_test_data, self.cr_test_data_new_identifier], - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertEqual("success" in response.data.keys(), True) - self.assertEqual("failed" in response.data.keys(), True) - self.assertEqual("object" in response.data["success"][0].keys(), True) - self.assertEqual(len(response.data["success"]), 2) - self.assertEqual(len(response.data["failed"]), 0) - - def test_create_catalog_record_list_error_one_fails(self): - self.cr_test_data["research_dataset"]["title"] = 1234456 - response = self.client.post( - "/rest/v2/datasets", - [self.cr_test_data, self.cr_test_data_new_identifier], - format="json", - ) - - """ - List response looks like - { - 'success': [ - { 'object': object }, - more objects... 
- ], - 'failed': [ - { - 'object': object, - 'errors': {'field': ['message'], 'otherfield': ['message']} - }, - more objects... - ] - } - """ - self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertEqual("success" in response.data.keys(), True) - self.assertEqual("failed" in response.data.keys(), True) - self.assertEqual("object" in response.data["failed"][0].keys(), True) - self.assertEqual( - "research_dataset" in response.data["failed"][0]["errors"], - True, - response.data, - ) - self.assertEqual( - "1234456 is not of type" in response.data["failed"][0]["errors"]["research_dataset"][0], - True, - response.data, - ) - self.assertEqual( - "Json path: ['title']" in response.data["failed"][0]["errors"]["research_dataset"][0], - True, - response.data, - ) - - def test_create_catalog_record_list_error_all_fail(self): - # data catalog is a required field, should fail - self.cr_test_data["data_catalog"] = None - self.cr_test_data_new_identifier["data_catalog"] = None - - response = self.client.post( - "/rest/v2/datasets", - [self.cr_test_data, self.cr_test_data_new_identifier], - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual("success" in response.data.keys(), True) - self.assertEqual("failed" in response.data.keys(), True) - self.assertEqual("object" in response.data["failed"][0].keys(), True) - self.assertEqual(len(response.data["success"]), 0) - self.assertEqual(len(response.data["failed"]), 2) - - def test_parameter_migration_override_preferred_identifier_when_creating(self): - """ - Normally, when saving to att/ida catalogs, providing a custom preferred_identifier is not - permitted. Using the optional query parameter ?migration_override=bool a custom preferred_identifier - can be passed. - """ - custom_pid = "custom-pid-value" - self.cr_test_data["research_dataset"]["preferred_identifier"] = custom_pid - response = self.client.post( - "/rest/v2/datasets?migration_override", self.cr_test_data, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data["research_dataset"]["preferred_identifier"], custom_pid) - - def test_parameter_migration_override_no_preferred_identifier_when_creating(self): - """ - Normally, when saving to att/ida catalogs, providing a custom preferred_identifier is not - permitted. Using the optional query parameter ?migration_override=bool a custom preferred_identifier - can be passed.
- """ - self.cr_test_data["research_dataset"]["preferred_identifier"] = "" - response = self.client.post( - "/rest/v2/datasets?migration_override", self.cr_test_data, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertTrue(len(response.data["research_dataset"]["preferred_identifier"]) > 0) - - self.cr_test_data["research_dataset"].pop("preferred_identifier", None) - response = self.client.post( - "/rest/v2/datasets?migration_override", self.cr_test_data, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertTrue(len(response.data["research_dataset"]["preferred_identifier"]) > 0) - def test_create_catalog_record_using_pid_type(self): # Test with pid_type = urn self.cr_test_data["research_dataset"]["preferred_identifier"] = "" @@ -572,70 +251,6 @@ class CatalogRecordApiWriteIdentifierUniqueness(CatalogRecordApiWriteCommon): # create operations # - def test_create_catalog_record_error_preferred_identifier_cant_be_metadata_version_identifier( - self, - ): - """ - preferred_identifier can never be the same as a metadata_version_identifier in another cr, in any catalog. - """ - existing_metadata_version_identifier = CatalogRecordV2.objects.get( - pk=1 - ).metadata_version_identifier - self.cr_test_data["research_dataset"][ - "preferred_identifier" - ] = existing_metadata_version_identifier - - # setting preferred_identifier is only allowed in harvested catalogs. - self.cr_test_data["data_catalog"] = 3 - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual( - "research_dataset" in response.data.keys(), - True, - "The error should be about an error in research_dataset", - ) - - # the error message should clearly state that the value of preferred_identifier appears in the - # field metadata_version_identifier in another record, therefore two asserts - self.assertEqual( - "preferred_identifier" in response.data["research_dataset"][0], - True, - "The error should be about metadata_version_identifier existing with this identifier", - ) - self.assertEqual( - "metadata_version_identifier" in response.data["research_dataset"][0], - True, - "The error should be about metadata_version_identifier existing with this identifier", - ) - - def test_create_catalog_record_error_preferred_identifier_exists_in_same_catalog( - self, - ): - """ - preferred_identifier already existing in the same data catalog is an error - """ - self.cr_test_data["research_dataset"]["preferred_identifier"] = "pid_by_harvester" - self.cr_test_data["data_catalog"] = 3 - cr_1 = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json").data - - self.cr_test_data["research_dataset"]["preferred_identifier"] = cr_1["research_dataset"][ - "preferred_identifier" - ] - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual( - "research_dataset" in response.data.keys(), - True, - "The error should be about an error in research_dataset", - ) - self.assertEqual( - "preferred_identifier" in response.data["research_dataset"][0], - True, - "The error should be about preferred_identifier already existing", - ) - def test_create_catalog_record_preferred_identifier_exists_in_another_catalog(self): """ preferred_identifier existing in another data catalog is not an error. 
@@ -650,9 +265,9 @@ def test_create_catalog_record_preferred_identifier_exists_in_another_catalog(se response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - # - # update operations - # + # # + # # update operations + # # def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_1( self, ): @@ -675,77 +290,6 @@ def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_1( response = self.client.patch("/rest/v2/datasets/3", data, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - def test_update_catalog_record_preferred_identifier_exists_in_another_catalog_2( - self, - ): - """ - preferred_identifier existing in another data catalog is not an error. - - Test PATCH, when data_catalog is being updated to a different catalog - in the same request. In this case, the uniqueness check has to be executed - on the new data_catalog being passed. - - In this test, catalog is updated to 3, which should not contain a conflicting - identifier. - """ - unique_identifier = self._set_preferred_identifier_to_record(pk=1, catalog_id=1) - - data = self.client.get("/rest/v2/datasets/3").data - data["research_dataset"]["preferred_identifier"] = unique_identifier - data["data_catalog"] = 3 - - response = self.client.patch("/rest/v2/datasets/3", data, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, data) -
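The previous test and the next one pin down where the uniqueness check must run when data_catalog changes within the same PATCH: against the catalog being moved into, not the catalog the record currently sits in. A hedged sketch of that rule; the function name and its exact placement are illustrative, not the service's actual code:

    def preferred_identifier_is_unique(pid, target_catalog_id, current_record_id=None):
        # uniqueness is checked within the *target* catalog of the request
        qs = CatalogRecordV2.objects.filter(
            data_catalog_id=target_catalog_id,
            research_dataset__contains={"preferred_identifier": pid},
        )
        if current_record_id is not None:
            qs = qs.exclude(pk=current_record_id)  # ignore the record being updated
        return not qs.exists()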
- """ - - # setup the record in db which will cause conflict - unique_identifier = self._set_preferred_identifier_to_record(pk=3, catalog_id=3) - - data = {"research_dataset": self.cr_test_data["research_dataset"]} - data["research_dataset"]["preferred_identifier"] = unique_identifier - data["data_catalog"] = 3 - - response = self.client.patch("/rest/v2/datasets/2", data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual( - "preferred_identifier" in response.data["research_dataset"][0], - True, - "The error should be about preferred_identifier already existing", - ) - - def test_remote_doi_dataset_is_validated_against_datacite_format(self): - # Remote input DOI ids need to take datasets for datacite validation - cr = {"research_dataset": self.cr_test_data["research_dataset"]} - cr["research_dataset"]["preferred_identifier"] = "doi:10.5061/dryad.10188854" - cr["data_catalog"] = 3 - cr["metadata_provider_org"] = "metax" - cr["metadata_provider_user"] = "metax" - cr["research_dataset"].pop("publisher", None) - - response = self.client.post("/rest/v2/datasets", cr, format="json") - # Publisher value is required for datacite format, so this should return Http400 - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual( - "a required value for datacite format" in response.data["detail"][0], - True, - response.data, - ) - # # helpers # @@ -783,47 +327,6 @@ def setUp(self): self._set_data_catalog_schema_to_harvester() self.cr_test_data["research_dataset"]["preferred_identifier"] = "unique_pid" - def test_catalog_record_with_not_found_json_schema_gets_default_schema(self): - # catalog has dataset schema, but it is not found on the server - dc = DataCatalog.objects.get(pk=1) - dc.catalog_json["research_dataset_schema"] = "nonexisting" - dc.save() - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - # catalog has no dataset schema at all - dc = DataCatalog.objects.get(pk=1) - dc.catalog_json.pop("research_dataset_schema") - dc.save() - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - def test_catalog_record_create_with_other_schema(self): - """ - Ensure that dataset json schema validation works with other - json schemas than the default IDA - """ - self.cr_test_data["research_dataset"]["remote_resources"] = [ - {"title": "title"}, - {"title": "title"}, - ] - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - self.cr_test_data["research_dataset"]["remote_resources"] = [ - {"title": "title"}, - {"title": "title"}, - { - "woah": "this should give a failure, since title is a required field, and it is missing" - }, - ] - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - def test_catalog_record_draft_is_validated_with_draft_schema(self): """ Ensure that non-published datasets are always validated with draft schema regardless @@ -848,29 +351,6 @@ def test_catalog_record_draft_is_validated_with_draft_schema(self): ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - def 
test_catalog_record_ref_data_validation_with_other_schema(self): - """ - Ensure that dataset reference data validation and population works with other - json schemas than the default IDA. Ref data validation should be schema agnostic - """ - self.cr_test_data["research_dataset"]["other_identifier"] = [ - { - "notation": "urn:1", - "type": { - "identifier": "doi", - }, - } - ] - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual( - "uri.suomi.fi" - in response.data["research_dataset"]["other_identifier"][0]["type"]["identifier"], - True, - "Identifier type should have been populated with data from ref data", - ) - class CatalogRecordApiWriteUpdateTests(CatalogRecordApiWriteCommon): # @@ -879,156 +359,6 @@ class CatalogRecordApiWriteUpdateTests(CatalogRecordApiWriteCommon): # # - def test_update_catalog_record(self): - cr = self.client.get("/rest/v2/datasets/1").data - cr["preservation_description"] = "what" - - response = self.client.put("/rest/v2/datasets/1", cr, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data["preservation_description"], "what") - cr = CatalogRecordV2.objects.get(pk=1) - self.assertEqual( - cr.date_modified >= get_tz_aware_now_without_micros() - timedelta(seconds=5), - True, - "Timestamp should have been updated during object update", - ) - - def test_update_catalog_record_error_using_preferred_identifier(self): - cr = self.client.get("/rest/v2/datasets/1").data - response = self.client.put( - "/rest/v2/datasets/%s" % cr["research_dataset"]["preferred_identifier"], - {"whatever": 123}, - format="json", - ) - self.assertEqual( - response.status_code, - status.HTTP_404_NOT_FOUND, - "Update operation should return 404 when using preferred_identifier", - ) - - def test_update_catalog_record_error_required_fields(self): - """ - Field 'research_dataset' is missing, which should result in an error, since PUT - replaces an object and requires all 'required' fields to be present. 
- """ - cr = self.client.get("/rest/v2/datasets/1").data - cr.pop("research_dataset") - response = self.client.put("/rest/v2/datasets/1", cr, format="json") - - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual( - "research_dataset" in response.data.keys(), - True, - "Error for field 'research_dataset' is missing from response.data", - ) - - def test_update_catalog_record_not_found(self): - response = self.client.put( - "/rest/v2/datasets/doesnotexist", self.cr_test_data, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - def test_update_catalog_record_contract(self): - # take any cr that has a contract set - cr = CatalogRecordV2.objects.filter(contract_id__isnull=False).first() - old_contract_id = cr.contract.id - - # update contract to any different contract - cr_1 = self.client.get("/rest/v2/datasets/%d" % cr.id).data - cr_1["contract"] = Contract.objects.all().exclude(pk=old_contract_id).first().id - - response = self.client.put("/rest/v2/datasets/%d" % cr.id, cr_1, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - new_contract_id = CatalogRecordV2.objects.get(pk=cr.id).contract.id - self.assertNotEqual(old_contract_id, new_contract_id, "Contract should have changed") - - # - # update list operations PUT - # - - def test_catalog_record_update_list(self): - cr_1 = self.client.get("/rest/v2/datasets/1").data - cr_1["preservation_description"] = "updated description" - - cr_2 = self.client.get("/rest/v2/datasets/2").data - cr_2["preservation_description"] = "second updated description" - - response = self.client.put("/rest/v2/datasets", [cr_1, cr_2], format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data["success"]), 2) - - updated_cr = CatalogRecordV2.objects.get(pk=1) - self.assertEqual(updated_cr.preservation_description, "updated description") - updated_cr = CatalogRecordV2.objects.get(pk=2) - self.assertEqual(updated_cr.preservation_description, "second updated description") - - def test_catalog_record_update_list_error_one_fails(self): - cr_1 = self.client.get("/rest/v2/datasets/1").data - cr_1["preservation_description"] = "updated description" - - # data catalog is a required field, should therefore fail - cr_2 = self.client.get("/rest/v2/datasets/2").data - cr_2.pop("data_catalog", None) - - response = self.client.put("/rest/v2/datasets", [cr_1, cr_2], format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual("success" in response.data.keys(), True) - self.assertEqual("failed" in response.data.keys(), True) - self.assertEqual( - isinstance(response.data["success"], list), - True, - "return data should contain key success, which is a list", - ) - self.assertEqual(len(response.data["success"]), 1) - self.assertEqual(len(response.data["failed"]), 1) - - updated_cr = CatalogRecordV2.objects.get(pk=1) - self.assertEqual(updated_cr.preservation_description, "updated description") - - def test_catalog_record_update_list_error_key_not_found(self): - # does not have identifier key - cr_1 = self.client.get("/rest/v2/datasets/1").data - cr_1.pop("id") - cr_1.pop("identifier") - cr_1["research_dataset"].pop("metadata_version_identifier") - - cr_2 = self.client.get("/rest/v2/datasets/2").data - cr_2["preservation_description"] = "second updated description" - - response = self.client.put("/rest/v2/datasets", [cr_1, cr_2], format="json") - 
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual("success" in response.data.keys(), True) - self.assertEqual("failed" in response.data.keys(), True) - self.assertEqual(len(response.data["success"]), 1) - self.assertEqual(len(response.data["failed"]), 1) - - def test_catalog_record_deprecated_and_date_deprecated_cannot_be_set(self): - # Test catalog record's deprecated field cannot be set with POST, PUT or PATCH - - initial_deprecated = True - self.cr_test_data["deprecated"] = initial_deprecated - self.cr_test_data["date_deprecated"] = "2018-01-01T00:00:00" - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.data["deprecated"], False) - self.assertTrue("date_deprecated" not in response.data) - - response_json = self.client.get("/rest/v2/datasets/1").data - initial_deprecated = response_json["deprecated"] - response_json["deprecated"] = not initial_deprecated - response_json["date_deprecated"] = "2018-01-01T00:00:00" - response = self.client.put("/rest/v2/datasets/1", response_json, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data["deprecated"], initial_deprecated) - self.assertTrue("date_deprecated" not in response.data) - - initial_deprecated = self.client.get("/rest/v2/datasets/1").data["deprecated"] - response = self.client.patch( - "/rest/v2/datasets/1", {"deprecated": not initial_deprecated}, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data["deprecated"], initial_deprecated) - self.assertTrue("date_deprecated" not in response.data) - def test_change_datacatalog_ATT_to_IDA(self): cr = self._get_new_full_test_att_cr_data() @@ -1095,264 +425,6 @@ def test_change_datacatalog_ATT_to_IDA(self): ) -class CatalogRecordApiWritePartialUpdateTests(CatalogRecordApiWriteCommon): - # - # - # update apis PATCH - # - # - - def test_update_catalog_record_partial(self): - new_data_catalog = self._get_object_from_test_data("datacatalog", requested_index=1)["id"] - new_data = { - "data_catalog": new_data_catalog, - } - response = self.client.patch( - "/rest/v2/datasets/%s" % self.identifier, new_data, format="json" - ) - - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual( - "research_dataset" in response.data.keys(), - True, - "PATCH operation should return full content", - ) - self.assertEqual( - response.data["data_catalog"]["id"], - new_data_catalog, - "Field data_catalog was not updated", - ) - - # - # update list operations PATCH - # - - def test_catalog_record_partial_update_list(self): - test_data = {} - test_data["id"] = 1 - test_data["preservation_description"] = "description" - - second_test_data = {} - second_test_data["id"] = 2 - second_test_data["preservation_description"] = "description 2" - - response = self.client.patch( - "/rest/v2/datasets", [test_data, second_test_data], format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual( - "success" in response.data, - True, - "response.data should contain list of changed objects", - ) - self.assertEqual(len(response.data), 2, "response.data should contain 2 changed objects") - self.assertEqual( - "research_dataset" in response.data["success"][0]["object"], - True, - "response.data should contain full objects", - ) - - updated_cr = CatalogRecordV2.objects.get(pk=1) - 
self.assertEqual(updated_cr.preservation_description, "description") - - def test_catalog_record_partial_update_list_error_one_fails(self): - test_data = {} - test_data["id"] = 1 - test_data["preservation_description"] = "description" - - second_test_data = {} - second_test_data["preservation_state"] = 555 # value not allowed - second_test_data["id"] = 2 - - response = self.client.patch( - "/rest/v2/datasets", [test_data, second_test_data], format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual("success" in response.data.keys(), True) - self.assertEqual("failed" in response.data.keys(), True) - self.assertEqual(len(response.data["success"]), 1, "success list should contain one item") - self.assertEqual( - len(response.data["failed"]), 1, "there should have been one failed element" - ) - self.assertEqual( - "preservation_state" in response.data["failed"][0]["errors"], - True, - response.data["failed"][0]["errors"], - ) - - def test_catalog_record_partial_update_list_error_key_not_found(self): - # does not have identifier key - test_data = {} - test_data["preservation_state"] = 10 - - second_test_data = {} - second_test_data["id"] = 2 - second_test_data["preservation_state"] = 20 - - response = self.client.patch( - "/rest/v2/datasets", [test_data, second_test_data], format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual("success" in response.data.keys(), True) - self.assertEqual("failed" in response.data.keys(), True) - self.assertEqual(len(response.data["success"]), 1, "success list should contain one item") - self.assertEqual( - len(response.data["failed"]), 1, "there should have been one failed element" - ) - self.assertEqual( - "detail" in response.data["failed"][0]["errors"], - True, - response.data["failed"][0]["errors"], - ) - self.assertEqual( - "identifying key" in response.data["failed"][0]["errors"]["detail"][0], - True, - response.data["failed"][0]["errors"], - ) - - class CatalogRecordApiWriteDeleteTests(CatalogRecordApiWriteCommon): - # - # - # - # delete apis - # - # - # - - def test_delete_catalog_record(self): - url = "/rest/v2/datasets/%s" % self.identifier - response = self.client.delete(url) - self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) - response = self.client.get(url) - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - try: - deleted_catalog_record = CatalogRecordV2.objects.get(identifier=self.identifier) - raise Exception( - "Deleted CatalogRecord should not be retrievable from the default objects table" - ) - except CatalogRecordV2.DoesNotExist: - # a successful test should go here, instead of raising the exception in the try: block - pass - - try: - deleted_catalog_record = CatalogRecordV2.objects_unfiltered.get( - identifier=self.identifier - ) - except CatalogRecordV2.DoesNotExist: - raise Exception( - "Deleted CatalogRecord should not be deleted from the db, but marked as removed" - ) - - self.assertEqual(deleted_catalog_record.removed, True) - self.assertEqual(deleted_catalog_record.identifier, self.identifier) - self.assertEqual( - deleted_catalog_record.date_modified, - deleted_catalog_record.date_removed, - "date_modified should be updated", - ) - - def test_delete_catalog_record_error_using_preferred_identifier(self): - url = "/rest/v2/datasets/%s" % self.preferred_identifier - response = self.client.delete(url) - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) -
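The delete test above relies on Metax's soft-delete convention: the default manager hides removed rows, while objects_unfiltered still returns them, and date_removed is stamped alongside date_modified. A minimal Django sketch of that pattern, assuming illustrative model and manager names rather than the project's actual code:

    from django.db import models
    from django.utils import timezone

    class NotRemovedManager(models.Manager):
        def get_queryset(self):
            # default manager: hide soft-deleted rows from normal queries
            return super().get_queryset().filter(removed=False)

    class SoftDeleteRecord(models.Model):
        identifier = models.CharField(max_length=200, unique=True)
        removed = models.BooleanField(default=False)
        date_modified = models.DateTimeField(null=True)
        date_removed = models.DateTimeField(null=True)

        objects = NotRemovedManager()          # hides removed records
        objects_unfiltered = models.Manager()  # sees everything, as in the test

        def delete(self, *args, **kwargs):
            # soft delete: mark the row as removed instead of issuing a DELETE
            self.removed = True
            self.date_removed = self.date_modified = timezone.now()
            self.save(update_fields=["removed", "date_removed", "date_modified"])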
- def test_bulk_delete_catalog_record_permissions(self): - # create catalog with 'metax' edit permissions and create dataset with this catalog as 'metax' user - cr = self._get_new_test_cr_data() - cr.pop("id") - catalog = self._get_object_from_test_data("datacatalog", requested_index=0) - catalog.pop("id") - catalog["catalog_json"]["identifier"] = "metax-catalog" - catalog["catalog_record_services_edit"] = "metax" - catalog = self.client.post("/rest/v2/datacatalogs", catalog, format="json") - cr["data_catalog"] = { - "id": catalog.data["id"], - "identifier": catalog.data["catalog_json"]["identifier"], - } - - self._use_http_authorization(username="metax") - response = self.client.post("/rest/v2/datasets/", cr, format="json") - metax_cr = response.data["id"] - - # create catalog with 'testuser' edit permissions and create dataset with this catalog as 'testuser' user - cr = self._get_new_test_cr_data() - cr.pop("id") - catalog = self._get_object_from_test_data("datacatalog", requested_index=1) - catalog.pop("id") - catalog["catalog_json"]["identifier"] = "testuser-catalog" - catalog["catalog_record_services_edit"] = "testuser" - catalog = self.client.post("/rest/v2/datacatalogs", catalog, format="json") - cr["data_catalog"] = { - "id": catalog.data["id"], - "identifier": catalog.data["catalog_json"]["identifier"], - } - - self._use_http_authorization(username="testuser", password="testuserpassword") - response = self.client.post("/rest/v2/datasets/", cr, format="json") - testuser_cr = response.data["id"] - - # after trying to delete as 'testuser', only one record is deleted - response = self.client.delete("/rest/v2/datasets", [metax_cr, testuser_cr], format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK) - response = self.client.post( - "/rest/v2/datasets/list?pagination=false", - [metax_cr, testuser_cr], - format="json", - ) - self.assertEqual(len(response.data), 1) - - # deleting from a list of non-accessible resources - response = self.client.delete("/rest/datasets", [metax_cr], format="json") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - response = self.client.post( - "/rest/v2/datasets/list?pagination=false", - [metax_cr, testuser_cr], - format="json", - ) - self.assertEqual(len(response.data), 1) - - def test_bulk_delete_catalog_record(self): - ids = [1, 2, 3] - identifiers = CatalogRecordV2.objects.filter(pk__in=[4, 5, 6]).values_list( - "identifier", flat=True - ) - - for crs in [ids, identifiers]: - response = self.client.delete("/rest/datasets", crs, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(response.data == [1, 2, 3] or response.data == [4, 5, 6]) - response = self.client.post("/rest/datasets/list?pagination=false", crs, format="json") - self.assertFalse(response.data) - - for cr in crs: - if isinstance(cr, int): - deleted = CatalogRecordV2.objects_unfiltered.get(id=cr) - else: - deleted = CatalogRecordV2.objects_unfiltered.get(identifier=cr) - - self.assertEqual(deleted.removed, True) - self.assertEqual( - deleted.date_modified, - deleted.date_removed, - "date_modified should be updated", - ) - - # failing tests - ids = [1000, 2000] - identifiers = ["1000", "2000"] - - for crs in [ids, identifiers]: - response = self.client.delete("/rest/datasets", ids, format="json") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - ids = [] - response = self.client.delete("/rest/datasets", ids, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) -
self.assertTrue("Received empty list of identifiers" in response.data["detail"][0]) - - class CatalogRecordApiWriteAlternateRecords(CatalogRecordApiWriteCommon): """ @@ -1371,235 +443,6 @@ def setUp(self): self.cr_test_data["research_dataset"]["preferred_identifier"] = self.preferred_identifier self.cr_test_data["data_catalog"] = None - def test_alternate_record_set_is_created_if_it_doesnt_exist(self): - """ - Add a record, where a record already existed with the same pref_id, but did not have an - alternate_record_set yet. Ensure a new set is created, and both records are added to it. - """ - - # new record is saved to catalog 3, which does not support versioning - self.cr_test_data["data_catalog"] = 3 - - existing_records_count = CatalogRecordV2.objects.filter( - research_dataset__contains={"preferred_identifier": self.preferred_identifier} - ).count() - self.assertEqual( - existing_records_count, - 1, - "in the beginning, there should be only one record with pref id %s" - % self.preferred_identifier, - ) - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - records = CatalogRecordV2.objects.filter( - research_dataset__contains={"preferred_identifier": self.preferred_identifier} - ) - self.assertEqual( - len(records), - 2, - "after, there should be two records with pref id %s" % self.preferred_identifier, - ) - - # both records are moved to same set - ars_id = records[0].alternate_record_set.id - self.assertEqual(records[0].alternate_record_set.id, ars_id) - self.assertEqual(records[1].alternate_record_set.id, ars_id) - - # records in the set are the ones expected - self.assertEqual(records[0].id, 1) - self.assertEqual(records[1].id, response.data["id"]) - - # records in the set are indeed in different catalogs - self.assertEqual(records[0].data_catalog.id, 1) - self.assertEqual(records[1].data_catalog.id, 3) - - def test_append_to_existing_alternate_record_set_if_it_exists(self): - """ - An alternate_record_set already exists with two records in it. Create a third - record with the same preferred_identifier. The created record should be added - to the existing alternate_record_set. 
- """ - self._set_preferred_identifier_to_record(pk=2, data_catalog=2) - self.cr_test_data["data_catalog"] = 3 - - existing_records_count = CatalogRecordV2.objects.filter( - research_dataset__contains={"preferred_identifier": self.preferred_identifier} - ).count() - self.assertEqual( - existing_records_count, - 2, - "in the beginning, there should be two records with pref id %s" - % self.preferred_identifier, - ) - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED) - - records = CatalogRecordV2.objects.filter( - research_dataset__contains={"preferred_identifier": self.preferred_identifier} - ) - self.assertEqual( - len(records), - 3, - "after, there should be three records with pref id %s" % self.preferred_identifier, - ) - - # all records belong to same set - ars_id = records[0].alternate_record_set.id - self.assertEqual(records[0].alternate_record_set.id, ars_id) - self.assertEqual(records[1].alternate_record_set.id, ars_id) - self.assertEqual(records[2].alternate_record_set.id, ars_id) - - # records in the set are the ones expected - self.assertEqual(records[0].id, 1) - self.assertEqual(records[1].id, 2) - self.assertEqual(records[2].id, response.data["id"]) - - # records in the set are indeed in different catalogs - self.assertEqual(records[0].data_catalog.id, 1) - self.assertEqual(records[1].data_catalog.id, 2) - self.assertEqual(records[2].data_catalog.id, 3) - - def test_record_is_removed_from_alternate_record_set_when_deleted(self): - """ - When a record belong to an alternate_record_set with multiple other records, - only the records itself should be deleted. The alternate_record_set should keep - existing for the other records. - """ - - # initial conditions will have 3 records in the same set. - self._set_and_ensure_initial_conditions() - - response = self.client.delete("/rest/v2/datasets/2", format="json") - self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) - - # check resulting conditions - records = CatalogRecordV2.objects.filter( - research_dataset__contains={"preferred_identifier": self.preferred_identifier} - ) - self.assertEqual(records[0].alternate_record_set.records.count(), 2) - - def test_alternate_record_set_is_deleted_if_updating_record_with_no_versioning_and_one_record_left( - self, - ): - """ - Same as above, but updating a record in a catalog, which does NOT support versioning. - In this case, the the records itself gets updated, and removed from the old alternate_record_set. - Since the old alternate_record_set is left with only one other record, the alternate set - should be deleted. - """ - original_preferred_identifier = self.preferred_identifier - - # after this, pk=1 and pk=2 have the same preferred_identifier, in catalogs 1 and 3. - # note! catalog=3, so an update will not create a new version! 
- def test_alternate_record_set_is_deleted_if_updating_record_with_no_versioning_and_one_record_left( - self, - ): - """ - Same as above, but updating a record in a catalog, which does NOT support versioning. - In this case, the record itself gets updated, and removed from the old alternate_record_set. - Since the old alternate_record_set is left with only one other record, the alternate set - should be deleted. - """ - original_preferred_identifier = self.preferred_identifier - - # after this, pk=1 and pk=2 have the same preferred_identifier, in catalogs 1 and 3. - # note! catalog=3, so an update will not create a new version! - self._set_preferred_identifier_to_record(pk=2, data_catalog=3) - - # save for later checking - old_ars_id = CatalogRecordV2.objects.get(pk=2).alternate_record_set.id - - # retrieve record id=2, and change its preferred identifier - response = self.client.get("/rest/v2/datasets/2", format="json") - data = {"research_dataset": response.data["research_dataset"]} - data["research_dataset"]["preferred_identifier"] = "a:new:identifier:here" - - # updating preferred_identifier - a new version is NOT created - response = self.client.patch("/rest/v2/datasets/2", data=data, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - records = CatalogRecordV2.objects.filter( - research_dataset__contains={"preferred_identifier": original_preferred_identifier} - ) - self.assertEqual(records.count(), 1) - - with self.assertRaises( - AlternateRecordSet.DoesNotExist, - msg="alternate record set should have been deleted", - ): - AlternateRecordSet.objects.get(pk=old_ars_id) - - def test_alternate_record_set_is_deleted_if_deleting_record_and_only_one_record_left( - self, - ): - """ - Same principle as above, but through deleting a record, instead of updating a record. - - End result for the alternate_record_set should be the same (it gets deleted). - """ - self._set_preferred_identifier_to_record(pk=2, data_catalog=2) - old_ars_id = CatalogRecordV2.objects.get(pk=2).alternate_record_set.id - - response = self.client.delete("/rest/v2/datasets/2", format="json") - self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) - - records = CatalogRecordV2.objects.filter( - research_dataset__contains={"preferred_identifier": self.preferred_identifier} - ) - self.assertEqual(records.count(), 1, "should be the only record with this identifier left now") - - with self.assertRaises( - AlternateRecordSet.DoesNotExist, - msg="alternate record set should have been deleted", - ): - AlternateRecordSet.objects.get(pk=old_ars_id) -
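The two deletion tests above encode a single invariant: an alternate_record_set only exists while it has at least two members. A hedged sketch of the cleanup step they imply, with an illustrative helper name rather than the model's actual method:

    def remove_from_alternate_record_set(record):
        # when a record leaves its set, a set with fewer than two remaining
        # members is deleted outright, which is what both tests assert
        ars = record.alternate_record_set
        if ars is None:
            return
        record.alternate_record_set = None
        record.save(update_fields=["alternate_record_set"])
        if ars.records.count() < 2:
            ars.delete()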
- """ - self.cr_test_data["data_catalog"] = 3 - msg_self_should_not_be_listed = "identifier of the record itself should not be listed" - - response_1 = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - response_2 = self.client.get("/rest/v2/datasets/1", format="json") - self.assertEqual(response_1.status_code, status.HTTP_201_CREATED) - self.assertEqual("alternate_record_set" in response_1.data, True) - self.assertEqual( - response_1.data["identifier"] not in response_1.data["alternate_record_set"], - True, - msg_self_should_not_be_listed, - ) - self.assertEqual( - response_2.data["identifier"] in response_1.data["alternate_record_set"], - True, - ) - - self.cr_test_data.update({"data_catalog": 4}) - response_3 = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response_3.status_code, status.HTTP_201_CREATED) - self.assertEqual("alternate_record_set" in response_3.data, True) - self.assertEqual( - response_1.data["identifier"] in response_3.data["alternate_record_set"], - True, - ) - self.assertEqual( - response_2.data["identifier"] in response_3.data["alternate_record_set"], - True, - ) - self.assertEqual( - response_3.data["identifier"] not in response_3.data["alternate_record_set"], - True, - msg_self_should_not_be_listed, - ) - - response_2 = self.client.get("/rest/v2/datasets/1", format="json") - self.assertEqual("alternate_record_set" in response_2.data, True) - self.assertEqual( - response_1.data["identifier"] in response_2.data["alternate_record_set"], - True, - ) - self.assertEqual( - response_3.data["identifier"] in response_2.data["alternate_record_set"], - True, - ) - self.assertEqual( - response_2.data["identifier"] not in response_2.data["alternate_record_set"], - True, - msg_self_should_not_be_listed, - ) - def _set_preferred_identifier_to_record(self, pk=1, data_catalog=1): """ Set preferred_identifier to an existing record to a value, and return that value, @@ -1616,59 +459,6 @@ def _set_preferred_identifier_to_record(self, pk=1, data_catalog=1): cr._handle_preferred_identifier_changed() return unique_identifier - def _set_and_ensure_initial_conditions(self): - """ - Update two existing records to have same pref_id and be in different catalogs, - to create an alternate_record_set. - """ - - # pk=1 also shares the same preferred_identifier (has been set in setUp()) - self._set_preferred_identifier_to_record(pk=2, data_catalog=3) - self._set_preferred_identifier_to_record(pk=3, data_catalog=4) - - # ensuring initial conditions... - records = CatalogRecordV2.objects.filter( - research_dataset__contains={"preferred_identifier": self.preferred_identifier} - ) - self.assertEqual( - len(records), - 3, - "in the beginning, there should be three records with pref id %s" - % self.preferred_identifier, - ) - ars_id = records[0].alternate_record_set.id - self.assertEqual(records[0].alternate_record_set.id, ars_id) - self.assertEqual(records[1].alternate_record_set.id, ars_id) - self.assertEqual(records[2].alternate_record_set.id, ars_id) - - def test_update_rd_title_creates_new_metadata_version(self): - """ - Updating the title of metadata should create a new metadata version. - """ - response_1 = self._get_and_update_title(self.pk) - self.assertEqual(response_1.status_code, status.HTTP_200_OK, response_1.data) - self._assert_metadata_version_count(response_1.data, 2) - - # get list of metadata versions to access contents... 
- response = self.client.get( - "/rest/v2/datasets/%d/metadata_versions" % response_1.data["id"], - format="json", - ) - - response_2 = self.client.get( - "/rest/v2/datasets/%d/metadata_versions/%s" - % (self.pk, response.data[0]["metadata_version_identifier"]), - format="json", - ) - self.assertEqual(response_2.status_code, status.HTTP_200_OK, response_2.data) - self.assertEqual("preferred_identifier" in response_2.data, True) - - # note! response_1 == cr, response_2 == rd - self.assertEqual( - response_1.data["research_dataset"]["preferred_identifier"], - response_2.data["preferred_identifier"], - ) - def test_dataset_version_lists_removed_records(self): # create version2 of a record @@ -1698,28 +488,6 @@ def test_dataset_version_lists_removed_records(self): self.assertTrue(response.data["dataset_version_set"][0].get("date_removed") is not None) self.assertFalse(response.data["dataset_version_set"][1].get("date_removed")) - def _assert_metadata_version_count(self, record, count): - response = self.client.get( - "/rest/v2/datasets/%d/metadata_versions" % record["id"], format="json" - ) - self.assertEqual(len(response.data), count) - - def _set_cr_to_catalog(self, pk=None, dc=None): - cr = CatalogRecordV2.objects.get(pk=pk) - cr.data_catalog_id = dc - cr.force_save() - - def _get_and_update_title(self, pk, params=None): - """ - Get, modify, and update data for given pk. The modification should cause a new - version to be created if the catalog permits. - - Should not force preferred_identifier to change. - """ - data = self.client.get("/rest/v2/datasets/%d" % pk, format="json").data - data["research_dataset"]["title"]["en"] = "modified title" - return self.client.put("/rest/v2/datasets/%d%s" % (pk, params or ""), data, format="json") - def test_allow_metadata_changes_after_deprecation(self): """ For deprecated datasets metadata changes are still allowed. Changing user metadata for files that @@ -1761,29 +529,6 @@ def test_allow_metadata_changes_after_deprecation(self): ) -class CatalogRecordApiWriteRemoteResources(CatalogRecordApiWriteCommon): - - """ - remote_resources related tests - """ - - def test_calculate_total_remote_resources_byte_size(self): - cr_with_rr = self._get_object_from_test_data("catalogrecord", requested_index=14) - rr = cr_with_rr["research_dataset"]["remote_resources"] - total_remote_resources_byte_size = sum(res["byte_size"] for res in rr) - self.cr_att_test_data["research_dataset"]["remote_resources"] = rr - response = self.client.post("/rest/v2/datasets", self.cr_att_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual( - "total_remote_resources_byte_size" in response.data["research_dataset"], - True, - ) - self.assertEqual( - response.data["research_dataset"]["total_remote_resources_byte_size"], - total_remote_resources_byte_size, - ) - - class CatalogRecordApiWriteLegacyDataCatalogs(CatalogRecordApiWriteCommon): """ @@ -1801,25 +546,6 @@ def setUp(self): del self.cr_test_data["research_dataset"]["files"] del self.cr_test_data["research_dataset"]["total_files_byte_size"] - def test_legacy_catalog_pids_are_not_unique(self): - # values provided as pid values in legacy catalogs are not required to be unique - # within the catalog. 
- self.cr_test_data["data_catalog"] = LEGACY_CATALOGS[0] - self.cr_test_data["research_dataset"]["preferred_identifier"] = "a" - same_pid_ids = [] - for i in range(3): - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data["research_dataset"]["preferred_identifier"], "a") - same_pid_ids.append(response.data["id"]) - - # pid can even be same as an existing dataset's pid in an ATT catalog - real_pid = CatalogRecordV2.objects.get(pk=1).preferred_identifier - self.cr_test_data["research_dataset"]["preferred_identifier"] = real_pid - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data["research_dataset"]["preferred_identifier"], real_pid) - def test_legacy_catalog_pid_must_be_provided(self): # pid cant be empty string self.cr_test_data["data_catalog"] = LEGACY_CATALOGS[0] @@ -1832,41 +558,6 @@ def test_legacy_catalog_pid_must_be_provided(self): response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - def test_legacy_catalog_pids_update(self): - # test setup - self.cr_test_data["data_catalog"] = LEGACY_CATALOGS[0] - self.cr_test_data["research_dataset"]["preferred_identifier"] = "a" - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - # update record. in updates uniqueness also should not be checked - modify = response.data - real_pid = CatalogRecordV2.objects.get(pk=1).preferred_identifier - modify["research_dataset"]["preferred_identifier"] = real_pid - response = self.client.put( - "/rest/v2/datasets/%s?include_legacy" % modify["id"], modify, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - def test_delete_legacy_catalog_dataset(self): - """ - Datasets in legacy catalogs should be deleted permanently, instead of only marking them - as 'removed'. - """ - - # test setup - self.cr_test_data["data_catalog"] = LEGACY_CATALOGS[0] - self.cr_test_data["research_dataset"]["preferred_identifier"] = "a" - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - cr_id = response.data["id"] - - # delete record - response = self.client.delete("/rest/v2/datasets/%s?include_legacy" % cr_id, format="json") - self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) - results_count = CatalogRecordV2.objects_unfiltered.filter(pk=cr_id).count() - self.assertEqual(results_count, 0, "record should have been deleted permantly") - class CatalogRecordApiWriteOwnerFields(CatalogRecordApiWriteCommon): @@ -1929,301 +620,6 @@ def test_metadata_provider_user_is_readonly_after_creating(self): self.assertEqual(response.data["metadata_provider_user"], original) -class CatalogRecordApiEndUserAccess(CatalogRecordApiWriteCommon): - - """ - End User Access -related permission testing. 
- """ - - def setUp(self): - super().setUp() - - # create catalogs with end user access permitted - dc = DataCatalog.objects.get(pk=1) - catalog_json = dc.catalog_json - for identifier in END_USER_ALLOWED_DATA_CATALOGS: - catalog_json["identifier"] = identifier - dc = DataCatalog.objects.create( - catalog_json=catalog_json, - date_created=get_tz_aware_now_without_micros(), - catalog_record_services_create="testuser,api_auth_user,metax", - catalog_record_services_edit="testuser,api_auth_user,metax", - ) - - self.token = get_test_oidc_token() - - # by default, use the unmodified token. to use a different/modified token - # for various test scenarions, alter self.token, and call the below method again - self._use_http_authorization(method="bearer", token=self.token) - - # no reason to test anything related to failed authentication, since failed - # authentication stops the request from proceeding anywhere - self._mock_token_validation_succeeds() - - def _set_cr_owner_to_token_user(self, cr_id): - cr = CatalogRecordV2.objects.get(pk=cr_id) - cr.user_created = self.token["CSCUserName"] - cr.metadata_provider_user = self.token["CSCUserName"] - cr.editor = None # pretend the record was created by user directly - cr.force_save() - - def _set_cr_to_permitted_catalog(self, cr_id): - cr = CatalogRecordV2.objects.get(pk=cr_id) - cr.data_catalog_id = DataCatalog.objects.get( - catalog_json__identifier=END_USER_ALLOWED_DATA_CATALOGS[0] - ).id - cr.force_save() - - @responses.activate - def test_user_can_create_dataset(self): - """ - Ensure end user can create a new dataset, and required fields are - automatically placed and the user is only able to affect allowed - fields - """ - user_created = self.token["CSCUserName"] - metadata_provider_user = self.token["CSCUserName"] - metadata_provider_org = self.token["schacHomeOrganization"] - metadata_owner_org = self.token["schacHomeOrganization"] - - self.cr_test_data["data_catalog"] = END_USER_ALLOWED_DATA_CATALOGS[0] # ida - self.cr_test_data["contract"] = 1 - self.cr_test_data["editor"] = {"nope": "discarded by metax"} - self.cr_test_data["preservation_description"] = "discarded by metax" - self.cr_test_data["preservation_reason_description"] = "discarded by metax" - self.cr_test_data["preservation_state"] = 10 - self.cr_test_data.pop("metadata_provider_user", None) - self.cr_test_data.pop("metadata_provider_org", None) - self.cr_test_data.pop("metadata_owner_org", None) - - # test file permission checking in another test - self.cr_test_data["research_dataset"].pop("files", None) - self.cr_test_data["research_dataset"].pop("directories", None) - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content) - self.assertEqual(response.data["user_created"], user_created) - self.assertEqual(response.data["metadata_provider_user"], metadata_provider_user) - self.assertEqual(response.data["metadata_provider_org"], metadata_provider_org) - self.assertEqual(response.data["metadata_owner_org"], metadata_owner_org) - self.assertEqual("contract" in response.data, False) - self.assertEqual("editor" in response.data, False) - self.assertEqual("preservation_description" in response.data, False) - self.assertEqual("preservation_reason_description" in response.data, False) - self.assertEqual(response.data["preservation_state"], 0) - - @responses.activate - def test_user_can_create_datasets_only_to_limited_catalogs(self): - """ - End users should not be able to 
create datasets for example to harvested - data catalogs. - """ - - # test file permission checking in another test - self.cr_test_data["research_dataset"].pop("files", None) - self.cr_test_data["research_dataset"].pop("directories", None) - - # should not work - self.cr_test_data["data_catalog"] = 1 - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - # check error has expected error description - self.assertEqual("selected data catalog" in response.data["detail"][0], True, response.data) - - # should work - for identifier in END_USER_ALLOWED_DATA_CATALOGS: - if identifier in LEGACY_CATALOGS: - self.cr_test_data["research_dataset"]["preferred_identifier"] = "a" - - params = "draft" if identifier == DFT_CATALOG else "" - self.cr_test_data["data_catalog"] = identifier - response = self.client.post( - f"/rest/v2/datasets?{params}", self.cr_test_data, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - if identifier in LEGACY_CATALOGS: - # prevents next test from crashing if legacy catalog is not the last in the list - del self.cr_test_data["research_dataset"]["preferred_identifier"] - - @responses.activate - def test_owner_can_edit_dataset(self): - """ - Ensure end users are able to edit datasets owned by them. - Ensure end users can only edit permitted fields. - Note: File project permissions should not be checked, since files are not changed. - """ - - # create test record - self.cr_test_data["data_catalog"] = END_USER_ALLOWED_DATA_CATALOGS[0] - self.cr_test_data["research_dataset"].pop( - "files", None - ) # test file permission checking in another test - self.cr_test_data["research_dataset"].pop("directories", None) - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - modified_data = response.data - # research_dataset is the only permitted field to edit - modified_data["research_dataset"]["value"] = 112233 - modified_data["contract"] = 1 - modified_data["editor"] = {"nope": "discarded by metax"} - modified_data["preservation_description"] = "discarded by metax" - modified_data["preservation_reason_description"] = "discarded by metax" - modified_data["preservation_state"] = 10 - - response = self.client.put( - "/rest/v2/datasets/%d" % modified_data["id"], modified_data, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data["research_dataset"]["value"], 112233) # value we set - self.assertEqual(response.data["user_modified"], self.token["CSCUserName"]) # set by metax - - # none of these should have been affected - self.assertEqual("contract" in response.data, False) - self.assertEqual("editor" in response.data, False) - self.assertEqual("preservation_description" in response.data, False) - self.assertEqual("preservation_reason_description" in response.data, False) - self.assertEqual(response.data["preservation_state"], 0) - - @responses.activate - def test_owner_can_edit_datasets_only_in_permitted_catalogs(self): - """ - Ensure end users are able to edit datasets only in permitted catalogs, even if they - own the record (catalog may be disabled from end user editing for reason or another). 
- """ - - # create test record - self.cr_test_data["data_catalog"] = 1 - self.cr_test_data["user_created"] = self.token["CSCUserName"] - self.cr_test_data["metadata_provider_user"] = self.token["CSCUserName"] - self.cr_test_data.pop("editor", None) - - self._use_http_authorization() # create cr as a service-user - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - modified_data = response.data - modified_data["research_dataset"]["value"] = 112233 - - self._use_http_authorization(method="bearer", token=self.token) - response = self.client.put( - "/rest/v2/datasets/%d" % modified_data["id"], modified_data, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - - @responses.activate - def test_other_users_cant_edit_dataset(self): - """ - Ensure end users are unable edit datasets not owned by them. - """ - response = self.client.get("/rest/v2/datasets/1", format="json") - modified_data = response.data - modified_data["research_dataset"]["value"] = 112233 - - response = self.client.put("/rest/v2/datasets/1", modified_data, format="json") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - - response = self.client.put("/rest/v2/datasets", [modified_data], format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - # ^ individual errors do not have error codes, only the general request - # has an error code for a failed request. - - @responses.activate - def test_user_can_delete_dataset(self): - self._set_cr_owner_to_token_user(1) - self._set_cr_to_permitted_catalog(1) - response = self.client.delete("/rest/v2/datasets/1", format="json") - self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) - - @responses.activate - def test_user_file_permissions_are_checked_during_dataset_create(self): - """ - Ensure user's association with a project is checked during dataset create when - attaching files or directories to a dataset. - """ - - # try creating without proper permisisons - self.cr_test_data["data_catalog"] = END_USER_ALLOWED_DATA_CATALOGS[0] # ida - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.content) - - # add project membership to user's token and try again - file_identifier = self.cr_test_data["research_dataset"]["files"][0]["identifier"] - project_identifier = File.objects.get(identifier=file_identifier).project_identifier - self.token["group_names"].append("IDA01:%s" % project_identifier) - self._use_http_authorization(method="bearer", token=self.token) - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content) - - @responses.activate - def test_user_file_permissions_are_checked_during_dataset_update(self): - """ - Ensure user's association with a project is checked during dataset update when - attaching files or directories to a dataset. The permissions should be checked - only for changed files (newly added, or removed). 
- """ - # get some files to add to another dataset - new_files = CatalogRecordV2.objects.get(pk=1).research_dataset["files"] - - self.cr_test_data["data_catalog"] = END_USER_ALLOWED_DATA_CATALOGS[0] # ida - self.cr_test_data["research_dataset"].pop("files", None) - self.cr_test_data["research_dataset"].pop("directories", None) - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - cr_id = response.data["id"] - - file_changes = {"files": new_files} - - # should fail, since user's token has no permission for the newly added files - response = self.client.post(f"/rest/v2/datasets/{cr_id}/files", file_changes, format="json") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.content) - - # add project membership to user's token and try again - project_identifier = File.objects.get( - identifier=new_files[0]["identifier"] - ).project_identifier - self.token["group_names"].append("IDA01:%s" % project_identifier) - self._use_http_authorization(method="bearer", token=self.token) - - response = self.client.post(f"/rest/v2/datasets/{cr_id}/files", file_changes, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) - - @responses.activate - def test_owner_receives_unfiltered_dataset_data(self): - """ - The general public will have some fields filtered out from the dataset, - in order to protect sensitive data. The owner of a dataset however should - always receive full data. - """ - self._set_cr_owner_to_token_user(1) - - def _check_fields(obj): - for sensitive_field in ["email", "telephone", "phone"]: - self.assertEqual( - sensitive_field in obj["research_dataset"]["curator"][0], - True, - "field %s should be present" % sensitive_field, - ) - - for cr in CatalogRecordV2.objects.filter(pk=1): - cr.research_dataset["curator"][0].update( - { - "email": "email@mail.com", - "phone": "123124", - "telephone": "123124", - } - ) - cr.force_save() - - response = self.client.get("/rest/v2/datasets/1") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - _check_fields(response.data) - - class CatalogRecordExternalServicesAccess(CatalogRecordApiWriteCommon): """ @@ -2252,116 +648,10 @@ def setUp(self): password=django_settings.API_EXT_USER["password"], ) - def test_external_service_can_not_read_all_metadata_in_other_catalog(self): - """ External service should get the same output from someone elses catalog than anonymous user """ - # create a catalog that does not belong to our external service - dc2 = DataCatalog.objects.get(pk=2) - dc2.catalog_json["identifier"] = "Some other catalog" - dc2.catalog_record_services_read = "metax" - dc2.force_save() - - # Create a catalog record that belongs to some other user & our catalog nr2 - cr = CatalogRecordV2.objects.get(pk=12) - cr.user_created = "#### Some owner who is not you ####" - cr.metadata_provider_user = "#### Some owner who is not you ####" - cr.data_catalog = dc2 - cr.editor = None - cr.research_dataset["access_rights"]["access_type"]["identifier"] = ACCESS_TYPES[ - "restricted" - ] - cr.force_save() - - # Let's try to return the data with our external services credentials - response_service_user = self.client.get("/rest/v2/datasets/12") - self.assertEqual( - response_service_user.status_code, - status.HTTP_200_OK, - response_service_user.data, - ) - - # Test access as unauthenticated user - self.client._credentials = {} - response_anonymous = 
self.client.get("/rest/v2/datasets/12") - self.assertEqual( - response_anonymous.status_code, status.HTTP_200_OK, response_anonymous.data - ) - - self.assertEqual( - response_anonymous.data, - response_service_user.data, - "External service with no read-rights should not see any more metadata than anonymous user from a catalog", - ) - - def test_external_service_can_add_catalog_record_to_own_catalog(self): - self.cr_test_data["research_dataset"]["preferred_identifier"] = "123456" - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data["research_dataset"]["preferred_identifier"], "123456") - - def test_external_service_can_update_catalog_record_in_own_catalog(self): - self.cr_test_data["research_dataset"]["preferred_identifier"] = "123456" - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data["research_dataset"]["preferred_identifier"], "123456") - - cr_id = response.data["id"] - self.cr_test_data["research_dataset"]["preferred_identifier"] = "654321" - response = self.client.put( - "/rest/v2/datasets/{}".format(cr_id), self.cr_test_data, format="json" - ) - - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data["research_dataset"]["preferred_identifier"], "654321") - - def test_external_service_can_delete_catalog_record_from_own_catalog(self): - self.cr_test_data["research_dataset"]["preferred_identifier"] = "123456" - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - - cr_id = response.data["id"] - response = self.client.delete("/rest/v2/datasets/{}".format(cr_id)) - self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) - - response = self.client.get("/rest/v2/datasets/{}".format(cr_id), format="json") - self.assertEqual("not found" in response.json()["detail"].lower(), True) - def test_external_service_can_not_add_catalog_record_to_other_catalog(self): dc = self._get_object_from_test_data("datacatalog", requested_index=1) self.cr_test_data["data_catalog"] = dc["catalog_json"]["identifier"] self.cr_test_data["research_dataset"]["preferred_identifier"] = "temp-pid" response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - - def test_external_service_can_not_update_catalog_record_in_other_catalog(self): - response = self.client.put("/rest/v2/datasets/1", {}, format="json") - - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - - def test_external_service_can_not_delete_catalog_record_from_other_catalog(self): - response = self.client.delete("/rest/v2/datasets/1") - - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - - def test_harvested_catalogs_must_have_preferred_identifier_create(self): - # create without preferred identifier - - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual( - "must have preferred identifier" - in response.data["research_dataset"]["preferred_identifier"][0], - True, - ) - - self.cr_test_data["research_dataset"]["preferred_identifier"] = "" - response = 
self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual( - "must have preferred identifier" - in response.data["research_dataset"]["preferred_identifier"][0], - True, - ) + self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) \ No newline at end of file diff --git a/src/metax_api/tests/api/rest/v2/views/directories/read.py b/src/metax_api/tests/api/rest/v2/views/directories/read.py index 6146ab20..3a20880f 100755 --- a/src/metax_api/tests/api/rest/v2/views/directories/read.py +++ b/src/metax_api/tests/api/rest/v2/views/directories/read.py @@ -6,16 +6,13 @@ # :license: MIT import copy -import responses from django.core.management import call_command -from django.db import transaction from django.db.models import Count, Sum from rest_framework import status from rest_framework.test import APITestCase -from metax_api.models import CatalogRecord, Directory, File -from metax_api.models.catalog_record import ACCESS_TYPES -from metax_api.tests.utils import TestClassUtils, get_test_oidc_token, test_data_file_path +from metax_api.models import CatalogRecord, Directory +from metax_api.tests.utils import TestClassUtils, test_data_file_path class DirectoryApiReadCommon(APITestCase, TestClassUtils): @@ -33,490 +30,6 @@ def setUp(self): self.pk = dir_from_test_data["id"] self._use_http_authorization() - def _create_test_dirs(self, count): - count = count + 1 - with transaction.atomic(): - for n in range(1, count): - f = self._get_new_file_data(str(n)) - self.client.post("/rest/v2/files", f, format="json") - - def _get_dirs_files_ids(self, url): - file_data = self.client.get(url).data - if isinstance(file_data, dict): - return { - key: [f["id"] for f in file_data[key]] - for key in file_data.keys() - if key in ["directories", "files"] - } - else: - return [f["id"] for f in file_data] - - def _get_new_file_data(self, file_n): - from_test_data = self._get_object_from_test_data("file", requested_index=0) - - path = "/prj_112_root/science_data_C/phase_2/2017/10/dir_" + file_n + "/file_" + file_n - identifier = "urn:nbn:fi:100" + file_n - - from_test_data.update( - { - "checksum": { - "value": "habeebit", - "algorithm": "SHA-256", - "checked": "2017-05-23T10:07:22.559656Z", - }, - "file_name": "tiedosto_name_" + file_n, - "file_path": path, - "identifier": identifier, - "file_storage": self._get_object_from_test_data("filestorage", requested_index=0), - "parent_directory": 24, - "project_identifier": "research_project_112", - } - ) - del from_test_data["id"] - return from_test_data - - -class DirectoryApiReadBasicTests(DirectoryApiReadCommon): - def test_read_directory_list(self): - response = self.client.get("/rest/v2/directories") - self.assertEqual(response.status_code, 501) - - def test_read_directory_details_by_pk(self): - response = self.client.get("/rest/v2/directories/%s" % self.pk) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual( - hasattr(response, "data"), - True, - "Request response object is missing attribute 'data'", - ) - self.assertEqual("directory_name" in response.data.keys(), True) - self.assertEqual(response.data["identifier"], self.identifier) - - def test_read_directory_details_by_identifier(self): - response = self.client.get("/rest/v2/directories/%s" % self.identifier) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual( - hasattr(response, "data"), - True, - "Request response 
object is missing attribute 'data'", - ) - self.assertEqual("directory_name" in response.data.keys(), True) - self.assertEqual(response.data["identifier"], self.identifier) - - def test_read_directory_details_not_found(self): - response = self.client.get("/rest/v2/directories/shouldnotexist") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - -class DirectoryApiReadFileBrowsingTests(DirectoryApiReadCommon): - - """ - Test generic file browsing, should always return all existing files in a dir. - """ - - def test_read_directory_get_files(self): - """ - Test browsing files - """ - response = self.client.get("/rest/v2/directories/2/files") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["directories"]), 1) - self.assertEqual(response.data["directories"][0]["id"], 3) - self.assertEqual(response.data["directories"][0]["parent_directory"]["id"], 2) - self.assertEqual(len(response.data["files"]), 0) - - response = self.client.get("/rest/v2/directories/3/files") - self.assertEqual(len(response.data["directories"]), 1) - self.assertEqual(response.data["directories"][0]["id"], 4) - self.assertEqual(response.data["directories"][0]["parent_directory"]["id"], 3) - self.assertEqual(len(response.data["files"]), 5) - self.assertEqual(response.data["files"][0]["parent_directory"]["id"], 3) - self.assertEqual(response.data["files"][4]["parent_directory"]["id"], 3) - - response = self.client.get("/rest/v2/directories/4/files") - self.assertEqual(len(response.data["directories"]), 1) - self.assertEqual(response.data["directories"][0]["parent_directory"]["id"], 4) - self.assertEqual(len(response.data["files"]), 5) - self.assertEqual(response.data["files"][0]["parent_directory"]["id"], 4) - self.assertEqual(response.data["files"][4]["parent_directory"]["id"], 4) - - response = self.client.get("/rest/v2/directories/5/files") - self.assertEqual(len(response.data["directories"]), 1) - self.assertEqual(len(response.data["files"]), 0) - - response = self.client.get("/rest/v2/directories/6/files") - self.assertEqual(len(response.data["directories"]), 0) - self.assertEqual(len(response.data["files"]), 10) - self.assertEqual(response.data["files"][0]["parent_directory"]["id"], 6) - self.assertEqual(response.data["files"][9]["parent_directory"]["id"], 6) - - def test_read_directory_get_files_recursively(self): - """ - Test query parameter 'recursive'. 
- """ - - # without depth, returns from depth=1, which should contain no files - response = self.client.get("/rest/v2/directories/1/files?recursive") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data), 0) - - # dir id 1 (the root) contains 0 files, but recursively 20 - response = self.client.get("/rest/v2/directories/1/files?recursive=true&depth=*") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data), 20) - - # dir id 3 contains 5 files, but recursively 20 - response = self.client.get("/rest/v2/directories/3/files?recursive=true&depth=*") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data), 20) - - # dir id 4 contains 5 files, but recursively 15 - response = self.client.get("/rest/v2/directories/4/files?recursive=true&depth=*") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data), 15) - - # dir id 5 contains 0 files - response = self.client.get("/rest/v2/directories/5/files?recursive=true&depth=*") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data), 10) - - # dir id 6 contains 10 files - response = self.client.get("/rest/v2/directories/6/files?recursive=true&depth=*") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data), 10) - - def test_read_directory_get_files_file_not_found(self): - response = self.client.get("/rest/v2/directories/not_found/files") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - def test_read_directory_get_project_root_directory(self): - response = self.client.get("/rest/v2/directories/root?project=project_x") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) - self.assertEqual(response.data["id"], 1) - self.assertEqual("directories" in response.data, True) - self.assertEqual("files" in response.data, True) - self.assertEqual(len(response.data["directories"]), 1) - self.assertEqual(response.data["directories"][0]["id"], 2) - - def test_read_directory_get_project_root_directory_not_found(self): - response = self.client.get("/rest/v2/directories/root?project=project_xyz") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - def test_read_directory_get_project_root_directory_parameter_missing(self): - response = self.client.get("/rest/v2/directories/root") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual("required" in response.data["detail"][0], True, response.data) - - def test_read_directory_get_files_by_path(self): - dr = Directory.objects.get(pk=2) - response = self.client.get( - "/rest/v2/directories/files?path=%s&project=%s" - % (dr.directory_path, dr.project_identifier) - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["directories"]), 1) - self.assertEqual(response.data["directories"][0]["id"], 3) - self.assertEqual(response.data["directories"][0]["parent_directory"]["id"], 2) - self.assertEqual(len(response.data["files"]), 0) - - def test_read_directory_get_files_by_path_not_found(self): - response = self.client.get( - "/rest/v2/directories/files?path=%s&project=%s" % ("doesnotexist", "doesnotexist") - ) - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - def test_read_directory_get_files_by_path_check_parameters(self): - response = self.client.get("/rest/v2/directories/files") - 
-    def test_read_directory_get_files_by_path_check_parameters(self):
-        response = self.client.get("/rest/v2/directories/files")
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        response = self.client.get("/rest/v2/directories/files?path=something")
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        response = self.client.get("/rest/v2/directories/files?project=something")
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-
-    def test_read_directory_recursively_with_max_depth(self):
-        """
-        Should return a flat list of files, three directories deep
-        """
-        response = self.client.get("/rest/v2/directories/2/files?recursive=true&depth=3")
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data), 10)
-
-    def test_read_directory_recursively_with_dirs_only_and_max_depth(self):
-        """
-        Should return a directory hierarchy, three directories deep, with no files at all.
-        """
-        response = self.client.get(
-            "/rest/v2/directories/2/files?recursive=true&directories_only=true&depth=3"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual("directories" in response.data, True)
-        self.assertEqual("directories" in response.data["directories"][0], True)
-        self.assertEqual("directories" in response.data["directories"][0]["directories"][0], True)
-
-    def test_read_directory_recursively_with_no_depth(self):
-        """
-        recursive=true with no depth specified should not return everything, but instead depth=1
-        by default.
-
-        Using parameter directories_only=true to make the depth easier to count.
-        """
-        response = self.client.get(
-            "/rest/v2/directories/3/files?recursive=true&directories_only=true"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual("directories" in response.data, True)
-        self.assertEqual("directories" in response.data["directories"][0], True)
-
-    def test_read_directory_return_directories_only(self):
-        response = self.client.get("/rest/v2/directories/3/files?directories_only")
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data["directories"]), 1)
-        self.assertEqual("files" in response.data, False)
-
-    def test_read_directory_with_include_parent(self):
-        response = self.client.get("/rest/v2/directories/3/files?include_parent")
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data["directories"]), 1)
-        self.assertEqual(len(response.data["files"]), 5)
-        self.assertEqual(response.data.get("id", None), 3)
-
-    def test_read_directory_files_sorted_by_file_path(self):
-        response = self.client.get("/rest/v2/directories/3/files")
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(
-            response.data["files"][0]["file_path"],
-            "/project_x_FROZEN/Experiment_X/file_name_1",
-        )
-        self.assertEqual(
-            response.data["files"][1]["file_path"],
-            "/project_x_FROZEN/Experiment_X/file_name_2",
-        )
-        self.assertEqual(
-            response.data["files"][2]["file_path"],
-            "/project_x_FROZEN/Experiment_X/file_name_3",
-        )
-
-        response = self.client.get("/rest/v2/directories/3/files?pagination&limit=2&offset=2")
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(
-            response.data["results"]["files"][0]["file_path"],
-            "/project_x_FROZEN/Experiment_X/file_name_2",
-        )
-        self.assertEqual(
-            response.data["results"]["files"][1]["file_path"],
-            "/project_x_FROZEN/Experiment_X/file_name_3",
-        )
-
-        response = self.client.get("/rest/v2/directories/3/files?cr_identifier=2")
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(
-            response.data["files"][0]["file_path"],
-            "/project_x_FROZEN/Experiment_X/file_name_3",
-        )
-        self.assertEqual(
-            response.data["files"][1]["file_path"],
-            "/project_x_FROZEN/Experiment_X/file_name_4",
-        )
-
-        response = self.client.get("/rest/v2/directories/3/files?recursive")
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(
-            response.data[0]["file_path"], "/project_x_FROZEN/Experiment_X/file_name_1"
-        )
-        self.assertEqual(
-            response.data[1]["file_path"], "/project_x_FROZEN/Experiment_X/file_name_2"
-        )
-        self.assertEqual(
-            response.data[2]["file_path"], "/project_x_FROZEN/Experiment_X/file_name_3"
-        )
-
-        response = self.client.get("/rest/v2/directories/3/files?recursive&cr_identifier=2")
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(
-            response.data[0]["file_path"], "/project_x_FROZEN/Experiment_X/file_name_3"
-        )
-        self.assertEqual(
-            response.data[1]["file_path"], "/project_x_FROZEN/Experiment_X/file_name_4"
-        )
-
-    def test_read_directory_directories_sorted_by_directory_path(self):
-
-        response = self.client.get("/rest/v2/directories/8/files")
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(response.data["directories"][0]["directory_path"], "/prj_112_root/other")
-        self.assertEqual(
-            response.data["directories"][1]["directory_path"],
-            "/prj_112_root/random_folder",
-            response.data,
-        )
-        self.assertEqual(
-            response.data["directories"][2]["directory_path"],
-            "/prj_112_root/science_data_A",
-        )
-        self.assertEqual(
-            response.data["directories"][3]["directory_path"],
-            "/prj_112_root/science_data_B",
-        )
-        self.assertEqual(
-            response.data["directories"][4]["directory_path"],
-            "/prj_112_root/science_data_C",
-        )
-
-        response = self.client.get("/rest/v2/directories/8/files?pagination&limit=2&offset=2")
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(
-            response.data["results"]["directories"][0]["directory_path"],
-            "/prj_112_root/science_data_A",
-        )
-        self.assertEqual(
-            response.data["results"]["directories"][1]["directory_path"],
-            "/prj_112_root/science_data_B",
-        )
-
-        response = self.client.get("/rest/v2/directories/8/files?directories_only")
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(response.data["directories"][0]["directory_path"], "/prj_112_root/other")
-        self.assertEqual(
-            response.data["directories"][1]["directory_path"],
-            "/prj_112_root/random_folder",
-            response.data,
-        )
-        self.assertEqual(
-            response.data["directories"][2]["directory_path"],
-            "/prj_112_root/science_data_A",
-        )
-        self.assertEqual(
-            response.data["directories"][3]["directory_path"],
-            "/prj_112_root/science_data_B",
-        )
-        self.assertEqual(
-            response.data["directories"][4]["directory_path"],
-            "/prj_112_root/science_data_C",
-        )
-
-        response = self.client.get("/rest/v2/directories/8/files?cr_identifier=13")
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(response.data["directories"][0]["directory_path"], "/prj_112_root/other")
-        self.assertEqual(
-            response.data["directories"][1]["directory_path"],
-            "/prj_112_root/random_folder",
-        )
-        self.assertEqual(
-            response.data["directories"][2]["directory_path"],
-            "/prj_112_root/science_data_A",
-        )
-        self.assertEqual(
-            response.data["directories"][3]["directory_path"],
-            "/prj_112_root/science_data_B",
-        )
-
-
-class DirectoryApiReadFileBrowsingRetrieveSpecificFieldsTests(DirectoryApiReadCommon):
-    def test_retrieve_requested_directory_fields_only(self):
-
-        response = self.client.get(
-            "/rest/v2/directories/3/files?directory_fields=identifier,directory_path"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data["directories"][0].keys()), 2)
-        self.assertEqual("identifier" in response.data["directories"][0], True)
-        self.assertEqual("directory_path" in response.data["directories"][0], True)
-
-        self._use_http_authorization(username="metax")
-
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            cr_identifier=13&directory_fields=directory_name&directories_only&recursive"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data["directories"][0].keys()), 2)
-        self.assertTrue("directories" in response.data["directories"][0])
-        self.assertTrue("directory_name" in response.data["directories"][0])
-        self.assertFalse("id" in response.data["directories"][0])
-
-    def test_retrieve_directory_byte_size_and_file_count(self):
-        """
-        There is some additional logic involved in retrieving byte_size and file_count, which warrants
-        targeted tests for just those fields.
-        """
-        response = self.client.get(
-            "/rest/v2/directories/3/files?directory_fields=identifier,byte_size"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data["directories"][0].keys()), 2)
-        self.assertEqual("identifier" in response.data["directories"][0], True)
-        self.assertEqual("byte_size" in response.data["directories"][0], True)
-
-        response = self.client.get(
-            "/rest/v2/directories/3/files?directory_fields=identifier,file_count"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data["directories"][0].keys()), 2)
-        self.assertEqual("identifier" in response.data["directories"][0], True)
-        self.assertEqual("file_count" in response.data["directories"][0], True)
-
-        response = self.client.get(
-            "/rest/v2/directories/3/files?directory_fields=identifier,file_count&cr_identifier=3"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data["directories"][0].keys()), 2)
-        self.assertEqual("identifier" in response.data["directories"][0], True)
-        self.assertEqual("file_count" in response.data["directories"][0], True)
-
-        response = self.client.get(
-            "/rest/v2/directories/3/files?directory_fields=identifier,file_count&not_cr_identifier=2"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data["directories"][0].keys()), 2)
-        self.assertEqual("identifier" in response.data["directories"][0], True)
-        self.assertEqual("file_count" in response.data["directories"][0], True)
-
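# Sketch of the field filtering exercised above, assuming the same routes (the
# helper name is illustrative): 'directory_fields' and 'file_fields' take
# comma-separated field names and limit the keys present in returned objects.
def browse_with_fields(client, directory_id, directory_fields, file_fields):
    url = "/rest/v2/directories/%s/files?directory_fields=%s&file_fields=%s" % (
        directory_id,
        ",".join(directory_fields),
        ",".join(file_fields),
    )
    return client.get(url)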
-    def test_retrieve_requested_file_fields_only(self):
-        response = self.client.get("/rest/v2/directories/3/files?file_fields=identifier,file_path")
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data["files"][0].keys()), 2)
-        self.assertEqual("identifier" in response.data["files"][0], True)
-        self.assertEqual("file_path" in response.data["files"][0], True)
-
-    def test_retrieve_requested_file_and_directory_fields_only(self):
-        response = self.client.get(
-            "/rest/v2/directories/3/files?file_fields=identifier&directory_fields=id"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data["files"][0].keys()), 1)
-        self.assertEqual("identifier" in response.data["files"][0], True)
-        self.assertEqual(len(response.data["directories"][0].keys()), 1)
-        self.assertEqual("id" in response.data["directories"][0], True)
-
-    def test_not_retrieving_not_allowed_directory_fields(self):
-        from metax_api.api.rest.base.serializers import DirectorySerializer, FileSerializer
-
-        allowed_dir_fields = set(DirectorySerializer.Meta.fields)
-        allowed_file_fields = set(FileSerializer.Meta.fields)
-
-        response = self.client.get(
-            "/rest/v2/directories/3/files?file_fields=parent,id&directory_fields=;;drop db;,id"
-        )
-
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertTrue(
-            any(field in response.data["files"][0].keys() for field in allowed_file_fields)
-        )
-        self.assertTrue(
-            any(field in response.data["directories"][0].keys() for field in allowed_dir_fields)
-        )
-
-        response = self.client.get(
-            "/rest/v2/directories/3/files?file_fields=parent&directory_fields=or"
-        )
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-
-        response = self.client.get("/rest/v2/directories/3/files?file_fields=parent")
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-
-        response = self.client.get("/rest/v2/directories/3/files?directory_fields=or")
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-

 class DirectoryApiReadCatalogRecordFileBrowsingTests(DirectoryApiReadCommon):
@@ -531,143 +44,6 @@ def setUp(self):
         self.client.get("/rest/v2/directories/update_byte_sizes_and_file_counts")
         self.client.get("/rest/v2/datasets/update_cr_directory_browsing_data")

-    def test_read_directory_catalog_record_and_not_catalog_record_not_ok(self):
-        """
-        Test that the query parameters 'cr_identifier' and 'not_cr_identifier' cannot be used together.
-        """
-        response = self.client.get(
-            "/rest/v2/directories/3/files?cr_identifier=1&not_cr_identifier=2"
-        )
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertTrue(
-            "one query parameter of 'cr_identifier' and 'not_cr_identifier'"
-            in response.data["detail"][0]
-        )
-
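# The test above documents that 'cr_identifier' and 'not_cr_identifier' are
# mutually exclusive; a sketch of a client-side guard (names are illustrative):
def record_filter_param(cr_identifier=None, not_cr_identifier=None):
    if cr_identifier is not None and not_cr_identifier is not None:
        raise ValueError("use only one of 'cr_identifier' and 'not_cr_identifier'")
    if cr_identifier is not None:
        return "cr_identifier=%s" % cr_identifier
    return "not_cr_identifier=%s" % not_cr_identifier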
- """ - response = self.client.get("/rest/v2/directories/3/files?cr_identifier=notexisting") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - def test_read_directory_for_not_catalog_record_not_found(self): - """ - Not found cr_identifier should raise 400 instead of 404, which is raised when the - directory itself is not found. the error contains details about the 400. - """ - response = self.client.get("/rest/v2/directories/3/files?not_cr_identifier=notexisting") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - def test_read_directory_for_catalog_record_directory_does_not_exist(self): - """ - A directory may have files in a project, but those files did not necessarily exist - or were not selected for a specific CR. - """ - - # should be OK... - response = self.client.get("/rest/v2/directories/4/files") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["directories"]), 1) - self.assertEqual(len(response.data["files"]), 5) - - # ... but should not contain any files FOR THIS CR - response = self.client.get( - "/rest/v2/directories/4/files?cr_identifier=%s" - % CatalogRecord.objects.get(pk=1).identifier - ) - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - # ... and should contain files ALL BUT THIS CR - response = self.client.get( - "/rest/v2/directories/4/files?not_cr_identifier=%s" - % CatalogRecord.objects.get(pk=1).identifier - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["directories"]), 1) - self.assertEqual(len(response.data["files"]), 5) - - def test_read_directory_for_catalog_record_recursively(self): - """ - Test query parameters 'cr_identifier' with 'recursive'. - """ - response = self.client.get( - "/rest/v2/directories/1/files?recursive&cr_identifier=%s&depth=*" - % CatalogRecord.objects.get(pk=1).identifier - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - file_list = list(File.objects.filter(record__pk=1).values_list("id", flat=True)) - self.assertEqual(len(response.data), len(file_list)) - for f in response.data: - self.assertTrue(f["id"] in file_list) - - response = self.client.get( - "/rest/v2/directories/1/files?recursive&cr_identifier=1&depth=*&directories_only" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["directories"]), 1) - self.assertEqual(len(response.data["directories"][0]["directories"]), 1) - self.assertEqual(len(response.data["directories"][0]["directories"][0]["directories"]), 0) - self.assertFalse(response.data.get("files")) - - # not found cr_identifier should raise 400 instead of 404, which is raised when the - # directory itself is not found. the error contains details about the 400 - response = self.client.get( - "/rest/v2/directories/1/files?recursive&cr_identifier=notexisting" - ) - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - def test_read_directory_for_not_catalog_record_recursively(self): - """ - Test query parameters 'not_cr_identifier' with 'recursive'. 
- """ - file_recursive = self.client.get("/rest/v2/directories/1/files?recursive&depth=*").data - file_list = list(File.objects.filter(record__pk=1).values_list("id", flat=True)) - response = self.client.get( - "/rest/v2/directories/1/files?recursive&depth=*¬_cr_identifier=%s" - % CatalogRecord.objects.get(pk=1).identifier - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data), len(file_recursive) - len(file_list)) - for f in response.data: - self.assertTrue(f["id"] not in file_list) - - # not found not_cr_identifier should raise 400 instead of 404, which is raised when the - # directory itself is not found. the error contains details about the 400 - response = self.client.get( - "/rest/v2/directories/1/files?recursive¬_cr_identifier=notexisting" - ) - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - def test_directory_byte_size_and_file_count(self): """ Test byte size and file count are calculated correctly for directories when browsing files @@ -815,893 +191,4 @@ def _get_parents(pk): for id in set(dirs): _assertDirectoryData(id, parent_data) - _assertDirectoryData_not_cr_id(id, parent_data) - - -class DirectoryApiReadCatalogRecordFileBrowsingAuthorizationTests(DirectoryApiReadCommon): - - """ - Test browsing files in the context of a specific CatalogRecord from authorization perspective - """ - - # THE OK TESTS - - def test_returns_ok_for_open_catalog_record_if_no_authorization(self): - open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() - - # Verify /rest/v2/directories//files?cr_identifier=cr_id returns dir files even without authorization - # for open catalog record - self._assert_ok(open_cr_json, "no") - - def test_returns_ok_for_login_catalog_record_if_no_authorization(self): - login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details( - use_login_access_type=True - ) - - # Verify /rest/v2/directories//files?cr_identifier=cr_id returns dir files even without authorization - # for login catalog record - self._assert_ok(login_cr_json, "no") - - def test_returns_ok_for_open_catalog_record_if_service_authorization(self): - open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details() - - # Verify /rest/v2/directories//files?cr_identifier=cr_id returns dir files with service - # authorization for open catalog record - self._assert_ok(open_cr_json, "service") - - def test_returns_ok_for_login_catalog_record_if_service_authorization(self): - login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details( - use_login_access_type=True - ) - - # Verify /rest/v2/directories//files?cr_identifier=cr_id returns dir files with service - # authorization for login catalog record - self._assert_ok(login_cr_json, "service") - - @responses.activate - def test_returns_ok_for_open_catalog_record_if_owner_authorization(self): - self.create_end_user_data_catalogs() - open_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(True) - - # Verify /rest/v2/directories//files?cr_identifier=cr_id returns dir files with owner authorization for - # owner-owned open catalog record - self._assert_ok(open_cr_json, "owner") - - @responses.activate - def test_returns_ok_for_login_catalog_record_if_owner_authorization(self): - self.create_end_user_data_catalogs() - login_cr_json = self.get_open_cr_with_files_and_dirs_from_api_with_file_details(True) - - # Verify /rest/v2/directories//files?cr_identifier=cr_id returns dir files with 
owner authorization for - # owner-owned login_cr_json catalog record - self._assert_ok(login_cr_json, "owner") - - def test_returns_ok_for_restricted_catalog_record_if_service_authorization(self): - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() - - # Verify /rest/v2/directories//files?cr_identifier=cr_id returns dir files with service - # authorization for restricted catalog record - self._assert_ok(restricted_cr_json, "service") - - @responses.activate - def test_returns_ok_for_restricted_catalog_record_if_owner_authorization(self): - self.create_end_user_data_catalogs() - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details( - True - ) - - # Verify /rest/v2/directories//files?cr_identifier=cr_id returns dir files with owner authorization for - # owner-owned restricted catalog record - self._assert_ok(restricted_cr_json, "owner") - - def test_returns_ok_for_embargoed_catalog_record_if_available_reached_and_no_authorization( - self, - ): - available_embargoed_cr_json = ( - self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(True) - ) - - # Verify /rest/v2/directories//files?cr_identifier=cr_id returns dir files without authorization - # for embargoed catalog record whose embargo date has been reached - self._assert_ok(available_embargoed_cr_json, "no") - - # THE FORBIDDEN TESTS - - def test_returns_forbidden_for_restricted_catalog_record_if_no_authorization(self): - restricted_cr_json = self.get_restricted_cr_with_files_and_dirs_from_api_with_file_details() - - # Verify /rest/v2/directories//files?cr_identifier=cr_id returns forbidden without authorization - # for restricted catalog record - self._assert_forbidden(restricted_cr_json, "no") - - def test_returns_forbidden_for_embargoed_catalog_record_if_available_not_reached_and_no_authorization( - self, - ): - not_available_embargoed_cr_json = ( - self.get_embargoed_cr_with_files_and_dirs_from_api_with_file_details(False) - ) - - # Verify /rest/v2/directories//files?cr_identifier=cr_id returns forbidden without authorization - # for embargoed catalog record whose embargo date has not been reached - # Deactivate credentials - self._assert_forbidden(not_available_embargoed_cr_json, "no") - - def _assert_forbidden(self, cr_json, credentials_type): - dir_id = cr_json["research_dataset"]["directories"][0]["identifier"] - cr_id = cr_json["identifier"] - self._set_http_authorization(credentials_type) - response = self.client.get( - "/rest/v2/directories/{0}/files?cr_identifier={1}".format(dir_id, cr_id) - ) - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - - response = self.client.get( - "/rest/v2/directories/{0}/files?not_cr_identifier={1}".format(dir_id, cr_id) - ) - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - - def _assert_ok(self, cr_json, credentials_type): - dir_file_amt = cr_json["research_dataset"]["directories"][0]["details"]["file_count"] - dir_id = cr_json["research_dataset"]["directories"][0]["identifier"] - cr_id = cr_json["identifier"] - self._set_http_authorization(credentials_type) - response = self.client.get( - "/rest/v2/directories/{0}/files?cr_identifier={1}&recursive&depth=*".format( - dir_id, cr_id - ) - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data), dir_file_amt) - - response = self.client.get( - "/rest/v2/directories/{0}/files?not_cr_identifier={1}&recursive&depth=*".format( - dir_id, cr_id - ) - ) - 
-    def _assert_ok(self, cr_json, credentials_type):
-        dir_file_amt = cr_json["research_dataset"]["directories"][0]["details"]["file_count"]
-        dir_id = cr_json["research_dataset"]["directories"][0]["identifier"]
-        cr_id = cr_json["identifier"]
-        self._set_http_authorization(credentials_type)
-        response = self.client.get(
-            "/rest/v2/directories/{0}/files?cr_identifier={1}&recursive&depth=*".format(
-                dir_id, cr_id
-            )
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data), dir_file_amt)
-
-        response = self.client.get(
-            "/rest/v2/directories/{0}/files?not_cr_identifier={1}&recursive&depth=*".format(
-                dir_id, cr_id
-            )
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data), 0)
-
-
-class DirectoryApiReadQueryFiltersTogetherTests(DirectoryApiReadCommon):
-    """
-    Test browsing files and directories with different filtering combinations:
-    return ok if the filters should work together, and an error if they should not.
-
-    recursive: If directories_only=true is also specified, returns a hierarchical directory tree of depth x instead.
-
-    depth: Max depth of recursion. Value must be an integer > 0, or *. Default value is 1.
-
-    directories_only: Omit files entirely from the returned results. Use together with recursive=true and depth=x
-    to get a directory tree.
-
-    include_parent: Also includes the 'parent directory' of the contents being fetched in the results.
-
-    cr_identifier: Browse only files that have been selected for that record.
-    Incompatible with `not_cr_identifier` parameter.
-
-    not_cr_identifier: Browse only files that have not been selected for that record.
-    Incompatible with `cr_identifier` parameter.
-
-    file_fields: Field names to retrieve for files
-
-    directory_fields: Field names to retrieve for directories
-
-    file_name: Substring search from file names
-
-    directory_name: Substring search from directory names
-
-    pagination / offset / limit: Paginate the results.
-
-    (A combined-request sketch follows this docstring.)
-    """
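As a combined-request sketch (directory id 17 and the parameter spellings are taken from the tests below; self.client is DRF's APIClient, as elsewhere in this suite):

    # illustrative only: a pruned directory tree with trimmed fields and a
    # substring filter. adding not_cr_identifier on top of cr_identifier
    # would turn this 200 into a 400, since the two are mutually exclusive.
    response = self.client.get(
        "/rest/v2/directories/17/files"
        "?directories_only&recursive&depth=*&include_parent"
        "&cr_identifier=13&directory_fields=id&directory_name=phase"
    )
    assert response.status_code == 200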
-    def test_browsing_directories_with_filters(self):
-        # directory filters including directories_only
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            directories_only&include_parent&directory_fields=id&directory_name=phase"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-        # file filters are not supposed to break anything when directories_only is set
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            directories_only&include_parent&directory_fields=id&directory_name=phase& \
-            file_fields=id&file_name=2"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-        # adds cr_identifier
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            directories_only&include_parent&cr_identifier=13&directory_fields=id&directory_name=phase& \
-            file_fields=id&file_name=2"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-        # adds not_cr_identifier
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            directories_only&include_parent&not_cr_identifier=13&directory_fields=id&directory_name=phase& \
-            file_fields=id&file_name=2"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-        # cr_identifier and not_cr_identifier are NOT supposed to work together
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            directories_only&include_parent&cr_identifier=11&not_cr_identifier=13& \
-            directory_fields=id&directory_name=phase&file_fields=id&file_name=2"
-        )
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
-
-        # adds pagination
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            directories_only&include_parent&cr_identifier=13&directory_fields=id&directory_name=phase& \
-            file_fields=id&file_name=2&pagination"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-        # adds recursive
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            directories_only&include_parent&cr_identifier=13&directory_fields=id&directory_name=phase& \
-            file_fields=id&file_name=2&recursive&depth=*"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-        # adds recursive and pagination
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            directories_only&include_parent&cr_identifier=13&directory_fields=id&directory_name=phase& \
-            file_fields=id&file_name=2&recursive&depth=*&pagination"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-    def test_browsing_files_and_directories_with_filters(self):
-        # file filters
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            include_parent&file_fields=id&file_name=file"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-        # adds directory filters
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            include_parent&file_fields=id&file_name=file& \
-            directory_fields=id&directory_name=phase"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-        # adds cr_identifier
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            include_parent&cr_identifier=13&file_fields=id&file_name=file& \
-            directory_fields=id&directory_name=phase"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-        # adds not_cr_identifier
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            include_parent&not_cr_identifier=13&file_fields=id&file_name=file& \
-            directory_fields=id&directory_name=phase"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-        # cr_identifier and not_cr_identifier are NOT supposed to work together
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            include_parent&cr_identifier=13&not_cr_identifier=11&file_fields=id&file_name=file& \
-            directory_fields=id&directory_name=phase"
-        )
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
-
-        # adds recursive; directory filters are not supposed to break anything
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            include_parent&cr_identifier=13&file_fields=id&file_name=file& \
-            directory_fields=id&directory_name=phase&recursive&depth=*"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-        # adds pagination
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            include_parent&cr_identifier=13&file_fields=id&file_name=file& \
-            directory_fields=id&directory_name=phase&pagination"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        # adds recursive and pagination
-        response = self.client.get(
-            "/rest/v2/directories/17/files? \
-            include_parent&cr_identifier=13&file_fields=id&file_name=file& \
-            directory_fields=id&directory_name=phase&recursive&depth=*&pagination"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-
-class DirectoryApiReadCatalogRecordFileBrowsingRetrieveSpecificFieldsTests(DirectoryApiReadCommon):
-    def setUp(self):
-        super().setUp()
-        CatalogRecord.objects.get(pk=12).calculate_directory_byte_sizes_and_file_counts()
-
-    def test_retrieve_requested_directory_fields_only(self):
-        response = self.client.get(
-            "/rest/v2/datasets/12?include_user_metadata&file_details&directory_fields=identifier,directory_path"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(
-            len(response.data["research_dataset"]["directories"][0]["details"].keys()),
-            2,
-        )
-        self.assertEqual(
-            "identifier" in response.data["research_dataset"]["directories"][0]["details"],
-            True,
-        )
-        self.assertEqual(
-            "directory_path" in response.data["research_dataset"]["directories"][0]["details"],
-            True,
-        )
-
-    def test_retrieve_directory_byte_size_and_file_count(self):
-        """
-        There is some additional logic involved in retrieving byte_size and file_count, which warrants
-        targeted tests for just those fields.
-        """
-        response = self.client.get(
-            "/rest/v2/datasets/12?include_user_metadata&file_details&directory_fields=identifier,byte_size"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(
-            len(response.data["research_dataset"]["directories"][0]["details"].keys()),
-            2,
-        )
-        self.assertEqual(
-            "identifier" in response.data["research_dataset"]["directories"][0]["details"],
-            True,
-        )
-        self.assertEqual(
-            "byte_size" in response.data["research_dataset"]["directories"][0]["details"],
-            True,
-        )
-
-        response = self.client.get(
-            "/rest/v2/datasets/12?include_user_metadata&file_details&directory_fields=identifier,file_count"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(
-            len(response.data["research_dataset"]["directories"][0]["details"].keys()),
-            2,
-        )
-        self.assertEqual(
-            "identifier" in response.data["research_dataset"]["directories"][0]["details"],
-            True,
-        )
-        self.assertEqual(
-            "file_count" in response.data["research_dataset"]["directories"][0]["details"],
-            True,
-        )
-
-    def test_retrieve_requested_file_fields_only(self):
-        response = self.client.get(
-            "/rest/v2/datasets/12?include_user_metadata&file_details&file_fields=identifier,file_path"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data["research_dataset"]["files"][0]["details"].keys()), 2)
-        self.assertEqual(
-            "identifier" in response.data["research_dataset"]["files"][0]["details"],
-            True,
-        )
-        self.assertEqual(
-            "file_path" in response.data["research_dataset"]["files"][0]["details"],
-            True,
-        )
-
-    def test_retrieve_requested_file_and_directory_fields_only(self):
-        response = self.client.get(
-            "/rest/v2/datasets/12?include_user_metadata&file_details&file_fields=identifier&directory_fields=id"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data["research_dataset"]["files"][0]["details"].keys()), 1)
-        self.assertEqual(
-            "identifier" in response.data["research_dataset"]["files"][0]["details"],
-            True,
-        )
-        self.assertEqual(
-            len(response.data["research_dataset"]["directories"][0]["details"].keys()),
-            1,
-        )
-        self.assertEqual(
-            "id" in response.data["research_dataset"]["directories"][0]["details"], True
-        )
-
-
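A sketch of the field-trimming behavior the class above exercises (dataset pk 12 and the field names come from those tests): requesting only two directory fields leaves exactly those two keys in each directory's 'details' object.

    response = self.client.get(
        "/rest/v2/datasets/12?include_user_metadata&file_details"
        "&directory_fields=identifier,byte_size"
    )
    details = response.data["research_dataset"]["directories"][0]["details"]
    assert set(details.keys()) == {"identifier", "byte_size"}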
-class DirectoryApiReadEndUserAccess(DirectoryApiReadCommon):
-
-    """
-    Test End User Access permissions when browsing files using /rest/v2/directories api.
-
-    Note: In these tests, the token by default does not have correct project groups.
-    Token project groups are only made valid by calling _update_token_with_project_of_directory().
-    """
-
-    def setUp(self):
-        super().setUp()
-        self.token = get_test_oidc_token()
-        self._mock_token_validation_succeeds()
-
-    def _update_token_with_project_of_directory(self, dir_id):
-        proj = Directory.objects.get(pk=dir_id).project_identifier
-        self.token["group_names"].append("IDA01:%s" % proj)
-        self._use_http_authorization(method="bearer", token=self.token)
-
-    @responses.activate
-    def test_user_can_browse_files_from_their_projects(self):
-        """
-        Ensure users can only read files from /rest/v2/directories owned by them.
-        """
-        self._use_http_authorization(method="bearer", token=self.token)
-
-        # first read files without project access - should fail
-        response = self.client.get("/rest/v2/directories/1")
-        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
-        response = self.client.get("/rest/v2/directories/1/files")
-        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
-
-        # set user to same project as previous files and try again. should now succeed
-        self._update_token_with_project_of_directory(1)
-
-        response = self.client.get("/rest/v2/directories/1")
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        response = self.client.get("/rest/v2/directories/1/files")
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-    @responses.activate
-    def test_browsing_by_project_and_file_path_is_protected(self):
-        self._use_http_authorization(method="bearer", token=self.token)
-
-        dr = Directory.objects.get(pk=2)
-        response = self.client.get(
-            "/rest/v2/directories/files?path=%s&project=%s"
-            % (dr.directory_path, dr.project_identifier)
-        )
-        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
-
-        self._update_token_with_project_of_directory(2)
-        response = self.client.get(
-            "/rest/v2/directories/files?path=%s&project=%s"
-            % (dr.directory_path, dr.project_identifier)
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-    @responses.activate
-    def test_browsing_in_cr_context(self):
-        """
-        A cr with open access type should be available to any end-user api user. Browsing files
-        for a cr with restricted access type should be forbidden for non-owner (or service) users.
-        """
-        cr = CatalogRecord.objects.get(pk=1)
-        self._use_http_authorization(method="bearer", token=self.token)
-        response = self.client.get(
-            "/rest/v2/directories/3/files?cr_identifier={0}".format(cr.identifier)
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-
-        cr.research_dataset["access_rights"]["access_type"]["identifier"] = ACCESS_TYPES[
-            "restricted"
-        ]
-        cr.force_save()
-
-        response = self.client.get(
-            "/rest/v2/directories/3/files?cr_identifier={0}".format(cr.identifier)
-        )
-        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
-
-    @responses.activate
-    def test_browsing_in_not_cr_context(self):
-        """
-        The same access rules apply when browsing with not_cr_identifier: a cr with open access
-        type is available to any end-user api user, while a cr with restricted access type is
-        forbidden for non-owner (or service) users.
-        """
- """ - cr = CatalogRecord.objects.get(pk=1) - self._use_http_authorization(method="bearer", token=self.token) - response = self.client.get( - "/rest/v2/directories/3/files?not_cr_identifier={0}".format(cr.identifier) - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - cr.research_dataset["access_rights"]["access_type"]["identifier"] = ACCESS_TYPES[ - "restricted" - ] - cr.force_save() - - response = self.client.get( - "/rest/v2/directories/3/files?not_cr_identifier={0}".format(cr.identifier) - ) - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - - -class DirectoryApiReadPaginationTests(DirectoryApiReadCommon): - - """ - Test paginated directory and file browsing. - Should return directories and/or files depending on limit and offset parameters. Defaul is ofcet is 10 - """ - - def setUp(self): - self._use_http_authorization() - self._create_test_dirs(14) - - def test_read_directory_with_default_limit_pagination(self): - """ - Test browsing files with pagination - """ - file_dict = self._get_dirs_files_ids("/rest/v2/directories/24/files") - - response = self.client.get("/rest/v2/directories/24/files?pagination") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]["directories"]), 10) - self.assertEqual(len(response.data["results"]["files"]), 0) - self.assertEqual( - response.data["results"]["directories"][0]["id"], - file_dict["directories"][0], - ) - self.assertEqual( - response.data["results"]["directories"][9]["id"], - file_dict["directories"][9], - ) - - next_link = response.data["next"].split("http://testserver")[1] - response = self.client.get(next_link) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]["directories"]), 4) - self.assertEqual(len(response.data["results"]["files"]), 6) - self.assertEqual( - response.data["results"]["directories"][0]["id"], - file_dict["directories"][10], - ) - self.assertEqual( - response.data["results"]["directories"][3]["id"], - file_dict["directories"][13], - ) - self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][0]) - self.assertEqual(response.data["results"]["files"][5]["id"], file_dict["files"][5]) - - next_link = response.data["next"].split("http://testserver")[1] - response = self.client.get(next_link) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]["directories"]), 0) - self.assertEqual(len(response.data["results"]["files"]), 10) - self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][6]) - self.assertEqual(response.data["results"]["files"][9]["id"], file_dict["files"][15]) - - prev_link = response.data["previous"].split("http://testserver")[1] - response = self.client.get(prev_link) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]["directories"]), 4) - self.assertEqual(len(response.data["results"]["files"]), 6) - self.assertEqual( - response.data["results"]["directories"][0]["id"], - file_dict["directories"][10], - ) - self.assertEqual( - response.data["results"]["directories"][3]["id"], - file_dict["directories"][13], - ) - self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][0]) - self.assertEqual(response.data["results"]["files"][5]["id"], file_dict["files"][5]) - - def test_read_directory_with_custom_limit_pagination(self): - file_dict = 
self._get_dirs_files_ids("/rest/v2/directories/24/files") - - response = self.client.get("/rest/v2/directories/24/files?limit=4&offset=12&pagination") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]["directories"]), 2) - self.assertEqual( - response.data["results"]["directories"][0]["id"], - file_dict["directories"][12], - ) - self.assertEqual( - response.data["results"]["directories"][1]["id"], - file_dict["directories"][13], - ) - self.assertEqual(len(response.data["results"]["files"]), 2) - self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][0]) - self.assertEqual(response.data["results"]["files"][1]["id"], file_dict["files"][1]) - - next_link = response.data["next"].split("http://testserver")[1] - prev_link = response.data["previous"].split("http://testserver")[1] - - response = self.client.get(next_link) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]["directories"]), 0) - self.assertEqual(len(response.data["results"]["files"]), 4) - self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][2]) - self.assertEqual(response.data["results"]["files"][3]["id"], file_dict["files"][5]) - - response = self.client.get(prev_link) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]["directories"]), 4) - self.assertEqual(len(response.data["results"]["files"]), 0) - self.assertEqual( - response.data["results"]["directories"][0]["id"], - file_dict["directories"][8], - ) - self.assertEqual( - response.data["results"]["directories"][3]["id"], - file_dict["directories"][11], - ) - - def test_read_directory_with_recursive_and_pagination(self): - """ - Query with recursive flag must return only files as a list - """ - file_list = self._get_dirs_files_ids("/rest/v2/directories/24/files?recursive") - - response = self.client.get("/rest/v2/directories/24/files?recursive&pagination") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 10) - self.assertEqual(response.data["results"][0]["id"], file_list[0]) - self.assertEqual(response.data["results"][9]["id"], file_list[9]) - - next_link = response.data["next"].split("http://testserver")[1] - response = self.client.get(next_link) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 10) - self.assertEqual(response.data["results"][0]["id"], file_list[10]) - self.assertEqual(response.data["results"][9]["id"], file_list[19]) - - prev_link = response.data["previous"].split("http://testserver")[1] - response = self.client.get(prev_link) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 10) - self.assertEqual(response.data["results"][0]["id"], file_list[0]) - self.assertEqual(response.data["results"][9]["id"], file_list[9]) - - def test_read_directory_with_dirs_only_and_pagination(self): - """ - Query with directories_only flag must return only directories - """ - file_dict = self._get_dirs_files_ids("/rest/v2/directories/24/files?directories_only")[ - "directories" - ] - - response = self.client.get("/rest/v2/directories/24/files?directories_only&pagination=true") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data["results"]["directories"]), 10) - 
self.assertEqual(response.data["results"]["directories"][0]["id"], file_dict[0])
-        self.assertEqual(response.data["results"]["directories"][9]["id"], file_dict[9])
-
-        next_link = response.data["next"].split("http://testserver")[1]
-        response = self.client.get(next_link)
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data["results"]["directories"]), 4)
-        self.assertEqual(response.data["results"]["directories"][0]["id"], file_dict[10])
-        self.assertEqual(response.data["results"]["directories"][3]["id"], file_dict[13])
-
-        prev_link = response.data["previous"].split("http://testserver")[1]
-        response = self.client.get(prev_link)
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data["results"]["directories"]), 10)
-        self.assertEqual(response.data["results"]["directories"][0]["id"], file_dict[0])
-        self.assertEqual(response.data["results"]["directories"][9]["id"], file_dict[9])
-
-    def test_read_directory_with_parent_and_pagination(self):
-        """
-        Query with include_parent flag must also return the parent directory's own fields
-        in the paginated results
-        """
-        file_dict = self._get_dirs_files_ids("/rest/v2/directories/24/files?include_parent")
-
-        response = self.client.get("/rest/v2/directories/24/files?include_parent&pagination=true")
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data["results"]["directories"]), 10)
-        self.assertEqual(
-            response.data["results"]["directories"][0]["id"],
-            file_dict["directories"][0],
-        )
-        self.assertEqual(
-            response.data["results"]["directories"][9]["id"],
-            file_dict["directories"][9],
-        )
-        self.assertEqual(response.data["results"]["id"], 24)
-        self.assertEqual(response.data["results"]["directory_name"], "10")
-
-        next_link = response.data["next"].split("http://testserver")[1]
-        response = self.client.get(next_link)
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data["results"]["directories"]), 4)
-        self.assertEqual(len(response.data["results"]["files"]), 6)
-        self.assertEqual(
-            response.data["results"]["directories"][0]["id"],
-            file_dict["directories"][10],
-        )
-        self.assertEqual(
-            response.data["results"]["directories"][3]["id"],
-            file_dict["directories"][13],
-        )
-        self.assertEqual(response.data["results"]["files"][0]["id"], file_dict["files"][0])
-        self.assertEqual(response.data["results"]["files"][5]["id"], file_dict["files"][5])
-        self.assertEqual(response.data["results"]["id"], 24)
-        self.assertEqual(response.data["results"]["directory_name"], "10")
-
-
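A sketch of the pagination contract these tests rely on (directory id 24 comes from the fixtures above; per the tests, limit defaults to 10, directories are returned before files, and next/previous are absolute URLs on testserver):

    response = self.client.get("/rest/v2/directories/24/files?pagination&limit=10")
    page = response.data
    # one page holds at most `limit` entries in total, directories first, then files
    assert len(page["results"]["directories"]) + len(page["results"]["files"]) <= 10
    # follow the next page the same way the tests above do
    next_link = page["next"].split("http://testserver")[1]
    response = self.client.get(next_link)
    assert response.status_code == 200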
- """ - - def setUp(self): - self._use_http_authorization() - self._create_test_dirs(5) - - def test_browsing_directory_with_file_name(self): - - response = self.client.get("/rest/v2/directories/24/files?file_name=") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["files"]), 51) - - response = self.client.get("/rest/v2/directories/24/files?file_name=_name_1") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["files"]), 21) - - response = self.client.get("/rest/v2/directories/24/files?file_name=0") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["files"]), 15) - - response = self.client.get("/rest/v2/directories/24/files?file_name=_name_118") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["files"]), 1) - - def test_browsing_directory_with_directory_name(self): - - response = self.client.get("/rest/v2/directories/24/files?directory_name=") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["directories"]), 5) - - response = self.client.get("/rest/v2/directories/24/files?directory_name=dir_1") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["directories"]), 1) - - response = self.client.get("/rest/v2/directories/24/files?directory_name=dir") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["directories"]), 5) - - def test_browsing_directory_with_directory_and_file_name(self): - - response = self.client.get("/rest/v2/directories/24/files?directory_name=&file_name=") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data["directories"]), 5) - self.assertEqual(len(response.data["files"]), 51) - - response = self.client.get( - "/rest/v2/directories/24/files?directory_name=dir_1&file_name=file_name_120" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["directories"]), 1) - self.assertEqual(len(response.data["files"]), 1) - - response = self.client.get( - "/rest/v2/directories/24/files?directory_name=dir&file_name=not_existing" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["directories"]), 5) - self.assertEqual(len(response.data["files"]), 0) - - def test_browsing_directory_with_file_and_dir_name_and_pagination(self): - # second page should return last filtered files - response = self.client.get( - "/rest/v2/directories/24/files?file_name=0&pagination&limit=10&offset=10" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]["directories"]), 0) - self.assertEqual(len(response.data["results"]["files"]), 10) - - # first page with limit of 3 should return first filtered directories - response = self.client.get( - "/rest/v2/directories/24/files?directory_name=dir_&pagination&limit=3" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]["directories"]), 3) - self.assertEqual(len(response.data["results"]["files"]), 0) - - # first page with limit of 3 should return first filtered directories - response = self.client.get( - "/rest/v2/directories/24/files?directory_name=dir_1&file_name=0&pagination" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - 
-        self.assertEqual(len(response.data["results"]["directories"]), 1)
-        self.assertEqual(len(response.data["results"]["files"]), 9)
-
-    def test_browsing_directory_with_directory_and_file_name_and_dirs_only(self):
-
-        response = self.client.get(
-            "/rest/v2/directories/24/files?file_name=_name_11&directories_only"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data["directories"]), 5)
-        self.assertEqual(response.data.get("files"), None)
-
-        response = self.client.get(
-            "/rest/v2/directories/24/files?directory_name=dir_5&directories_only"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data["directories"]), 1)
-        self.assertEqual(response.data.get("files"), None)
-
-    def test_browsing_directory_with_directory_and_file_name_and_recursive(self):
-
-        response = self.client.get("/rest/v2/directories/24/files?file_name=_name_11&recursive")
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data), 10)
-
-        # should return one file from the matching directory along with the rest of the filtered files
-        response = self.client.get(
-            "/rest/v2/directories/24/files?directory_name=dir_5&file_name=5&recursive&depth=*"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-        self.assertEqual(len(response.data), 6)
-
-    def test_browsing_directory_with_directory_and_file_name_and_cr_identifier_and_not_cr_identifier(
-        self,
-    ):
-        # tests for directory_name and cr_identifier
-        response = self.client.get(
-            "/rest/v2/directories/17/files?directory_name=2&cr_identifier=13"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data["directories"]), 1)
-
-        response = self.client.get(
-            "/rest/v2/directories/17/files?directory_name=phase&cr_identifier=13"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data["directories"]), 2)
-
-        response = self.client.get("/rest/v2/directories/17/files?directory_name=&cr_identifier=13")
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data["directories"]), 2)
-
-        # tests for directory_name and not_cr_identifier
-        response = self.client.get(
-            "/rest/v2/directories/17/files?directory_name=phase&not_cr_identifier=13"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data["directories"]), 0)
-
-        response = self.client.get(
-            "/rest/v2/directories/17/files?directory_name=2&not_cr_identifier=13"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data["directories"]), 0)
-
-        # tests for file_name and cr_identifier
-        response = self.client.get("/rest/v2/directories/12/files?file_name=22&cr_identifier=13")
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data["files"]), 1)
-
-        response = self.client.get("/rest/v2/directories/12/files?file_name=name_&cr_identifier=13")
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data["files"]), 3)
-
-        response = self.client.get("/rest/v2/directories/12/files?file_name=&cr_identifier=13")
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(len(response.data["files"]), 3)
-
"/rest/v2/directories/17/files?file_name=&cr_identifier=13&directories_only" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get("files"), None) - - # tests for file_name and not_cr_identifier - response = self.client.get( - "/rest/v2/directories/16/files?file_name=name¬_cr_identifier=13" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data["files"]), 1) - - response = self.client.get( - "/rest/v2/directories/16/files?file_name=name_2¬_cr_identifier=13" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data["files"]), 1) - - response = self.client.get( - "/rest/v2/directories/16/files?file_name=name_1¬_cr_identifier=13" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data["files"]), 0) + _assertDirectoryData_not_cr_id(id, parent_data) \ No newline at end of file diff --git a/src/metax_api/tests/api/rest/v2/views/files/__init__.py b/src/metax_api/tests/api/rest/v2/views/files/__init__.py index 6adb6e69..d60060fb 100755 --- a/src/metax_api/tests/api/rest/v2/views/files/__init__.py +++ b/src/metax_api/tests/api/rest/v2/views/files/__init__.py @@ -5,5 +5,4 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT -from .read import * from .write import * diff --git a/src/metax_api/tests/api/rest/v2/views/files/read.py b/src/metax_api/tests/api/rest/v2/views/files/read.py deleted file mode 100755 index d560630e..00000000 --- a/src/metax_api/tests/api/rest/v2/views/files/read.py +++ /dev/null @@ -1,396 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -import responses -from django.core.management import call_command -from django.db import connection -from rest_framework import status -from rest_framework.test import APITestCase - -from metax_api.models import File -from metax_api.tests.utils import TestClassUtils, get_test_oidc_token, test_data_file_path - - -class FileApiReadCommon(APITestCase, TestClassUtils): - @classmethod - def setUpClass(cls): - """ - Loaded only once for test cases inside this class. 
- """ - call_command("loaddata", test_data_file_path, verbosity=0) - super(FileApiReadCommon, cls).setUpClass() - - def setUp(self): - file_from_test_data = self._get_object_from_test_data("file") - self.identifier = file_from_test_data["identifier"] - self.pk = file_from_test_data["id"] - self._use_http_authorization() - - -class FileApiReadBasicTests(FileApiReadCommon): - def test_read_file_list(self): - response = self.client.get("/rest/v2/files") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - def test_read_file_list_filter_by_project(self): - proj = File.objects.get(pk=1).project_identifier - file_count = File.objects.filter(project_identifier=proj).count() - response = self.client.get("/rest/v2/files?project_identifier=%s" % proj) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data["count"], file_count) - - def test_read_file_list_filter_by_project_and_path(self): - proj = File.objects.get(pk=1).project_identifier - path = "/project_x_FROZEN/Experiment_X/Phase_1/2017/01" - file_count = File.objects.filter(project_identifier=proj, file_path__contains=path).count() - response = self.client.get( - "/rest/v2/files?project_identifier=%s&file_path=%s" % (proj, path) - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data["count"], file_count) - - # missing project_identifier - response = self.client.get("/rest/v2/files?file_path=%s" % path) - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - def test_read_file_details_by_pk(self): - response = self.client.get("/rest/v2/files/%s" % self.pk) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual( - hasattr(response, "data"), - True, - "Request response object is missing attribute 'data'", - ) - self.assertEqual("file_name" in response.data, True) - self.assertEqual(response.data["identifier"], self.identifier) - self.assertEqual("identifier" in response.data["file_storage"], True) - - def test_read_file_details_by_identifier(self): - response = self.client.get("/rest/v2/files/%s" % self.identifier) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual( - hasattr(response, "data"), - True, - "Request response object is missing attribute 'data'", - ) - self.assertEqual("file_name" in response.data.keys(), True) - self.assertEqual(response.data["identifier"], self.identifier) - - def test_read_file_details_not_found(self): - response = self.client.get("/rest/v2/files/shouldnotexist") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - def test_read_file_details_checksum_relation(self): - response = self.client.get("/rest/v2/files/%s" % self.pk) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual("checksum" in response.data, True) - self.assertEqual("value" in response.data["checksum"], True) - - def test_expand_relations(self): - response = self.client.get("/rest/v2/files/1?expand_relation=file_storage,parent_directory") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual( - "file_storage_json" in response.data["file_storage"], - True, - response.data["file_storage"], - ) - self.assertEqual( - "date_created" in response.data["parent_directory"], - True, - response.data["parent_directory"], - ) - - -class FileApiReadGetRelatedDatasets(FileApiReadCommon): - def test_get_related_datasets_ok_1(self): - """ - File pk 1 should belong to only 3 datasets - """ - response = 
self.client.post("/rest/v2/files/datasets", [1], format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_results_length(response, 3) - - def test_get_related_datasets_ok_2(self): - """ - File identifiers listed below should belong to 5 datasets - """ - file_identifiers = File.objects.filter(id__in=[1, 2, 3, 4, 5]).values_list( - "identifier", flat=True - ) - response = self.client.post("/rest/v2/files/datasets", file_identifiers, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_results_length(response, 5) - - def test_keysonly(self): - """ - Parameter ?keysonly should return just values - """ - response = self.client.post( - "/rest/v2/files/datasets?keys=files&keysonly", [1, 2, 121], format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_results_length(response, 2) # pid:urn:121 does not belong to any dataset - self.assertEqual(type(response.data), list, type(response.data)) # no dict keys - - response = self.client.post( - "/rest/v2/files/datasets?keys=files&keysonly=false", [1, 2], format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(type(response.data), dict, response.data) # Return by keys - - response = self.client.post( - "/rest/v2/files/datasets?keys=datasets&keysonly", [1, 2, 14], format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_results_length(response, 2) # Only datasets 1 and 2 have files - self.assertEqual(type(response.data), list, type(response.data)) # no dict keys - - def test_get_detailed_related_datasets_ok_1(self): - """ - File identifiers listed below should belong to 3 datasets - """ - response = self.client.post("/rest/v2/files/datasets?keys=files", [1], format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_results_length(response, 1) - self.assertEqual(len(list(response.data.values())[0]), 3, response.data) - - # Support for ?detailed - response = self.client.post("/rest/v2/files/datasets?detailed", [1], format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_results_length(response, 1) - self.assertEqual(len(list(response.data.values())[0]), 3, response.data) - - def test_get_detailed_related_datasets_ok_2(self): - """ - File identifiers listed below should belong to 5 datasets - """ - file_identifiers = [1, 2, 3, 4, 5] - - response = self.client.post( - "/rest/v2/files/datasets?keys=files", file_identifiers, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_results_length(response, 5) - - # set of all returned datasets - self.assertEqual(len(set(sum(response.data.values(), []))), 5, response.data) - - # check if identifiers work - file_identifiers = [ - "pid:urn:1", - "pid:urn:2", - "pid:urn:3", - "pid:urn:4", - "pid:urn:5", - ] - - response = self.client.post( - "/rest/v2/files/datasets?keys=files", file_identifiers, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_results_length(response, 5) - - # set of all returned datasets - self.assertEqual(len(set(sum(response.data.values(), []))), 5, response.data) - - def test_get_detailed_related_files_ok_1(self): - """ - Dataset identifiers listed below should have 2 files - """ - response = 
self.client.post("/rest/v2/files/datasets?keys=datasets", [1], format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_results_length(response, 1) - self.assertEqual(len(list(response.data.values())[0]), 2, response.data) - - def test_get_detailed_related_files_ok_2(self): - """ - Tests that datasets return files correctly - """ - dataset_identifiers = [1, 2, 3, 4, 5] - - response = self.client.post( - "/rest/v2/files/datasets?keys=datasets", dataset_identifiers, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_results_length(response, 5) - - # set of all returned datasets - self.assertEqual(len(set(sum(response.data.values(), []))), 10, response.data) - - # check if identifiers work - dataset_identifiers = [ - "cr955e904-e3dd-4d7e-99f1-3fed446f96d1", - "cr955e904-e3dd-4d7e-99f1-3fed446f96d2", - "cr955e904-e3dd-4d7e-99f1-3fed446f96d3", - "cr955e904-e3dd-4d7e-99f1-3fed446f96d4", - "cr955e904-e3dd-4d7e-99f1-3fed446f96d5", - ] - - response = self.client.post( - "/rest/v2/files/datasets?keys=datasets", dataset_identifiers, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self._assert_results_length(response, 5) - - # set of all returned datasets - self.assertEqual(len(set(sum(response.data.values(), []))), 10, response.data) - - def test_get_right_files_and_datasets(self): - """ - Check that returned files and datasets are the right ones - """ - testfile = self._get_object_from_test_data("file") - - cr = self.client.get("/rest/v2/datasets/10", format="json") - self.assertEqual(cr.status_code, status.HTTP_200_OK, cr.data) - - response = self.client.post( - "/rest/v2/files/datasets?keys=datasets", - [cr.data["identifier"]], - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - # cr 10 has 2 default files - for keys, values in response.data.items(): - self.assertEqual(keys == "cr955e904-e3dd-4d7e-99f1-3fed446f9610", True, response.data) - self.assertEqual("pid:urn:19" and "pid:urn:20" in values, True, response.data) - - response = self.client.post( - "/rest/files/datasets?keys=files", [testfile["identifier"]], format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - # file 1 belongs to 3 datasets - for keys, values in response.data.items(): - self.assertEqual(keys == "pid:urn:1", True, response.data) - self.assertEqual( - "cr955e904-e3dd-4d7e-99f1-3fed446f96d1" - and "cr955e904-e3dd-4d7e-99f1-3fed446f9612" - and "cr955e904-e3dd-4d7e-99f1-3fed446f9611" in values, - True, - response.data, - ) - - # Dataset 11 has 20 files in a directory - cr = self.client.get("/rest/v2/datasets/11", format="json") - self.assertEqual(cr.status_code, status.HTTP_200_OK, cr.data) - - # Compare using return from different api - files_in_cr11 = self.client.get("/rest/v2/datasets/11/files", format="json") - self.assertEqual(files_in_cr11.status_code, status.HTTP_200_OK, files_in_cr11.data) - identifiers = [] - [identifiers.append(i["identifier"]) for i in files_in_cr11.data] - - response = self.client.post("/rest/v2/files/datasets?keys=datasets", [11], format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - # This should have the same file id's as the return from /rest/v2/datasets/11/files - self.assertEqual( - sorted(response.data["cr955e904-e3dd-4d7e-99f1-3fed446f9611"]), - sorted(identifiers), - response.data, - ) - - response = 
-        response = self.client.post(
-            "/rest/v2/files/datasets?keys=files", ["pid:urn:20"], format="json"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        # Dataset 11 should be found from results
-        self.assertTrue(
-            "cr955e904-e3dd-4d7e-99f1-3fed446f9611" in response.data["pid:urn:20"],
-            response.data,
-        )
-
-    def test_get_related_datasets_files_not_found(self):
-        """
-        When the files themselves are not found, an empty result (200) should be returned
-        """
-        response = self.client.post("/rest/v2/files/datasets", ["doesnotexist"], format="json")
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self._assert_results_length(response, 0)
-
-        response = self.client.post(
-            "/rest/v2/files/datasets?keys=files", ["doesnotexist"], format="json"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self._assert_results_length(response, 0)
-
-        # Support for ?detailed
-        response = self.client.post(
-            "/rest/v2/files/datasets?detailed", ["doesnotexist"], format="json"
-        )
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self._assert_results_length(response, 0)
-
-    def test_get_related_datasets_records_not_found(self):
-        """
-        When files are found, but no records for them, an empty list should be returned
-        """
-        with connection.cursor() as cr:
-            # detach file pk 1 from any datasets
-            cr.execute("delete from metax_api_catalogrecord_files where file_id = 1")
-
-        response = self.client.post("/rest/v2/files/datasets", [1], format="json")
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self._assert_results_length(response, 0)
-
-        response = self.client.post("/rest/v2/files/datasets?keys=files", [1], format="json")
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self._assert_results_length(response, 0)
-
-        # Support for ?detailed
-        response = self.client.post("/rest/v2/files/datasets?detailed", [1], format="json")
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self._assert_results_length(response, 0)
-
-    def _assert_results_length(self, response, length):
-        self.assertTrue(
-            isinstance(response.data, dict) or isinstance(response.data, list),
-            response.data,
-        )
-        self.assertEqual(len(response.data), length)
-
-
-class FileApiReadEndUserAccess(FileApiReadCommon):
-    def setUp(self):
-        super().setUp()
-        self.token = get_test_oidc_token()
-        self._mock_token_validation_succeeds()
-
-    @responses.activate
-    def test_user_can_read_owned_files(self):
-        """
-        Ensure users can only read files owned by them from /rest/v2/files api.
-        """
-
-        # first read files without project access - should fail
-        self._use_http_authorization(method="bearer", token=self.token)
-        proj = File.objects.get(pk=1).project_identifier
-
-        response = self.client.get("/rest/v2/files/1")
-        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
-        response = self.client.get("/rest/v2/files?project_identifier=%s" % proj)
-        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
-        response = self.client.get("/rest/files?pagination=false")
-        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
-        self.assertEqual(
-            len(response.data),
-            0,
-            "should return 200 OK, but the user's projects have no files",
-        )
-
-        # set user to same project as previous files and try again. 
should now succeed - self.token["group_names"].append("IDA01:%s" % proj) - self._use_http_authorization(method="bearer", token=self.token) - - response = self.client.get("/rest/v2/files") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data) > 0, True, "user should only see their own files") - - response = self.client.get("/rest/v2/files/1") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - response = self.client.get("/rest/v2/files?project_identifier=%s" % proj) - self.assertEqual(response.status_code, status.HTTP_200_OK) diff --git a/src/metax_api/tests/api/rest/v2/views/files/write.py b/src/metax_api/tests/api/rest/v2/views/files/write.py index 9e3ea906..96c5484f 100755 --- a/src/metax_api/tests/api/rest/v2/views/files/write.py +++ b/src/metax_api/tests/api/rest/v2/views/files/write.py @@ -5,18 +5,12 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT -from copy import deepcopy -from os.path import dirname - -import responses from django.core.management import call_command -from django.db.models import Sum from rest_framework import status from rest_framework.test import APITestCase -from metax_api.models import CatalogRecord, Directory, File from metax_api.services.redis_cache_service import RedisClient -from metax_api.tests.utils import TestClassUtils, get_test_oidc_token, test_data_file_path +from metax_api.tests.utils import TestClassUtils, test_data_file_path class FileApiWriteCommon(APITestCase, TestClassUtils): @@ -74,29 +68,6 @@ def _get_second_new_test_data(self): self._change_file_path(from_test_data, "file_name_2") return from_test_data - def _count_dirs_from_path(self, file_path): - expected_dirs_count = 1 - dir_name = dirname(file_path) - while dir_name != "/": - dir_name = dirname(dir_name) - expected_dirs_count += 1 - return expected_dirs_count - - def _check_project_root_byte_size_and_file_count(self, project_identifier): - """ - A rather simple test to fetch the root directory of a project, and verify that the - root's calculated total byte size and file count match what exists in the db. 
- """ - byte_size = File.objects.filter(project_identifier=project_identifier).aggregate( - Sum("byte_size") - )["byte_size__sum"] - file_count = File.objects.filter(project_identifier=project_identifier).count() - - response = self.client.get("/rest/v2/directories/root?project=%s" % project_identifier) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data["byte_size"], byte_size) - self.assertEqual(response.data["file_count"], file_count) - def _change_file_path(self, file, new_name): file["file_path"] = file["file_path"].replace(file["file_name"], new_name) file["file_name"] = new_name @@ -145,118 +116,8 @@ def setUp(self): ) self.assertTrue(self.ff_without_version["output_format_version"] == "") - def test_file_format_version_with_invalid_file_format_when_format_version_given_1( - self, - ): - self.test_new_data["file_characteristics"]["format_version"] = "any" - response = self.client.post("/rest/v2/files", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual("file_characteristics" in response.data.keys(), True) - self.assertEqual( - "file_characteristics.file_format" in response.data["file_characteristics"], - True, - ) - - def test_file_format_version_with_invalid_file_format_when_format_version_given_2( - self, - ): - self.test_new_data["file_characteristics"]["file_format"] = "nonexisting" - self.test_new_data["file_characteristics"]["format_version"] = "any" - response = self.client.post("/rest/v2/files", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual("file_characteristics" in response.data.keys(), True) - self.assertEqual( - "file_characteristics.file_format" in response.data["file_characteristics"], - True, - ) - - def test_file_format_version_with_invalid_format_version_when_file_format_has_versions_1( - self, - ): - self.test_new_data["file_characteristics"]["file_format"] = self.ff_with_version[ - "input_file_format" - ] - response = self.client.post("/rest/v2/files", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual("file_characteristics" in response.data.keys(), True) - self.assertEqual( - "file_characteristics.format_version" in response.data["file_characteristics"], - True, - ) - - def test_file_format_version_with_invalid_format_version_when_file_format_has_versions_2( - self, - ): - self.test_new_data["file_characteristics"]["file_format"] = self.ff_with_version[ - "input_file_format" - ] - self.test_new_data["file_characteristics"]["format_version"] = "nonexisting" - response = self.client.post("/rest/v2/files", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual("file_characteristics" in response.data.keys(), True) - self.assertEqual( - "file_characteristics.format_version" in response.data["file_characteristics"], - True, - ) - - def test_file_format_version_with_empty_format_version_when_file_format_has_no_version_1( - self, - ): - self.test_new_data["file_characteristics"]["file_format"] = self.ff_without_version[ - "input_file_format" - ] - response = self.client.post("/rest/v2/files", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED) - - def test_file_format_version_with_empty_format_version_when_file_format_has_no_version_2( - self, - ): - 
self.test_new_data["file_characteristics"]["file_format"] = self.ff_without_version[ - "input_file_format" - ] - self.test_new_data["file_characteristics"]["format_version"] = "" - response = self.client.post("/rest/v2/files", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - def test_file_format_version_with_valid_file_format_and_valid_file_version_1(self): - self.test_new_data["file_characteristics"]["file_format"] = self.ff_with_version[ - "input_file_format" - ] - self.test_new_data["file_characteristics"]["format_version"] = self.ff_with_version[ - "output_format_version" - ] - response = self.client.post("/rest/v2/files", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED) - - def test_file_format_version_with_valid_file_format_and_valid_file_version_2(self): - self.test_new_data["file_characteristics"]["file_format"] = self.ff_with_version[ - "input_file_format" - ] - self.test_new_data["file_characteristics"][ - "format_version" - ] = self.ff_with_different_version["output_format_version"] - response = self.client.post("/rest/v2/files", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED) - # update tests - def test_file_characteristics_is_validated_on_update(self): - """ - Ensure validation also works when updating existing files. - """ - self.test_new_data["file_characteristics"]["file_format"] = self.ff_without_version[ - "input_file_format" - ] - response = self.client.post("/rest/v2/files", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - response = self.client.put( - "/rest/v2/files/%s" % response.data["identifier"], - response.data, - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - def test_format_version_is_removed(self): """ Empty file format version should be removed @@ -266,1418 +127,4 @@ def test_format_version_is_removed(self): response = self.client.post("/rest/v2/files", self.test_new_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertTrue("format_version" not in response.data["file_characteristics"]) - - -class FileApiWriteCreateTests(FileApiWriteCommon): - # - # - # - # create apis - # - # - # - - def test_create_file(self): - # note: leading and trailing whitespace must be preserved. 
-        newly_created_file_name = (
-            " MX .201015_Suomessa_tavattavat_ruokasammakot_ovat_väritykseltään_vaihtelevia_"
-            "osa_on_ruskeita,_osa_kirkkaankin_vihreitä._Vihersammakoiden_silmät_ovat_kohtalaisen_korkealla_päälae"
-            "lla._Sammakkolampi.fi_CC-BY-NC-4.0_thumb.jpg.meta "
-        )
-        self.test_new_data["file_name"] = newly_created_file_name
-        self.test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn"
-
-        response = self.client.post("/rest/v2/files", self.test_new_data, format="json")
-        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual("file_name" in response.data.keys(), True)
-        self.assertEqual(response.data["file_name"], newly_created_file_name)
-        self._check_project_root_byte_size_and_file_count(response.data["project_identifier"])
-
-    def test_create_file_error_identifier_exists(self):
-        # first ok
-        response = self.client.post("/rest/v2/files", self.test_new_data, format="json")
-        # second should give error
-        response = self.client.post("/rest/v2/files", self.test_new_data, format="json")
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertEqual("identifier" in response.data.keys(), True)
-        self.assertEqual("already exists" in response.data["identifier"][0], True)
-
-    def test_allow_creating_previously_deleted_file(self):
-        """
-        It should be possible to delete a file, and then create the exact same file again
-        without letting the removed file conflict.
-        """
-        response = self.client.post("/rest/v2/files", self.test_new_data, format="json")
-        response = self.client.delete("/rest/v2/files/%d" % response.data["id"], format="json")
-
-        response = self.client.post("/rest/v2/files", self.test_new_data, format="json")
-        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-
-    def test_create_file_error_json_validation(self):
-        self.test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn"
-        self.test_new_data["file_characteristics"] = {
-            "application_name": "Application Name",
-            "description": "A nice description 0000000010",
-            "metadata_modified": 12345,
-            "file_created": "2014-01-17T08:19:31Z",
-            "encoding": "utf-8",
-            "title": "A title 0000000010",
-        }
-
-        response = self.client.post("/rest/v2/files", self.test_new_data, format="json")
-
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertEqual(
-            "file_characteristics" in response.data.keys(),
-            True,
-            "The error should concern the field file_characteristics",
-        )
-        self.assertEqual(
-            "metadata_modified" in response.data["file_characteristics"][0],
-            True,
-            "The error should contain the name of the erroneous field",
-        )
-        self.assertEqual(
-            "Json path:" in response.data["file_characteristics"][0],
-            True,
-            "The error should contain the json path",
-        )
-
-    def test_create_file_allowed_checksum_algorithm(self):
-        self.test_new_data["checksum"]["algorithm"] = "SHA-512"
-
-        response = self.client.post("/rest/v2/files", self.test_new_data, format="json")
-
-        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
-        self.assertEqual(response.data["checksum"]["algorithm"], "SHA-512")
-
-        self.test_new_data["identifier"] = "urn:nbn:fi:csc-md5"
-        self.test_new_data["file_path"] = "/md5/filepath/md5-filename"
-        self.test_new_data["file_name"] = "md5-filename"
-        self.test_new_data["checksum"]["algorithm"] = "MD5"
-
-        response = self.client.post("/rest/v2/files", self.test_new_data, format="json")
-
-        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
self.assertEqual(response.data["checksum"]["algorithm"], "MD5") - - def test_create_file_not_allowed_checksum_algorithm(self): - from django.db import transaction - - for algo in ["sha2", "sha256", "sha-256"]: - # run POST requests inside db transaction to ensure django testcase transactions - # work correctly. https://stackoverflow.com/a/23326971/1201945 this probably has - # something to do with the fact that POST requests to /rest/v2/files do not normally - # execute inside a db transaction like all other requests to metax api do. see - # file_view.py for details. - # - # alternative for below would be to use optional query param ?dryrun=true, which - # causes the request to be executed inside a transaction too. - with transaction.atomic(): - self.test_new_data["checksum"]["algorithm"] = algo - response = self.client.post("/rest/v2/files", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual("checksum_algorithm" in response.data, True) - - # - # create list operations - # - - def test_create_file_list(self): - self.test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" - self._change_file_path(self.test_new_data, "one_file.txt") - - self.second_test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurnalso" - self._change_file_path(self.second_test_new_data, "two_file.txt") - - response = self.client.post( - "/rest/v2/files", - [self.test_new_data, self.second_test_new_data], - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual("failed" in response.data.keys(), True) - self.assertEqual("success" in response.data.keys(), True) - self.assertEqual("object" in response.data["success"][0].keys(), True) - self.assertEqual(len(response.data["failed"]), 0, response.data["failed"]) - self.assertEqual(len(response.data["success"]), 2) - self._check_project_root_byte_size_and_file_count( - response.data["success"][0]["object"]["project_identifier"] - ) - - # ensure structure of some specific fields is the same as when single files are created - self.assertEqual( - "identifier" in response.data["success"][0]["object"]["file_storage"], True - ) - self.assertEqual( - "identifier" in response.data["success"][0]["object"]["parent_directory"], - True, - ) - self.assertEqual("checksum" in response.data["success"][0]["object"], True) - self.assertEqual("value" in response.data["success"][0]["object"]["checksum"], True) - - def test_create_file_list_error_one_fails(self): - newly_created_file_name = "newly_created_file_name" - self.test_new_data["file_name"] = newly_created_file_name - self.test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" - # same as above - should fail - self.second_test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" - - response = self.client.post( - "/rest/v2/files", - [self.test_new_data, self.second_test_new_data], - format="json", - ) - - """ - List response looks like - { - 'success': [ - { 'object': object }, - more objects... - ], - 'failed': [ - { - 'object': object, - 'errors': {'field': ['message'], 'otherfield': ['message']} - }, - more objects...
- ] - } - """ - self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertEqual("success" in response.data.keys(), True) - self.assertEqual("failed" in response.data.keys(), True) - self.assertEqual("object" in response.data["failed"][0].keys(), True) - self.assertEqual("file_name" in response.data["failed"][0]["object"].keys(), True) - self.assertEqual( - "identifier" in response.data["failed"][0]["errors"], - True, - "The error should have been about an already existing identifier", - ) - - def test_parameter_ignore_already_exists_errors(self): - newly_created_file_name = "newly_created_file_name" - self.test_new_data["file_name"] = newly_created_file_name - self.test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" - # same as above - should cause an error. - self.second_test_new_data["identifier"] = "urn:nbn:fi:csc-thisisanewurn" - - response = self.client.post( - "/rest/v2/files?ignore_already_exists_errors", - [self.test_new_data, self.second_test_new_data], - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(len(response.data), 2) - self.assertEqual("already exists" in response.data["success"][1]["object"]["detail"], True) - - def test_create_file_list_error_all_fail(self): - newly_created_file_name = "newly_created_file_name" - self.test_new_data["file_name"] = newly_created_file_name - # identifier is a required field, should fail - self.test_new_data["identifier"] = None - self.second_test_new_data["identifier"] = None - - response = self.client.post( - "/rest/v2/files", - [self.test_new_data, self.second_test_new_data], - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual("success" in response.data.keys(), True) - self.assertEqual("failed" in response.data.keys(), True) - self.assertEqual("object" in response.data["failed"][0].keys(), True) - self.assertEqual(len(response.data["success"]), 0) - self.assertEqual(len(response.data["failed"]), 2) - - -class FileApiWriteCreateDirectoriesTests(FileApiWriteCommon): - - """ - Only checking directories related stuff in these tests - """ - - def test_create_file_hierarchy_from_single_file(self): - """ - Create, from a single file, a file hierarchy for a project which has 0 files - or directories created previously. - """ - - f = self._form_complex_list_from_test_file()[0] - file_path = ( - "/project_y_FROZEN/Experiment_1/path/of/lonely/file_and_this_also_has_to_support" - "veryverylooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" - "ooooooooooooooooooooooooooooooooooooooongdirectorynames/%s" - ) - f["file_path"] = file_path % f["file_name"] - f["identifier"] = "abc123111" - - response = self.client.post("/rest/v2/files", f, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual("date_created" in response.data, True) - self.assertEqual("parent_directory" in response.data, True) - - dirs_count = Directory.objects.filter(project_identifier="project_y").count() - dirs_created_count = self._count_dirs_from_path(f["file_path"]) - self.assertEqual(dirs_count, dirs_created_count) - - def test_create_file_append_to_existing_directory(self): - """ - Appending a file to an existing file hierarchy should not cause any other - changes in any other directories. - - Note: Targeting project_x, which exists in pre-generated test data. 
- """ - project_identifier = "project_x" - dir_count_before = Directory.objects.filter(project_identifier=project_identifier).count() - file_count_before = ( - Directory.objects.filter( - project_identifier=project_identifier, - directory_path="/project_x_FROZEN/Experiment_X/Phase_1", - ) - .first() - .files.all() - .count() - ) - - f = self._form_complex_list_from_test_file()[0] - f["file_path"] = "/project_x_FROZEN/Experiment_X/Phase_1/%s" % f["file_name"] - f["identifier"] = "%s-111" % f["file_path"] - f["project_identifier"] = project_identifier - - response = self.client.post("/rest/v2/files", f, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual("date_created" in response.data, True) - self.assertEqual("parent_directory" in response.data, True) - - dir_count_after = Directory.objects.filter(project_identifier=project_identifier).count() - file_count_after = ( - Directory.objects.filter( - project_identifier=project_identifier, - directory_path="/project_x_FROZEN/Experiment_X/Phase_1", - ) - .first() - .files.all() - .count() - ) - self.assertEqual(dir_count_before, dir_count_after) - self.assertEqual(file_count_after - file_count_before, 1) - - def test_create_file_hierarchy_from_file_list_with_no_existing_files(self): - """ - Create a file hierarchy for a project which has 0 files or directories created previously. - - Here, a directory /project_y_FROZEN/Experiment_1 is "frozen" - """ - experiment_1_file_list = self._form_complex_list_from_test_file() - - response = self.client.post("/rest/v2/files", experiment_1_file_list, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual("success" in response.data.keys(), True) - self.assertEqual(len(response.data["success"]), 12) - self.assertEqual(len(response.data["failed"]), 0) - - dirs_dict = self._assert_directory_parent_dirs("project_y") - self._assert_file_parent_dirs(dirs_dict, response) - - def test_create_file_hierarchy_from_file_list_with_existing_files(self): - """ - Create a file hierarchy for a project which already has files or directories - created previously. - - Here the interesting part is, the top-most dir in the file list should find - an existing directory, which it can use as its parent dir. - - Here, a directory /project_y_FROZEN/Experiment_2/Phase_1/Data is "frozen", - when /project_y_FROZEN already exists. - """ - - # setup db to have pre-existing dirs - experiment_1_file_list = self._form_complex_list_from_test_file() - response = self.client.post("/rest/v2/files", experiment_1_file_list, format="json") - - # form new test data - experiment_2_file_list = self._form_complex_list_from_test_file() - - for i, f in enumerate(experiment_2_file_list): - f["file_path"] = f["file_path"].replace( - "/project_y_FROZEN/Experiment_1", - "/project_y_FROZEN/Experiment_2/Phase_1/Data", - ) - f["identifier"] = "%s-%d" % (f["file_path"], i) - - response = self.client.post("/rest/v2/files", experiment_2_file_list, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual("success" in response.data.keys(), True) - self.assertEqual(len(response.data["success"]), 12) - self.assertEqual(len(response.data["failed"]), 0) - - dirs_dict = self._assert_directory_parent_dirs("project_y") - self._assert_file_parent_dirs(dirs_dict, response) - - def test_append_files_to_existing_directory(self): - """ - Append some files to an existing directory. 
- - Here, 5 files are added to directory /project_y_FROZEN/Experiment_2/ - """ - - # setup db to have pre-existing dirs - experiment_1_file_list = self._form_complex_list_from_test_file() - response = self.client.post("/rest/v2/files", experiment_1_file_list, format="json") - - # form new test data, and trim it down a bit - experiment_2_file_list = self._form_complex_list_from_test_file() - while len(experiment_2_file_list) > 5: - experiment_2_file_list.pop() - - for i, f in enumerate(experiment_2_file_list): - f["file_path"] = "/project_y_FROZEN/Experiment_2/%s" % f["file_name"] - f["identifier"] = "%s-%d" % (f["file_path"], i) - - response = self.client.post("/rest/v2/files", experiment_2_file_list, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual("success" in response.data.keys(), True) - self.assertEqual(len(response.data["failed"]), 0, response.data["failed"]) - self.assertEqual(len(response.data["success"]), 5) - - dirs_dict = self._assert_directory_parent_dirs("project_y") - self._assert_file_parent_dirs(dirs_dict, response) - - def test_append_one_file_to_existing_directory(self): - """ - Append one file to an existing directory. - - Here, 1 file is added to directory /project_y_FROZEN/Experiment_2/ - """ - - # setup db to have pre-existing dirs - experiment_1_file_list = self._form_complex_list_from_test_file() - response = self.client.post("/rest/v2/files", experiment_1_file_list, format="json") - - # form new test data, but use just the first item - experiment_2_file_list = self._form_complex_list_from_test_file()[0:1] - - for i, f in enumerate(experiment_2_file_list): - f["file_path"] = "/project_y_FROZEN/Experiment_2/%s" % f["file_name"] - f["identifier"] = "%s-%d" % (f["file_path"], i) - - response = self.client.post("/rest/v2/files", experiment_2_file_list, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED) - self.assertEqual("success" in response.data.keys(), True) - self.assertEqual(len(response.data["success"]), 1) - self.assertEqual(len(response.data["failed"]), 0) - - dirs_dict = self._assert_directory_parent_dirs("project_y") - self._assert_file_parent_dirs(dirs_dict, response) - - def test_create_file_hierarchy_error_file_list_has_invalid_data(self): - """ - If even one file is missing file_path or project_identifier, the - request is immediately terminated. Creating files for multiple projects - in a single request is also not permitted. 
- """ - experiment_1_file_list = self._form_complex_list_from_test_file() - experiment_1_file_list[0].pop("file_path") - response = self.client.post("/rest/v2/files", experiment_1_file_list, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual("file_path" in response.data, True) - self.assertEqual("required parameter" in response.data["file_path"][0], True) - - experiment_1_file_list = self._form_complex_list_from_test_file() - experiment_1_file_list[0].pop("project_identifier") - response = self.client.post("/rest/v2/files", experiment_1_file_list, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual("project_identifier" in response.data, True) - self.assertEqual("required parameter" in response.data["project_identifier"][0], True) - - experiment_1_file_list = self._form_complex_list_from_test_file() - experiment_1_file_list[0]["project_identifier"] = "second_project" - response = self.client.post("/rest/v2/files", experiment_1_file_list, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual("project_identifier" in response.data, True) - self.assertEqual("multiple projects" in response.data["project_identifier"][0], True) - - def test_filepath_starts_with_slash(self): - file = self._get_new_test_data() - file["file_path"] = file["file_path"][1:] - - response = self.client.post("/rest/v2/files", file, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertTrue( - "file path should start with '/' to point to the root" in response.data["file_path"][0] - ) - - def _assert_directory_parent_dirs(self, project_identifier): - """ - Check dirs created during the request have parent dirs as expected. - """ - dirs_dict = {} - - for d in Directory.objects.filter(project_identifier=project_identifier): - dirs_dict[d.directory_path] = { - "dir_id": d.id, - "parent_dir_id": d.parent_directory and d.parent_directory.id or None, - } - - for dir_path, ids in dirs_dict.items(): - if dir_path == "/": - self.assertEqual( - ids["parent_dir_id"], - None, - "root dir '/' should not have a parent directory", - ) - continue - expected_parent_dir_path = dirname(dir_path) - self.assertEqual( - ids["parent_dir_id"], - dirs_dict[expected_parent_dir_path]["dir_id"], - "parent dir not as expected.", - ) - - return dirs_dict - - def _assert_file_parent_dirs(self, dirs_dict, response): - """ - Check files have parent dirs as expected. - """ - for entry in response.data["success"]: - f = entry["object"] - excpected_parent_dir_path = dirname(f["file_path"]) - self.assertEqual( - f["parent_directory"]["id"], - dirs_dict[excpected_parent_dir_path]["dir_id"], - "parent dir not as expected.", - ) - - def _form_complex_list_from_test_file(self): - """ - "complex" list. Notice the leading and trailing whitespace in directories Group_1 and Group_3. 
- """ - dir_data = [ - { - "file_name": "uudehdko.png", - "file_path": "/project_y_FROZEN/Experiment_1/Group_1 /Results/uudehdko.png", - }, - { - "file_name": "uusi.png", - "file_path": "/project_y_FROZEN/Experiment_1/Group_1 /Results/uusi.png", - }, - { - "file_name": "path.png", - "file_path": "/project_y_FROZEN/Experiment_1/Group_1 /path.png", - }, - { - "file_name": "b_path.png", - "file_path": "/project_y_FROZEN/Experiment_1/b_path.png", - }, - { - "file_name": "everything_that_can_go_wrong_will_go_wrong.png", - "file_path": "/project_y_FROZEN/Experiment_1/Group_2/everything_that_can_go_wrong_will_go_wrong.png", - }, - { - "file_name": "pathx.png", - "file_path": "/project_y_FROZEN/Experiment_1/pathx.png", - }, - { - "file_name": "kansio.png", - "file_path": "/project_y_FROZEN/Experiment_1/Group_1 /Results/Important/kansio.png", - }, - { - "file_name": "some.png", - "file_path": "/project_y_FROZEN/Experiment_1/some.png", - }, - { - "file_name": "aa_toka.png", - "file_path": "/project_y_FROZEN/Experiment_1/aa_toka.png", - }, - { - "file_name": "aa_eka.png", - "file_path": "/project_y_FROZEN/Experiment_1/aa_eka.png", - }, - { - "file_name": "kissa.png", - "file_path": "/project_y_FROZEN/Experiment_1/ Group_3/2017/01/kissa.png", - }, - { - "file_name": "ekaa.png", - "file_path": "/project_y_FROZEN/Experiment_1/ekaa.png", - }, - ] - - template = self.test_new_data - template.pop("id", None) - template.pop("identifier", None) - template.pop("project_identifier", None) - template.pop("parent_directory", None) - template.pop("date_created", None) - template.pop("date_modified", None) - template.pop("service_created", None) - - files = [] - for i, d in enumerate(dir_data): - files.append(deepcopy(template)) - files[-1].update( - d, identifier="pid:urn:test:file:%d" % i, project_identifier="project_y" - ) - - return files - - -class FileApiWriteUpdateTests(FileApiWriteCommon): - """ - update operations PUT - """ - - def test_update_file(self): - f = self.client.get("/rest/v2/files/1").data - f["file_format"] = "csv" - response = self.client.put("/rest/v2/files/%s" % f["identifier"], f, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - def test_prevent_file_path_update_after_create(self): - f = self.client.get("/rest/v2/files/1").data - f["file_path"] = "%s_bak" % f["file_path"] - response = self.client.put("/rest/v2/files/%s" % f["identifier"], f, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - def test_update_file_error_required_fields(self): - """ - Field 'project_identifier' is missing, which should result in an error, since PUT - replaces an object and requires all 'required' fields to be present. 
- """ - self.test_new_data.pop("project_identifier") - response = self.client.put( - "/rest/v2/files/%s" % self.identifier, self.test_new_data, format="json" - ) - - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual( - "project_identifier" in response.data.keys(), - True, - "Error for field 'project_identifier' is missing from response.data", - ) - - def test_update_file_not_found(self): - response = self.client.put("/rest/v2/files/doesnotexist", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - def test_update_file_allowed_projects_ok(self): - f = self.client.get("/rest/v2/files/1").data - response = self.client.put( - "/rest/v2/files/%s?allowed_projects=%s" % (f["identifier"], f["project_identifier"]), - f, - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - - def test_update_file_allowed_projects_fail(self): - f = self.client.get("/rest/v2/files/1").data - response = self.client.put( - "/rest/v2/files/%s?allowed_projects=nopermission" % f["identifier"], - f, - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - - def test_update_file_allowed_projects_not_dict(self): - f = self.client.get("/rest/v2/files/1").data - response = self.client.put( - "/rest/v2/files/%s?allowed_projects=%s" % (f["identifier"], f["project_identifier"]), - [f], - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual("json" in response.data["detail"][0], True, "Error regarding datatype") - - # - # update list operations PUT - # - - def test_file_update_list(self): - f1 = self.client.get("/rest/v2/files/1").data - f2 = self.client.get("/rest/v2/files/2").data - new_file_format = "changed-format" - new_file_format_2 = "changed-format-2" - f1["file_format"] = new_file_format - f2["file_format"] = new_file_format_2 - - response = self.client.put("/rest/v2/files", [f1, f2], format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - updated_file = File.objects.get(pk=1) - self.assertEqual(updated_file.file_format, new_file_format) - - def test_file_update_list_error_one_fails(self): - f1 = self.client.get("/rest/v2/files/1").data - f2 = self.client.get("/rest/v2/files/2").data - new_file_format = "changed-format" - f1["file_format"] = new_file_format - # cant be null - should fail - f2["file_frozen"] = None - - response = self.client.put("/rest/v2/files", [f1, f2], format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data["success"]), 1, "success list should be empty") - self.assertEqual( - len(response.data["failed"]), 1, "there should have been one failed element" - ) - self.assertEqual( - "file_frozen" in response.data["failed"][0]["errors"], - True, - "error should be about file_characteristics missing", - ) - - updated_file = File.objects.get(pk=1) - self.assertEqual(updated_file.file_format, new_file_format) - - def test_file_update_list_error_key_not_found(self): - f1 = self.client.get("/rest/v2/files/1").data - f2 = self.client.get("/rest/v2/files/2").data - new_file_format = "changed-format" - new_file_format_2 = "changed-format-2" - f1["file_format"] = new_file_format - f2["file_format"] = new_file_format_2 - # has no lookup key - should fail - f2.pop("id") - f2.pop("identifier") - - response = self.client.put("/rest/v2/files", [f1, f2], format="json") - self.assertEqual(response.status_code, 
status.HTTP_200_OK, response.data) - self.assertEqual(len(response.data["success"]), 1, "success list should be empty") - self.assertEqual( - len(response.data["failed"]), 1, "there should have been one failed element" - ) - error_msg_of_failed_row = response.data["failed"][0]["errors"]["detail"][0] - self.assertEqual( - "identifying keys" in error_msg_of_failed_row, - True, - "error should be about identifying keys missing", - ) - - updated_file = File.objects.get(pk=1) - self.assertEqual(updated_file.file_format, new_file_format) - - def test_file_update_list_allowed_projects_ok(self): - # Both files in project 'project_x' - f1 = self.client.get("/rest/v2/files/1").data - f2 = self.client.get("/rest/v2/files/2").data - - response = self.client.put( - "/rest/v2/files?allowed_projects=project_x,y,z", [f1, f2], format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - def test_file_update_list_allowed_projects_fail(self): - # Files in projects 'project_x' and 'research_project_112' - f1 = self.client.get("/rest/v2/files/1").data - f2 = self.client.get("/rest/v2/files/39").data - - response = self.client.put( - "/rest/v2/files?allowed_projects=project_x,y,z", [f1, f2], format="json" - ) - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - - def test_file_update_list_allowed_projects_empty_value(self): - f1 = self.client.get("/rest/v2/files/1").data - response = self.client.put("/rest/v2/files?allowed_projects=", [f1], format="json") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - - def test_file_update_list_allowed_projects_not_list(self): - new_data_1 = {} - new_data_1["identifier"] = "pid:urn:1" - new_data_1["file_name"] = "Nice_new_name" - - res = self.client.patch( - "/rest/v2/files?allowed_projects=y,z,project_x", new_data_1, format="json" - ) - self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST, res.data) - - -class FileApiWritePartialUpdateTests(FileApiWriteCommon): - """ - update operations PATCH - """ - - def test_update_file_partial(self): - new_data = { - "file_name": "new_file_name", - } - response = self.client.patch("/rest/v2/files/%s" % self.identifier, new_data, format="json") - - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual("file_name" in response.data.keys(), True) - self.assertEqual( - "file_path" in response.data.keys(), - True, - "PATCH operation should return full content", - ) - self.assertEqual( - response.data["file_name"], - "new_file_name", - "Field file_name was not updated", - ) - - def test_update_partial_allowed_projects_ok(self): - new_data = { - "file_name": "new_file_name", - } - response = self.client.patch( - "/rest/v2/files/%s?allowed_projects=%s" % (self.identifier, self.pidentifier), - new_data, - format="json", - ) - - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data["file_name"], "new_file_name", response.data) - - def test_update_partial_allowed_projects_fail(self): - new_data = { - "file_name": "new_file_name", - } - response = self.client.patch( - "/rest/v2/files/%s?allowed_projects=noproject" % self.identifier, - new_data, - format="json", - ) - - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - - def test_update_partial_allowed_projects_not_dict(self): - new_data = { - "file_name": "new_file_name", - } - response = self.client.patch( - "/rest/v2/files/%s?allowed_projects=%s" % (self.identifier, 
self.pidentifier), - [new_data], - format="json", - ) - - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - self.assertEqual("json" in response.data["detail"][0], True, "Error regarding datatype") - - # - # update list operations PATCH - # - - def test_file_partial_update_list(self): - new_project_identifier = "changed-project-identifier" - new_project_identifier_2 = "changed-project-identifier-2" - - test_data = {} - test_data["id"] = 1 - test_data["project_identifier"] = new_project_identifier - - second_test_data = {} - second_test_data["id"] = 2 - second_test_data["project_identifier"] = new_project_identifier_2 - - response = self.client.patch("/rest/v2/files", [test_data, second_test_data], format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual( - "success" in response.data, - True, - "response.data should contain list of changed objects", - ) - self.assertEqual( - len(response.data["success"]), - 2, - "response.data should contain 2 changed objects", - ) - self.assertEqual( - "file_characteristics" in response.data["success"][0]["object"], - True, - "response.data should contain full objects", - ) - - updated_file = File.objects.get(pk=1) - self.assertEqual( - updated_file.project_identifier, - new_project_identifier, - "project_identifier did not update", - ) - - def test_file_partial_update_list_allowed_projects_ok(self): - new_data_1 = {} - new_data_1["identifier"] = "pid:urn:1" - new_data_1["file_name"] = "Nice_new_name" - - new_data_2 = {} - new_data_2["identifier"] = "pid:urn:2" - new_data_2["file_name"] = "Not_so_nice_name" - - res = self.client.patch( - "/rest/v2/files?allowed_projects=y,z,project_x", - [new_data_1, new_data_2], - format="json", - ) - self.assertEqual(res.status_code, status.HTTP_200_OK, res.data) - self.assertEqual(res.data["success"][0]["object"]["file_name"], "Nice_new_name", res.data) - - def test_file_partial_update_list_allowed_projects_fail(self): - # Files in projects 'project_x' and 'research_project_112' - f1 = self.client.get("/rest/v2/files/1").data - f2 = self.client.get("/rest/v2/files/39").data - - response = self.client.patch( - "/rest/v2/files?allowed_projects=project_x,y,z", [f1, f2], format="json" - ) - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - - def test_file_partial_update_list_allowed_projects_not_list(self): - new_data_1 = {} - new_data_1["identifier"] = "pid:urn:1" - new_data_1["file_name"] = "Nice_new_name" - - res = self.client.patch( - "/rest/v2/files?allowed_projects=y,z,project_x", new_data_1, format="json" - ) - self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST, res.data) - - def test_file_partial_update_list_allowed_projects_no_identifier(self): - new_data_1 = {} - new_data_1["file_name"] = "Nice_new_name" - - new_data_2 = {} - new_data_2["id"] = 23 - new_data_2["file_name"] = "Not_so_nice_name" - - res = self.client.patch( - "/rest/v2/files?allowed_projects=y,z,project_x", - [new_data_1, new_data_2], - format="json", - ) - self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST, res.data) - - -class FileApiWriteDeleteTests(FileApiWriteCommon): - # - # - # - # delete apis - # - # - # - - def test_delete_single_file_ok(self): - dir_count_before = Directory.objects.all().count() - response = self.client.delete("/rest/v2/files/1") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual("deleted_files_count" in response.data, True, response.data) - 
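# NOTE: file deletes are soft deletes; the response reports the number of files removed, and the row stays reachable via objects_unfiltered (asserted below) -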
self.assertEqual(response.data["deleted_files_count"], 1, response.data) - dir_count_after = Directory.objects.all().count() - self.assertEqual(dir_count_before, dir_count_after, "no dirs should have been deleted") - deleted_file = File.objects_unfiltered.get(pk=1) - self._check_project_root_byte_size_and_file_count(deleted_file.project_identifier) - self.assertEqual( - deleted_file.date_modified, - deleted_file.file_deleted, - "date_modified should be updated", - ) - - def test_delete_single_file_ok_destroy_leading_dirs(self): - project_identifier = "project_z" - test_data = deepcopy(self.test_new_data) - test_data["file_path"] = "/project_z/some/path/here/%s" % test_data["file_name"] - test_data["project_identifier"] = project_identifier - test_data["identifier"] = "abc123" - response = self.client.post("/rest/v2/files", test_data, format="json") - self.assertEqual( - Directory.objects.filter(project_identifier=project_identifier).exists(), - True, - ) - - response = self.client.delete("/rest/v2/files/%s" % response.data["id"]) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual("deleted_files_count" in response.data, True, response.data) - self.assertEqual(response.data["deleted_files_count"], 1, response.data) - - self.assertEqual( - Directory.objects.filter(project_identifier=project_identifier).exists(), - False, - ) - - def test_delete_single_file_404(self): - response = self.client.delete("/rest/v2/files/doesnotexist") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - def test_bulk_delete_files_identifiers_not_found(self): - """ - A bulk delete request to /files where none of the provided identifiers are found. - Should return 404. - """ - identifiers = ["nope", "doesnotexist", "stillno"] - response = self.client.delete("/rest/v2/files", identifiers, format="json") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.data) - - def test_bulk_delete_files_some_identifiers_not_found(self): - """ - A bulk delete request to /files where some of the provided identifiers are not found. - Should be ok: the files that are found get deleted. Presumably the identifiers that were - not found did not exist anyway, so no harm is done. - """ - identifiers = ["nope", "doesnotexist", "stillno"] - identifiers.append(File.objects.get(pk=1).identifier) - response = self.client.delete("/rest/v2/files", identifiers, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - removed = File.objects_unfiltered.get(pk=1).removed - self.assertEqual(removed, True, "file should have been removed") - self._check_project_root_byte_size_and_file_count( - File.objects_unfiltered.get(pk=1).project_identifier - ) - - def test_bulk_delete_files_in_single_directory_1(self): - """ - A bulk delete request to /files, where the list of files does not contain a full - directory and all its sub-directories. - - Only the files requested should be deleted, while leaving the rest of the directory - tree intact.
- """ - all_files_count_before = File.objects.all().count() - file_ids = [f.id for f in Directory.objects.get(pk=3).files.all()] - - response = self.client.delete("/rest/v2/files", file_ids, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - all_files_count_after = File.objects.all().count() - self.assertEqual(all_files_count_after, all_files_count_before - len(file_ids)) - - def test_bulk_delete_files_in_single_directory_2(self): - """ - Same as above, but target another directory. - """ - all_files_count_before = File.objects.all().count() - file_ids = [f.id for f in Directory.objects.get(pk=4).files.all()] - - response = self.client.delete("/rest/v2/files", file_ids, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - all_files_count_after = File.objects.all().count() - self.assertEqual(all_files_count_after, all_files_count_before - len(file_ids)) - - def test_bulk_delete_file_list_one_file_id_missing(self): - """ - Otherwise complete set of files, but from one dir one file is missing. - Should leave the one file intact, while preserving the directory tree. - """ - all_files_count_before = File.objects.filter(project_identifier="project_x").count() - file_ids = [f.id for f in File.objects.filter(project_identifier="project_x")] - - # everything except the last file should be removed - file_ids.pop() - - response = self.client.delete("/rest/v2/files", file_ids, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - all_files_after = File.objects.filter(project_identifier="project_x") - self.assertEqual(all_files_after.count(), all_files_count_before - len(file_ids)) - - expected_dirs_count = self._count_dirs_from_path(all_files_after[0].file_path) - actual_dirs_count = Directory.objects.filter(project_identifier="project_x").count() - self.assertEqual(actual_dirs_count, expected_dirs_count) - - def test_bulk_delete_files_from_root(self): - """ - Delete all files in project_x. The top-most dir should be /project_x_FROZEN/Experiment_X, - so the whole tree should end up being deleted. - """ - files_to_remove_count = 20 - file_ids = File.objects.filter(project_identifier="project_x").values_list("id", flat=True) - self.assertEqual(len(file_ids), files_to_remove_count) - - response = self.client.delete("/rest/v2/files", file_ids, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual( - response.data.get("deleted_files_count", None), - files_to_remove_count, - response.data, - ) - - self._assert_files_available_and_removed("project_x", 0, files_to_remove_count) - self.assertEqual( - Directory.objects_unfiltered.filter(project_identifier="project_x").count(), - 0, - "all dirs should have been permanently removed", - ) - - def test_bulk_delete_sub_directory_1(self): - """ - Delete from /project_x_FROZEN/Experiment_X/Phase_1, which should remove - only 15 files. 
- """ - files_to_remove_count = 15 - file_ids = [f.id for f in Directory.objects.get(pk=4).files.all()] - file_ids += [f.id for f in Directory.objects.get(pk=6).files.all()] - self.assertEqual(len(file_ids), files_to_remove_count) - - response = self.client.delete("/rest/v2/files", file_ids, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual( - response.data.get("deleted_files_count", None), - files_to_remove_count, - response.data, - ) - - self._assert_files_available_and_removed("project_x", 5, files_to_remove_count) - - # these dirs should still be left: - # / - # /project_x_FROZEN - # /project_x_FROZEN/Experiment_X (has 5 files) - self.assertEqual(Directory.objects.filter(project_identifier="project_x").count(), 3) - - def test_bulk_delete_sub_directory_2(self): - """ - Delete from /project_x_FROZEN/Experiment_X/Phase_1/2017/01, which should - remove only 10 files. - """ - files_to_remove_count = 10 - file_ids = [f.id for f in Directory.objects.get(pk=6).files.all()] - self.assertEqual(len(file_ids), files_to_remove_count) - - response = self.client.delete("/rest/v2/files", file_ids, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual( - response.data.get("deleted_files_count", None), - files_to_remove_count, - response.data, - ) - - self._assert_files_available_and_removed("project_x", 10, files_to_remove_count) - - # these dirs should still be left: - # / - # /project_x_FROZEN - # /project_x_FROZEN/Experiment_X (5 files) - # /project_x_FROZEN/Experiment_X/Phase_1 (5 files) - self.assertEqual(Directory.objects.filter(project_identifier="project_x").count(), 4) - - # /project_x_FROZEN/Experiment_X/Phase_1/2017 <- this dir should be deleted, since - # it only contained the 01-dir, which we specifically targeted for deletion - self.assertEqual( - Directory.objects.filter( - project_identifier="project_x", - directory_path="/project_x_FROZEN/Experiment_X/Phase_1/2017", - ).count(), - 0, - "dir should have been deleted", - ) - - def _assert_files_available_and_removed(self, project_identifier, available, removed): - """ - After deleting files, check qty of files retrievable by usual means is as expected, - and qty of files retrievable from objects_unfiltered with removed=True is as expected. - """ - self.assertEqual( - File.objects.filter(project_identifier=project_identifier).count(), - available, - "files should not be retrievable from removed=False scope", - ) - self.assertEqual( - File.objects_unfiltered.filter( - project_identifier=project_identifier, removed=True - ).count(), - removed, - "files should be retrievable from removed=True scope", - ) - - def test_deleting_files_deprecates_datasets(self): - for cr in CatalogRecord.objects.filter(deprecated=True): - # ensure later assert is correct - cr.deprecated = False - cr.force_save() - - datasets_with_file = CatalogRecord.objects.filter(files__id=1).count() - response = self.client.delete("/rest/v2/files/1") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(CatalogRecord.objects.filter(deprecated=True).count(), datasets_with_file) - - -class FileApiWriteRestoreTests(FileApiWriteCommon): - def test_restore_files_ok(self): - """ - Restore a few deleted files from directories, that still contain other files. - Restored files should be appended to previously existing files. 
- """ - response = self.client.delete("/rest/v2/files/1") - response = self.client.delete("/rest/v2/files/2") - response = self.client.delete("/rest/v2/files/3") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - deleted_files = File.objects_unfiltered.filter(pk__in=[1, 2, 3]).values( - "identifier", "parent_directory_id" - ) - - response = self.client.post( - "/rest/v2/files/restore", - [f["identifier"] for f in deleted_files], - format="json", - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual("restored_files_count" in response.data, True, response.data) - self.assertEqual(response.data["restored_files_count"], 3, response.data) - - # ensure restored files are using previously existing directories - old_parent_dirs = {f["parent_directory_id"] for f in deleted_files} - files = File.objects.filter(pk__in=[1, 2, 3]) - for f in files: - self.assertEqual(f.file_deleted, None) - self.assertEqual(f.user_modified, None) - self.assertEqual(f.parent_directory_id in old_parent_dirs, True) - - def test_restore_files_recreate_missing_directories(self): - """ - Restore an entire project. Files should have new directories. - """ - proj = File.objects.get(pk=1).project_identifier - - response = self.client.get( - "/rest/files?project_identifier=%s&fields=identifier&pagination=false" % proj, - format="json", - ) - file_identifiers = [f["identifier"] for f in response.data] - - self.client.delete("/rest/v2/files", file_identifiers, format="json") - - deleted_directory_ids = File.objects_unfiltered.filter( - identifier__in=file_identifiers - ).values_list("parent_directory_id", flat=True) - old_parent_dirs = {id for id in deleted_directory_ids} - - response = self.client.post("/rest/v2/files/restore", file_identifiers, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual("restored_files_count" in response.data, True, response.data) - self.assertEqual( - response.data["restored_files_count"], len(file_identifiers), response.data - ) - - # ensure restored files are using new directories - files = File.objects.filter(identifier__in=file_identifiers) - for f in files: - self.assertEqual(f.parent_directory_id in old_parent_dirs, False) - - def test_check_parameter_is_string_list(self): - response = self.client.post("/rest/v2/files/restore", ["a", "b", 1], format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - def test_check_files_belong_to_one_project(self): - f1 = File.objects_unfiltered.get(pk=1) - f2 = ( - File.objects_unfiltered.filter() - .exclude(project_identifier=f1.project_identifier) - .first() - ) - response = self.client.delete("/rest/v2/files/%d" % f1.id) - response = self.client.delete("/rest/v2/files/%d" % f2.id) - response = self.client.post( - "/rest/v2/files/restore", [f1.identifier, f2.identifier], format="json" - ) - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - -class FileApiWriteXmlTests(FileApiWriteCommon): - """ - /files/pid/xml related tests - """ - - def test_xml_api(self): - content_type = "application/xml" - data = ( - 'tauhketta yeah' - ) - - # create - response = self.client.post( - "/rest/v2/files/1/xml?namespace=breh", data, content_type=content_type - ) - self.assertEqual(response.status_code in (200, 201, 204), True) - self.assertEqual("updated stuff' - response = self.client.put( - "/rest/v2/files/1/xml?namespace=breh", data, content_type=content_type - ) - self.assertEqual(response.status_code in (200, 201, 204), True) - - # get 
updated again - response = self.client.get( - "/rest/v2/files/1/xml?namespace=breh", - content_type=content_type, - ) - self.assertEqual("updated stuff" in response.data, True) - - # delete - response = self.client.delete( - "/rest/v2/files/1/xml?namespace=breh", data, content_type=content_type - ) - self.assertEqual(response.status_code in (200, 201, 204), True) - - response = self.client.delete( - "/rest/v2/files/1/xml?namespace=bruh", data, content_type=content_type - ) - self.assertEqual(response.status_code in (200, 201, 204), True) - - # get list - response = self.client.get("/rest/v2/files/1/xml", content_type=content_type) - self.assertEqual(response.status_code in (200, 201, 204), True) - - -class FileApiWriteEndUserAccess(FileApiWriteCommon): - def setUp(self): - super().setUp() - self.token = get_test_oidc_token() - self._mock_token_validation_succeeds() - - @responses.activate - def test_user_cant_create_files(self): - """ - Ensure users are unable to create new files. - """ - - # ensure user belongs to same project - self.token["group_names"].append("IDA01:%s" % self.test_new_data["project_identifier"]) - self._use_http_authorization(method="bearer", token=self.token) - - response = self.client.post("/rest/v2/files", self.test_new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - - @responses.activate - def test_user_can_only_update_permitted_file_fields(self): - """ - Ensure users are only able to modify permitted fields. - """ - # ensure user belongs to same project - proj = File.objects.get(pk=1).project_identifier - self.token["group_names"].append("IDA01:%s" % proj) - self._use_http_authorization(method="bearer", token=self.token) - - response = self.client.get("/rest/v2/files/1", format="json") - file = response.data - original_file = deepcopy(file) - file["byte_size"] = 200 - file["checksum"]["value"] = "changed" - file["parent_directory"] = 1 - file["file_frozen"] = "3" + file["file_frozen"][1:] - file["file_format"] = "changed" - file["file_name"] = "changed" - file["file_path"] = "/oh/no" - file["file_storage"] = 2 - file["file_uploaded"] = "3" + file["file_uploaded"][1:] - file["identifier"] = "changed" - file["open_access"] = True - file["project_identifier"] = "changed" - file["service_modified"] = "changed" - file["service_created"] = "changed" - file["removed"] = True - - # the only field that should be changed - file["file_characteristics"] = {"title": "new title"} - - response = self.client.put("/rest/v2/files/1", file, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data["file_characteristics"]["title"], "new title", response.data) - - for key, value in response.data.items(): - try: - if key in ("date_modified", "file_modified"): - # these fields are changed by metax - continue - elif key == "file_characteristics": - # the field that should have been changed by the user - self.assertNotEqual(original_file[key], response.data[key]) - else: - # must not have changed - self.assertEqual(original_file[key], response.data[key]) - except KeyError as e: - if e.args[0] == "user_modified": - # added by metax - continue - raise - - @responses.activate - def test_user_can_update_files_in_their_projects(self): - """ - Ensure users can edit files in projects they are a member of. 
- """ - proj = File.objects.only("project_identifier").get(pk=1).project_identifier - - response = self.client.get("/rest/v2/files?project_identifier=%s" % proj, format="json") - - file = response.data["results"][0] - - self.token["group_names"].append("IDA01:%s" % proj) - self._use_http_authorization(method="bearer", token=self.token) - - response = self.client.put("/rest/v2/files/%s" % file["id"], file, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - response = self.client.put("/rest/v2/files", [file], format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK) - - @responses.activate - def test_user_cant_update_files_in_others_projects(self): - """ - Ensure users can not edit files in projects they are not a member of. - """ - proj = File.objects.only("project_identifier").get(pk=1).project_identifier - - response = self.client.get("/rest/v2/files?project_identifier=%s" % proj, format="json") - - file = response.data["results"][0] - - self.token["group_names"] = ["no_files_for_this_project"] - self._use_http_authorization(method="bearer", token=self.token) - - response = self.client.put("/rest/v2/files/%s" % file["id"], file, format="json") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - - response = self.client.put("/rest/v2/files", [file], format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - -class FileApiWriteDryrunTest(FileApiWriteCommon): - - """ - Test query param ?dryrun=bool separately for post /rest/v2/files api, due to special - behavior in POST /rest/v2/files. - - For other apis, the common test case is among views/common/write tests. - """ - - def test_dryrun(self): - """ - Ensure query parameter ?dryrun=true returns same result as they normally would, but - changes made during the request do not get saved in the db. 
- """ - response = self.client.post( - "/rest/v2/files?what&dryrun=true&other", self.test_new_data, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual("id" in response.data, True) - found = File.objects.filter(pk=response.data["id"]).exists() - self.assertEqual( - found, - False, - "file should not get truly created when using parameter dryrun", - ) + self.assertTrue("format_version" not in response.data["file_characteristics"]) \ No newline at end of file diff --git a/src/metax_api/tests/api/rest/v2/views/filestorages/__init__.py b/src/metax_api/tests/api/rest/v2/views/filestorages/__init__.py deleted file mode 100755 index 6adb6e69..00000000 --- a/src/metax_api/tests/api/rest/v2/views/filestorages/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -from .read import * -from .write import * diff --git a/src/metax_api/tests/api/rest/v2/views/filestorages/read.py b/src/metax_api/tests/api/rest/v2/views/filestorages/read.py deleted file mode 100755 index bbb54665..00000000 --- a/src/metax_api/tests/api/rest/v2/views/filestorages/read.py +++ /dev/null @@ -1,43 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -from django.core.management import call_command -from rest_framework import status -from rest_framework.test import APITestCase - -from metax_api.models import FileStorage -from metax_api.tests.utils import TestClassUtils, test_data_file_path - - -class FileStorageApiReadBasicTests(APITestCase, TestClassUtils): - @classmethod - def setUpClass(cls): - """ - Loaded only once for test cases inside this class. 
- """ - call_command("loaddata", test_data_file_path, verbosity=0) - super(FileStorageApiReadBasicTests, cls).setUpClass() - - def setUp(self): - self._use_http_authorization() - - def test_basic_get(self): - fs = FileStorage.objects.get(pk=1) - response = self.client.get("/rest/v2/filestorages/%d" % fs.id) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - response = self.client.get("/rest/v2/filestorages/%s" % fs.file_storage_json["identifier"]) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - def test_basic_list(self): - response = self.client.get("/rest/v2/filestorages") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual( - len(response.data["results"]), - FileStorage.objects.all().count(), - response.data, - ) diff --git a/src/metax_api/tests/api/rest/v2/views/filestorages/write.py b/src/metax_api/tests/api/rest/v2/views/filestorages/write.py deleted file mode 100755 index e8f86d32..00000000 --- a/src/metax_api/tests/api/rest/v2/views/filestorages/write.py +++ /dev/null @@ -1,53 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -from django.core.management import call_command -from rest_framework import status -from rest_framework.test import APITestCase - -from metax_api.models import FileStorage -from metax_api.tests.utils import TestClassUtils, test_data_file_path - - -class FileStorageApiWriteCommon(APITestCase, TestClassUtils): - @classmethod - def setUpClass(cls): - """ - Loaded only once for test cases inside this class. - """ - call_command("loaddata", test_data_file_path, verbosity=0) - super(FileStorageApiWriteCommon, cls).setUpClass() - - def setUp(self): - self.new_test_data = self._get_object_from_test_data("filestorage") - self.new_test_data.pop("id") - self.new_test_data["file_storage_json"]["identifier"] = "new-file-storage" - self._use_http_authorization() - - -class FileStorageApiWriteBasicTests(FileStorageApiWriteCommon): - def test_create(self): - response = self.client.post("/rest/v2/filestorages", self.new_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED) - - def test_create_identifier_already_exists(self): - response = self.client.post("/rest/v2/filestorages", self.new_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED) - response = self.client.post("/rest/v2/filestorages", self.new_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual( - "already exists" in response.data["file_storage_json"]["identifier"][0], - True, - response.data, - ) - - def test_delete(self): - response = self.client.delete("/rest/v2/filestorages/1") - self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) - fs = FileStorage.objects_unfiltered.get(pk=1) - self.assertEqual(fs.removed, True, "should be deleted") - self.assertEqual(fs.date_removed, fs.date_modified) diff --git a/src/metax_api/tests/api/rest/v2/views/schemas/__init__.py b/src/metax_api/tests/api/rest/v2/views/schemas/__init__.py deleted file mode 100755 index 0ffba528..00000000 --- a/src/metax_api/tests/api/rest/v2/views/schemas/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for 
Science Ltd., Espoo Finland -# :license: MIT - -from .read import * diff --git a/src/metax_api/tests/api/rest/v2/views/schemas/read.py b/src/metax_api/tests/api/rest/v2/views/schemas/read.py deleted file mode 100755 index 3fd372cf..00000000 --- a/src/metax_api/tests/api/rest/v2/views/schemas/read.py +++ /dev/null @@ -1,35 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -from rest_framework import status -from rest_framework.test import APITestCase - -from metax_api.tests.utils import TestClassUtils - - -class SchemaApiReadTests(APITestCase, TestClassUtils): - def test_read_schemas_list(self): - response = self.client.get("/rest/v2/schemas") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(response.data["count"] > 0) - - def test_read_schemas_list_html(self): - headers = {"HTTP_ACCEPT": "text/html"} - response = self.client.get("/rest/v2/schemas", **headers) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(response._headers["content-type"][1].find("text/html") >= 0) - - def test_read_schema_retrieve_existing(self): - list_response = self.client.get("/rest/v2/schemas") - self.assertEqual(list_response.status_code, status.HTTP_200_OK) - self.assertTrue(list_response.data["count"] > 0, "No schemas available") - response = self.client.get("/rest/v2/schemas/%s" % list_response.data["results"][0]) - self.assertEqual(response.status_code, status.HTTP_200_OK) - - def test_read_schema_not_exists(self): - response = self.client.get("/rest/v2/schemas/thisshouldnotexist") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) diff --git a/src/metax_api/tests/api/rpc/v2/views/__init__.py b/src/metax_api/tests/api/rpc/v2/views/__init__.py index 1fe254c9..e965fca2 100755 --- a/src/metax_api/tests/api/rpc/v2/views/__init__.py +++ b/src/metax_api/tests/api/rpc/v2/views/__init__.py @@ -5,7 +5,6 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT -from .common_rpc import * from .dataset_rpc import * from .file_rpc import * from .statistic_rpc import * diff --git a/src/metax_api/tests/api/rpc/v2/views/common_rpc.py b/src/metax_api/tests/api/rpc/v2/views/common_rpc.py deleted file mode 100755 index 5a56dd59..00000000 --- a/src/metax_api/tests/api/rpc/v2/views/common_rpc.py +++ /dev/null @@ -1,22 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -from rest_framework import status -from rest_framework.test import APITestCase - -from metax_api.tests.utils import TestClassUtils - - -class CommonRPCTests(APITestCase, TestClassUtils): - def test_list_valid_methods(self): - """ - When an invalid (or mistyped) method name is attempted, the api should list valid method - names for that RPC endpoint.
- """ - response = self.client.get("/rpc/v2/datasets/nonexisting") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - self.assertEqual("methods are: " in response.data["detail"][0], True, response.content) diff --git a/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py b/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py index d04adf54..d4382b12 100755 --- a/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py +++ b/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py @@ -7,121 +7,15 @@ import responses from django.conf import settings -from django.core.management import call_command from rest_framework import status -from rest_framework.test import APITestCase from metax_api.models import CatalogRecordV2 from metax_api.tests.api.rest.base.views.datasets.write import CatalogRecordApiWriteCommon -from metax_api.tests.utils import TestClassUtils, get_test_oidc_token, test_data_file_path +from metax_api.tests.utils import get_test_oidc_token CR = CatalogRecordV2 -class DatasetRPCTests(APITestCase, TestClassUtils): - @classmethod - def setUpClass(cls): - """ - Loaded only once for test cases inside this class. - """ - super().setUpClass() - call_command("loaddata", test_data_file_path, verbosity=0) - - def setUp(self): - super().setUp() - self.create_end_user_data_catalogs() - - @responses.activate - def test_get_minimal_dataset_template(self): - """ - Retrieve and use a minimal dataset template example from the api. - """ - - # query param type is missing, should return error and description what to do. - response = self.client.get("/rpc/v2/datasets/get_minimal_dataset_template") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - # test preventing typos - response = self.client.get("/rpc/v2/datasets/get_minimal_dataset_template?type=wrong") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - # test minimal dataset for service use - response = self.client.get("/rpc/v2/datasets/get_minimal_dataset_template?type=service") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue("metadata_provider_org" in response.data) - self.assertTrue("metadata_provider_user" in response.data) - self._use_http_authorization(username="testuser") - response = self.client.post("/rest/v2/datasets", response.data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED) - - # test minimal dataset for end user use - response = self.client.get("/rpc/v2/datasets/get_minimal_dataset_template?type=enduser") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue("metadata_provider_org" not in response.data) - self.assertTrue("metadata_provider_user" not in response.data) - self._use_http_authorization(method="bearer", token=get_test_oidc_token()) - self._mock_token_validation_succeeds() - response = self.client.post("/rest/v2/datasets", response.data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED) - - def test_set_preservation_identifier(self): - self._set_http_authorization("service") - - # Parameter 'identifier' is required - response = self.client.post("/rpc/v2/datasets/set_preservation_identifier") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - # Nonexisting identifier should return 404 - response = self.client.post( - "/rpc/v2/datasets/set_preservation_identifier?identifier=nonexisting" - ) - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - # Create ida data catalog - dc = 
self._get_object_from_test_data("datacatalog", requested_index=0) - dc_id = settings.IDA_DATA_CATALOG_IDENTIFIER - dc["catalog_json"]["identifier"] = dc_id - self.client.post("/rest/v2/datacatalogs", dc, format="json") - - # Test OK ops - - # Create new ida cr without doi - cr_json = self.client.get("/rest/v2/datasets/1").data - cr_json.pop("preservation_identifier", None) - cr_json.pop("identifier") - cr_json["research_dataset"].pop("preferred_identifier", None) - cr_json["data_catalog"] = dc_id - cr_json["research_dataset"]["issued"] = "2018-01-01" - cr_json["research_dataset"]["publisher"] = { - "@type": "Organization", - "name": {"en": "publisher"}, - } - - response = self.client.post("/rest/v2/datasets?pid_type=urn", cr_json, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - identifier = response.data["identifier"] - - # Verify rpc api returns the same doi as the one that is set to the datasets' preservation identifier - response = self.client.post( - f"/rpc/v2/datasets/set_preservation_identifier?identifier={identifier}" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - response2 = self.client.get(f"/rest/v2/datasets/{identifier}") - self.assertEqual(response2.status_code, status.HTTP_200_OK, response2.data) - self.assertEqual(response.data, response2.data["preservation_identifier"], response2.data) - - # Return 400 if request is not correct datacite format - response2.data["research_dataset"].pop("issued") - response = self.client.put(f"/rest/v2/datasets/{identifier}", response2.data, format="json") - self.assertEqual(response2.status_code, status.HTTP_200_OK, response2.data) - - response = self.client.post( - f"/rpc/v2/datasets/set_preservation_identifier?identifier={identifier}" - ) - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - - class ChangeCumulativeStateRPC(CatalogRecordApiWriteCommon): """ diff --git a/src/metax_api/tests/api/rpc/v2/views/file_rpc.py b/src/metax_api/tests/api/rpc/v2/views/file_rpc.py index eb09a185..8d8d6828 100755 --- a/src/metax_api/tests/api/rpc/v2/views/file_rpc.py +++ b/src/metax_api/tests/api/rpc/v2/views/file_rpc.py @@ -9,7 +9,7 @@ from rest_framework import status from rest_framework.test import APITestCase -from metax_api.models import CatalogRecordV2, Directory, File +from metax_api.models import CatalogRecordV2, File from metax_api.tests.utils import TestClassUtils, test_data_file_path @@ -30,48 +30,6 @@ class DeleteProjectTests(FileRPCTests): correct result for successful operations. 
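The set_preservation_identifier test deleted above boils down to one round trip: call the RPC, then read the dataset back and compare. A condensed sketch of that flow, assuming the same APITestCase client and fixtures as the removed code; the helper name is hypothetical:

def set_and_verify_preservation_identifier(client, identifier):
    # The RPC sets the preservation identifier (a DOI) and returns it.
    rpc = client.post(
        f"/rpc/v2/datasets/set_preservation_identifier?identifier={identifier}"
    )
    assert rpc.status_code == 200, rpc.data
    # Reading the dataset back should show the same value persisted.
    record = client.get(f"/rest/v2/datasets/{identifier}")
    assert rpc.data == record.data["preservation_identifier"]
    return rpc.data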
""" - def test_wrong_parameters(self): - # correct user, no project identifier - response = self.client.post("/rpc/v2/files/delete_project") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - - # nonexisting project identifier: - response = self.client.post("/rpc/v2/files/delete_project?project_identifier=non_existing") - self.assertEqual(response.data["deleted_files_count"], 0) - - # wrong request method - response = self.client.delete( - "/rpc/v2/files/delete_project?project_identifier=research_project_112" - ) - self.assertEqual(response.status_code, 501) - - # wrong user - self._use_http_authorization("api_auth_user") - response = self.client.post( - "/rpc/v2/files/delete_project?project_identifier=research_project_112" - ) - # self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - - def test_known_project_identifier(self): - response = self.client.post( - "/rpc/v2/files/delete_project?project_identifier=research_project_112" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - - def test_files_are_marked_deleted(self): - files_count_before = File.objects.filter(project_identifier="research_project_112").count() - response = self.client.post( - "/rpc/v2/files/delete_project?project_identifier=research_project_112" - ) - self.assertEqual(files_count_before, response.data["deleted_files_count"]) - - def test_directories_are_deleted(self): - self.client.post("/rpc/v2/files/delete_project?project_identifier=research_project_112") - directories_count_after = Directory.objects.filter( - project_identifier="research_project_112" - ).count() - self.assertEqual(directories_count_after, 0) - def test_datasets_are_marked_deprecated(self): file_ids = File.objects.filter(project_identifier="project_x").values_list("id", flat=True) related_dataset = CatalogRecordV2.objects.filter(files__in=file_ids).distinct("id")[0] diff --git a/src/metax_api/tests/api/rpc/v2/views/statistic_rpc.py b/src/metax_api/tests/api/rpc/v2/views/statistic_rpc.py index 674388b7..4bededc0 100755 --- a/src/metax_api/tests/api/rpc/v2/views/statistic_rpc.py +++ b/src/metax_api/tests/api/rpc/v2/views/statistic_rpc.py @@ -124,76 +124,6 @@ def _setup_testdata(self): response = self.client.post("/rest/v2/datacatalogs", dc_json, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - def _set_deprecated_dataset(self, id=1): - cr = CatalogRecord.objects.get(id=id) - cr.deprecated = True - cr.force_save() - - def _set_removed_dataset(self, id=1): - cr = CatalogRecord.objects.get(id=id) - cr.removed = True - cr.force_save() - - def _create_legacy_dataset(self): - """ - Creates one new legacy dataset and returns its id - """ - legacy_dataset = { - "data_catalog": {"identifier": settings.LEGACY_CATALOGS[0]}, - "metadata_owner_org": "some_org_id", - "metadata_provider_org": "some_org_id", - "metadata_provider_user": "some_user_id", - "research_dataset": { - "title": {"en": "Test Dataset Title"}, - "description": { - "en": "A descriptive description describing the contents of this dataset. Must be descriptive." 
- }, - "files": [ - { - "identifier": "pid:urn:1", - "title": "File Title", - "description": "informative description", - "use_category": {"identifier": "method"}, - }, - { - "identifier": "pid:urn:3", - "title": "File Title", - "description": "informative description", - "use_category": {"identifier": "method"}, - }, - ], - "creator": [ - { - "name": "Teppo Testaaja", - "@type": "Person", - "member_of": { - "name": {"fi": "Testiorganisaatio"}, - "@type": "Organization", - }, - } - ], - "access_rights": { - "access_type": { - "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", - "pref_label": {"fi": "Avoin", "en": "Open", "und": "Avoin"}, - "in_scheme": "http://uri.suomi.fi/codelist/fairdata/access_type", - } - }, - "preferred_identifier": "uniikkinen_aidentifaijeri", - }, - } - response = self.client.post("/rest/v2/datasets", legacy_dataset, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - return response.data["id"] - - def _create_new_dataset(self, dataset_json): - response = self.client.post("/rest/v2/datasets", dataset_json, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - new_cr_id = response.data["id"] - - return new_cr_id - def _create_new_dataset_version(self, id=1): """ Finds the latest version of given dataset, and creates a new version of it. @@ -221,43 +151,6 @@ def _create_new_dataset_version(self, id=1): return new_version_id - def _set_dataset_creation_date(self, cr_id, date): - """ - Forcibly changes the creation date for easier testing. Date-parameter is in form 'YYYY-MM-DD' - """ - - cr = CatalogRecord.objects_unfiltered.get(id=cr_id) - cr.date_created = parse_timestamp_string_to_tz_aware_datetime(date) - cr.force_save() - - def _get_catalog_record_size(self, id): - """ - Returns the size of given record id - """ - size = CatalogRecord.objects_unfiltered.get(id=id).research_dataset.get( - "total_files_byte_size", 0 - ) - - return size - - def _get_byte_size_of_month(self, date): - """ - Returns the byte size for given date. date is in format 'YYYY-MM' - """ - query = CatalogRecord.objects_unfiltered.filter(date_created__startswith=date) - list_of_sizes = [cr.research_dataset.get("total_files_byte_size", 0) for cr in query] - - return sum(list_of_sizes) - - def _get_total_byte_size(self): - """ - Returns byte size of all datasets in database - """ - query = CatalogRecord.objects_unfiltered.all() - list_of_sizes = [cr.research_dataset.get("total_files_byte_size", 0) for cr in query] - - return sum(list_of_sizes) - def _get_dataset_count_after(self, date): """ Return the total count after the date provided (inclusive). 
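The deleted byte-size helpers above pull every record into Python and sum research_dataset["total_files_byte_size"] row by row. For contrast, a sketch that pushes the summation into the database instead; this is a possible rewrite, not what the service does, and it assumes a PostgreSQL JSON field behind research_dataset (the KeyTextTransform import path shown holds for Django 3.1+):

from django.db.models import FloatField, Sum
from django.db.models.fields.json import KeyTextTransform
from django.db.models.functions import Cast

from metax_api.models import CatalogRecord


def byte_size_of_month(date):
    # Cast research_dataset['total_files_byte_size'] to a number in SQL and
    # let the database do the summing; NULLs (records without the key) are
    # ignored by Sum, matching the .get(..., 0) default in the removed code.
    total = (
        CatalogRecord.objects_unfiltered.filter(date_created__startswith=date)
        .annotate(
            size=Cast(
                KeyTextTransform("total_files_byte_size", "research_dataset"),
                FloatField(),
            )
        )
        .aggregate(total=Sum("size"))["total"]
    )
    return total or 0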
date is in format 'YYYY-MM-DD' @@ -272,27 +165,6 @@ def _get_dataset_count_of_month(self, date): """ return CatalogRecord.objects_unfiltered.filter(date_created__startswith=date).count() - def _get_total_dataset_count(self): - """ - Returns dataset count of entire database - """ - return CatalogRecord.objects_unfiltered.count() - - def _set_cr_datacatalog(self, cr_id, catalog_id): - cr = CatalogRecord.objects.get(pk=cr_id) - cr.data_catalog_id = DataCatalog.objects.get(catalog_json__identifier=catalog_id).id - cr.force_save() - - def _set_dataset_as_draft(self, cr_id): - cr = CatalogRecord.objects.get(pk=cr_id) - cr.state = "draft" - cr.force_save() - - def _set_cr_organization(self, cr_id, org): - cr = CatalogRecord.objects.get(pk=cr_id) - cr.metadata_owner_org = org - cr.force_save() - class StatisticRPCCountDatasets(StatisticRPCCommon, CatalogRecordApiWriteCommon): """ @@ -300,115 +172,6 @@ class StatisticRPCCountDatasets(StatisticRPCCommon, CatalogRecordApiWriteCommon) combinations. """ - def test_count_datasets_single(self): - """ - Tests single parameters for api. Empty removed and legacy parameters returns true AND false matches - """ - total_count = CatalogRecord.objects_unfiltered.count() - response = self.client.get("/rpc/v2/statistics/count_datasets").data - self.assertEqual(total_count, response["count"], response) - - # test removed -parameter - self._set_removed_dataset(id=2) - response = self.client.get("/rpc/v2/statistics/count_datasets?removed=true").data - self.assertEqual(response["count"], 1, response) - - response = self.client.get("/rpc/v2/statistics/count_datasets?removed=false").data - self.assertEqual(response["count"], total_count - 1, response) - - # test legacy -parameter - self._create_legacy_dataset() - total_count = CatalogRecord.objects_unfiltered.count() - response = self.client.get("/rpc/v2/statistics/count_datasets?legacy=true").data - self.assertEqual(response["count"], 1, response) - - response = self.client.get("/rpc/v2/statistics/count_datasets?legacy=false").data - self.assertEqual(response["count"], total_count - 1, response) - - # test latest -parameter - self._create_new_dataset_version() - self._create_new_dataset_version() - total_count = CatalogRecord.objects_unfiltered.count() - response = self.client.get( - "/rpc/v2/statistics/count_datasets?latest=false" - ).data # returns all - self.assertEqual(response["count"], total_count, response) - - with_param = self.client.get("/rpc/v2/statistics/count_datasets?latest=true").data - without_param = self.client.get("/rpc/v2/statistics/count_datasets").data # default is true - self.assertEqual(with_param["count"], total_count - 2, with_param) - self.assertEqual(with_param["count"], without_param["count"], with_param) - - def test_count_datasets_removed_latest(self): - second_ver = self._create_new_dataset_version() - self._create_new_dataset_version(second_ver) - self._set_removed_dataset() - self._set_removed_dataset(id=second_ver) - self._set_removed_dataset(id=2) - - rem_lat = self.client.get("/rpc/v2/statistics/count_datasets?removed=true&latest=true").data - rem_not_lat = self.client.get( - "/rpc/v2/statistics/count_datasets?removed=true&latest=false" - ).data - - self.assertEqual(rem_lat["count"], 1, "Only latest versions should be checked") # id=2 - self.assertEqual(rem_not_lat["count"], 3, "Only the prev versions should be removed") - - # create new dataset with 2 versions - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - 
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self._create_new_dataset_version(response.data["id"]) - - not_rem_lat = self.client.get( - "/rpc/v2/statistics/count_datasets?removed=false&latest=true" - ).data - not_rem_not_lat = self.client.get( - "/rpc/v2/statistics/count_datasets?removed=false&latest=false" - ).data - - self.assertEqual(not_rem_lat["count"], not_rem_not_lat["count"] - 1) - - def test_count_datasets_removed_legacy(self): - self._create_legacy_dataset() - self._create_legacy_dataset() - leg_cr = self._create_legacy_dataset() - self._set_removed_dataset(leg_cr) - total_count = CatalogRecord.objects_unfiltered.count() - - rem_leg = self.client.get("/rpc/v2/statistics/count_datasets?removed=true&legacy=true").data - rem_not_leg = self.client.get( - "/rpc/v2/statistics/count_datasets?removed=true&legacy=false" - ).data - not_rem_leg = self.client.get( - "/rpc/v2/statistics/count_datasets?removed=false&legacy=true" - ).data - not_rem_not_leg = self.client.get( - "/rpc/v2/statistics/count_datasets?removed=false&legacy=false" - ).data - - self.assertEqual(rem_leg["count"], 1) - self.assertEqual(rem_not_leg["count"], 0) - self.assertEqual(not_rem_leg["count"], 2) - self.assertEqual(not_rem_not_leg["count"], total_count - 3) - - def test_count_datasets_latest_legacy(self): - leg_cr = self._create_legacy_dataset() - self._create_new_dataset_version(leg_cr) - self._create_new_dataset_version(leg_cr) - total_count = CatalogRecord.objects_unfiltered.count() - - leg_lat = self.client.get("/rpc/v2/statistics/count_datasets?legacy=true&latest=true").data - leg_not_lat = self.client.get( - "/rpc/v2/statistics/count_datasets?legacy=true&latest=false" - ).data - not_leg_not_lat = self.client.get( - "/rpc/v2/statistics/count_datasets?legacy=false&latest=false" - ).data - - self.assertEqual(leg_lat["count"], 1) - self.assertEqual(leg_not_lat["count"], 3) - self.assertEqual(not_leg_not_lat["count"], total_count - 3) - def test_count_datasets_from_date(self): total_count = CatalogRecord.objects_unfiltered.count() @@ -432,518 +195,4 @@ def test_count_datasets_to_date(self): self.assertEqual(res["count"], total_count - after_jan_count) res = self.client.get("/rpc/v2/statistics/count_datasets?to_date=2019-02-01").data - self.assertEqual(res["count"], total_count - after_feb_count) - - -class StatisticRPCAllDatasetsCumulative(StatisticRPCCommon, CatalogRecordApiWriteCommon): - """ - Test suite for all_datasets_cumulative. Test only optional parameters removed, legacy and latest for now. - """ - - url = "/rpc/v2/statistics/all_datasets_cumulative" - dateparam_all = "from_date=2018-06&to_date=2019-03" - - def test_all_datasets_cumulative(self): - """ - Basic tests for all_datasets_cumulative including parameter checks and basic functionality. 
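The flag-combination tests deleted above all drive the same endpoint with different removed/legacy/latest values. A small sketch of the query shape they exercise, assuming the same test client as the removed code; per the deleted cases, latest defaults to true when the parameter is omitted:

def count_datasets(client, **flags):
    # e.g. count_datasets(client, removed="true", legacy="false")
    query = "&".join(f"{name}={value}" for name, value in flags.items())
    return client.get(f"/rpc/v2/statistics/count_datasets?{query}").data["count"]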
- - Return values for each interval: - count: Number of datasets created in this month - ida_byte_size: size of all files in datasets created this month - count_cumulative: number of datasets from from_date to this month (including) - ida_byte_size_cumulative: size of all files in datasets created from from_date to this month (including) - """ - - # test bad query parameters - response = self.client.get(f"{self.url}") - self.assertEqual( - response.status_code, - status.HTTP_400_BAD_REQUEST, - "from_date and to_date are required", - ) - - response = self.client.get(f"{self.url}?from_date=2019-11&to_date=bad_parameter") - self.assertEqual( - response.status_code, status.HTTP_400_BAD_REQUEST, "date format is YYYY-MM" - ) - - response = self.client.get(f"{self.url}?from_date=2019-11&to_date=2019-11-15") - self.assertEqual( - response.status_code, status.HTTP_400_BAD_REQUEST, "date format is YYYY-MM" - ) - - # test the basic functionality - june_size = self._get_byte_size_of_month("2018-06") - june_count = self._get_dataset_count_of_month("2018-06") - - july_size = self._get_byte_size_of_month("2018-07") - july_count = self._get_dataset_count_of_month("2018-07") - - march_size = self._get_byte_size_of_month("2019-03") - march_count = self._get_dataset_count_of_month("2019-03") - - total_count = self._get_total_dataset_count() - total_size = self._get_total_byte_size() - - response = self.client.get(f"{self.url}?{self.dateparam_all}").data - # ensure the counts and byte sizes are calculated correctly - self.assertEqual(response[0]["count"], june_count, response) - self.assertEqual(response[0]["ida_byte_size"], june_size, response) - self.assertEqual(response[0]["count_cumulative"], june_count, response) - self.assertEqual(response[0]["ida_byte_size_cumulative"], june_size, response) - - self.assertEqual(response[1]["count"], july_count, response) - self.assertEqual(response[1]["ida_byte_size"], july_size, response) - self.assertEqual(response[1]["count_cumulative"], june_count + july_count, response) - self.assertEqual(response[1]["ida_byte_size_cumulative"], june_size + july_size, response) - - self.assertEqual(response[-1]["count"], march_count, response) - self.assertEqual(response[-1]["ida_byte_size"], march_size, response) - self.assertEqual(response[-1]["count_cumulative"], total_count, response) - self.assertEqual(response[-1]["ida_byte_size_cumulative"], total_size, response) - - # test that only datasets from beginning of from_date is counted - response = self.client.get(f"{self.url}?from_date=2018-07&to_date=2019-03").data - - self.assertEqual(response[-1]["count_cumulative"], total_count - june_count, response) - self.assertEqual(response[-1]["ida_byte_size_cumulative"], total_size - june_size, response) - - def test_all_datasets_cumulative_single(self): - """ - Tests single parameters for api. Empty removed and legacy parameters returns true AND false matches - """ - total_count = self._get_total_dataset_count() - total_size = self._get_total_byte_size() - - # test removed -parameter - june_size = self._get_byte_size_of_month("2018-06") - june_count = self._get_dataset_count_of_month("2018-06") - - self._set_removed_dataset(id=8) # belongs to 2018-06, i.e. 
the first interval - removed_size = self._get_catalog_record_size(id=8) - - response = self.client.get(f"{self.url}?{self.dateparam_all}&removed=true").data - # ensure that only the first month (2018-06) contains dataset and that cumulative is calculated correctly - self.assertEqual(response[0]["count"], 1, response) - self.assertEqual(response[0]["ida_byte_size"], removed_size, response) - self.assertEqual(response[0]["count_cumulative"], 1, response) - self.assertEqual(response[0]["ida_byte_size_cumulative"], removed_size, response) - - self.assertEqual(response[-1]["count_cumulative"], 1, response) - self.assertEqual(response[-1]["ida_byte_size_cumulative"], removed_size, response) - - response = self.client.get(f"{self.url}?{self.dateparam_all}&removed=false").data - # ensure that the correct dataset is missing from results - self.assertEqual(response[0]["count"], june_count - 1, response) - self.assertEqual(response[0]["ida_byte_size"], june_size - removed_size, response) - self.assertEqual(response[-1]["count_cumulative"], total_count - 1, response) - self.assertEqual( - response[-1]["ida_byte_size_cumulative"], - total_size - removed_size, - response, - ) - - # test legacy -parameter - leg_cr = ( - self._create_legacy_dataset() - ) # legacy cr belongs to 2019-03, i.e. the last interval - self._set_dataset_creation_date(leg_cr, "2019-03-13") - - legacy_size = self._get_catalog_record_size(id=leg_cr) - - total_count = self._get_total_dataset_count() - total_size = self._get_total_byte_size() - - march_size = self._get_byte_size_of_month("2019-03") - march_count = self._get_dataset_count_of_month("2019-03") - - response = self.client.get(f"{self.url}?{self.dateparam_all}&legacy=true").data - - self.assertEqual(response[-1]["count"], 1, response) - self.assertEqual(response[-1]["ida_byte_size"], legacy_size, response) - self.assertEqual(response[-1]["count_cumulative"], 1, response) - self.assertEqual(response[-1]["ida_byte_size_cumulative"], legacy_size, response) - - response = self.client.get(f"{self.url}?{self.dateparam_all}&legacy=false").data - - self.assertEqual(response[-1]["count"], march_count - 1, response) - self.assertEqual(response[-1]["ida_byte_size"], march_size - legacy_size, response) - self.assertEqual(response[-1]["count_cumulative"], total_count - 1, response) - self.assertEqual( - response[-1]["ida_byte_size_cumulative"], total_size - legacy_size, response - ) - - # test latest -parameter - # new versions will belong to 2019-03, i.e. 
the last interval - second = self._create_new_dataset_version() - self._set_dataset_creation_date(second, "2019-03-17") - - old_ver_size = self._get_catalog_record_size(id=1) - - total_count = self._get_total_dataset_count() - total_size = self._get_total_byte_size() - - march_size = self._get_byte_size_of_month("2019-03") - march_count = self._get_dataset_count_of_month("2019-03") - - response = self.client.get( - f"{self.url}?{self.dateparam_all}&latest=false" - ).data # returns all - self.assertEqual(response[-1]["count"], march_count, response) - self.assertEqual(response[-1]["ida_byte_size"], march_size, response) - self.assertEqual(response[-1]["count_cumulative"], total_count, response) - self.assertEqual(response[-1]["ida_byte_size_cumulative"], total_size, response) - - with_param = self.client.get(f"{self.url}?{self.dateparam_all}&latest=true").data - self.assertEqual(with_param[-1]["count"], march_count - 1, with_param) - self.assertEqual(with_param[-1]["ida_byte_size"], march_size - old_ver_size, with_param) - self.assertEqual(with_param[-1]["count_cumulative"], total_count - 1, with_param) - self.assertEqual( - with_param[-1]["ida_byte_size_cumulative"], - total_size - old_ver_size, - response, - ) - - # ensure that default value(true) is working as expected - without_param = self.client.get(f"{self.url}?{self.dateparam_all}").data - self.assertEqual(with_param[-1]["count"], without_param[-1]["count"], with_param) - self.assertEqual( - with_param[-1]["ida_byte_size"], - without_param[-1]["ida_byte_size"], - with_param, - ) - self.assertEqual( - with_param[-1]["count_cumulative"], - without_param[-1]["count_cumulative"], - with_param, - ) - self.assertEqual( - with_param[-1]["ida_byte_size_cumulative"], - without_param[-1]["ida_byte_size_cumulative"], - with_param, - ) - - def test_all_datasets_cumulative_removed_latest(self): - second = self._create_new_dataset_version() - self._set_dataset_creation_date(second, "2019-03-11") - - self._set_removed_dataset(id=1) - self._set_removed_dataset(id=second) - - latest_size = self._get_catalog_record_size(id=second) - removed_size = self._get_catalog_record_size(id=1) + latest_size - removed_count = 2 - - rem_lat = self.client.get(f"{self.url}?{self.dateparam_all}&removed=true&latest=true").data - - self.assertEqual(rem_lat[-1]["count"], 1, rem_lat) # id=second - self.assertEqual(rem_lat[-1]["ida_byte_size"], latest_size, rem_lat) - self.assertEqual(rem_lat[-1]["count_cumulative"], 1, rem_lat) - self.assertEqual(rem_lat[-1]["ida_byte_size_cumulative"], latest_size, rem_lat) - - rem_not_lat = self.client.get( - f"{self.url}?{self.dateparam_all}&removed=true&latest=false" - ).data - - self.assertEqual(rem_not_lat[-1]["count"], removed_count, rem_not_lat) # id=second - self.assertEqual(rem_not_lat[-1]["ida_byte_size"], removed_size, rem_not_lat) - self.assertEqual(rem_not_lat[-1]["count_cumulative"], removed_count, rem_not_lat) - self.assertEqual(rem_not_lat[-1]["ida_byte_size_cumulative"], removed_size, rem_not_lat) - - # create new dataset with two versions, which will not be deleted - new_cr_id = self._create_new_dataset(self.cr_test_data) - self._set_dataset_creation_date(new_cr_id, "2019-01-02") - - new_cr_ver = self._create_new_dataset_version(new_cr_id) - self._set_dataset_creation_date(new_cr_ver, "2019-01-06") - - old_version_size = self._get_catalog_record_size(id=new_cr_id) - - jan_count = self._get_dataset_count_of_month("2019-01") - jan_size = self._get_byte_size_of_month("2019-01") - - total_count = 
self._get_total_dataset_count() - total_size = self._get_total_byte_size() - - not_rem_lat = self.client.get( - f"{self.url}?{self.dateparam_all}&removed=false&latest=true" - ).data - - # missing the removed dataset from before and dataset id='new_cr_id' - self.assertEqual(not_rem_lat[-3]["count"], jan_count - 1, not_rem_lat) - self.assertEqual(not_rem_lat[-3]["ida_byte_size"], jan_size - old_version_size, not_rem_lat) - self.assertEqual( - not_rem_lat[-1]["count_cumulative"], - total_count - removed_count - 1, - not_rem_lat, - ) - self.assertEqual( - not_rem_lat[-1]["ida_byte_size_cumulative"], - total_size - removed_size - old_version_size, - not_rem_lat, - ) - - not_rem_not_lat = self.client.get( - f"{self.url}?{self.dateparam_all}&removed=false&latest=false" - ).data - - self.assertEqual(not_rem_not_lat[-3]["count"], jan_count, not_rem_not_lat) - self.assertEqual(not_rem_not_lat[-3]["ida_byte_size"], jan_size, not_rem_not_lat) - self.assertEqual( - not_rem_not_lat[-1]["count_cumulative"], - total_count - removed_count, - not_rem_not_lat, - ) - self.assertEqual( - not_rem_not_lat[-1]["ida_byte_size_cumulative"], - total_size - removed_size, - not_rem_not_lat, - ) - - def test_all_datasets_cumulative_removed_legacy(self): - leg_cr_1 = self._create_legacy_dataset() - self._set_dataset_creation_date(leg_cr_1, "2018-07-03") - - leg_cr_2 = self._create_legacy_dataset() - self._set_dataset_creation_date(leg_cr_2, "2019-02-08") - self._set_removed_dataset(leg_cr_2) - - self._set_removed_dataset(id=8) # belongs to first interval, i.e. 2018-06 - - leg_non_rem_size = self._get_catalog_record_size(leg_cr_1) - leg_removed_size = self._get_catalog_record_size(leg_cr_2) - removed_size = self._get_catalog_record_size(8) - - rem_leg_count = 3 - rem_leg_size = leg_non_rem_size + leg_removed_size + removed_size - - total_count = self._get_total_dataset_count() - total_size = self._get_total_byte_size() - - june_count = self._get_dataset_count_of_month("2018-06") - june_size = self._get_byte_size_of_month("2018-06") - - feb_count = self._get_dataset_count_of_month("2019-02") - feb_size = self._get_byte_size_of_month("2019-02") - - rem_leg = self.client.get(f"{self.url}?{self.dateparam_all}&removed=true&legacy=true").data - - self.assertEqual(rem_leg[-2]["count"], 1, rem_leg) - self.assertEqual(rem_leg[-2]["ida_byte_size"], leg_removed_size, rem_leg) - self.assertEqual(rem_leg[-1]["count_cumulative"], 1, rem_leg) - self.assertEqual(rem_leg[-1]["ida_byte_size_cumulative"], leg_removed_size, rem_leg) - - rem_not_leg = self.client.get( - f"{self.url}?{self.dateparam_all}&removed=true&legacy=false" - ).data - - self.assertEqual(rem_not_leg[0]["count"], 1, rem_not_leg) - self.assertEqual(rem_not_leg[0]["ida_byte_size"], removed_size, rem_not_leg) - self.assertEqual(rem_not_leg[-1]["count_cumulative"], 1, rem_not_leg) - self.assertEqual(rem_not_leg[-1]["ida_byte_size_cumulative"], removed_size, rem_not_leg) - - not_rem_leg = self.client.get( - f"{self.url}?{self.dateparam_all}&removed=false&legacy=true" - ).data - - self.assertEqual(not_rem_leg[1]["count"], 1, not_rem_leg) - self.assertEqual(not_rem_leg[1]["ida_byte_size"], leg_non_rem_size, not_rem_leg) - self.assertEqual(not_rem_leg[-1]["count_cumulative"], 1, not_rem_leg) - self.assertEqual(not_rem_leg[-1]["ida_byte_size_cumulative"], leg_non_rem_size, not_rem_leg) - - not_rem_not_leg = self.client.get( - f"{self.url}?{self.dateparam_all}&removed=false&legacy=false" - ).data - - self.assertEqual(not_rem_not_leg[0]["count"], june_count - 1, not_rem_not_leg) - 
self.assertEqual( - not_rem_not_leg[0]["ida_byte_size"], - june_size - removed_size, - not_rem_not_leg, - ) - self.assertEqual(not_rem_not_leg[-2]["count"], feb_count - 1, not_rem_not_leg) - self.assertEqual( - not_rem_not_leg[-2]["ida_byte_size"], - feb_size - leg_removed_size, - not_rem_not_leg, - ) - self.assertEqual( - not_rem_not_leg[-1]["count_cumulative"], - total_count - rem_leg_count, - not_rem_not_leg, - ) - self.assertEqual( - not_rem_not_leg[-1]["ida_byte_size_cumulative"], - total_size - rem_leg_size, - not_rem_not_leg, - ) - - def test_all_datasets_cumulative_latest_legacy(self): - leg_cr = self._create_legacy_dataset() - self._set_dataset_creation_date(leg_cr, "2019-03-08") - - second = self._create_new_dataset_version(leg_cr) - self._set_dataset_creation_date(second, "2019-03-12") - - leg_cr_size = self._get_catalog_record_size(id=leg_cr) - second_size = self._get_catalog_record_size(id=second) - - legacy_count = 2 - legacy_size = leg_cr_size + second_size - - total_count = self._get_total_dataset_count() - total_size = self._get_total_byte_size() - - march_count = self._get_dataset_count_of_month("2019-03") - march_size = self._get_byte_size_of_month("2019-03") - - leg_lat = self.client.get(f"{self.url}?{self.dateparam_all}&legacy=true&latest=true").data - - self.assertEqual(leg_lat[-1]["count"], 1, leg_lat) - self.assertEqual(leg_lat[-1]["ida_byte_size"], second_size, leg_lat) - self.assertEqual(leg_lat[-1]["count_cumulative"], 1, leg_lat) - self.assertEqual(leg_lat[-1]["ida_byte_size_cumulative"], second_size, leg_lat) - - leg_not_lat = self.client.get( - f"{self.url}?{self.dateparam_all}&legacy=true&latest=false" - ).data - - self.assertEqual(leg_not_lat[-1]["count"], legacy_count, leg_not_lat) - self.assertEqual(leg_not_lat[-1]["ida_byte_size"], legacy_size, leg_not_lat) - self.assertEqual(leg_not_lat[-1]["count_cumulative"], legacy_count, leg_not_lat) - self.assertEqual(leg_not_lat[-1]["ida_byte_size_cumulative"], legacy_size, leg_not_lat) - - not_leg_not_lat = self.client.get( - f"{self.url}?{self.dateparam_all}&legacy=false&latest=false" - ).data - - self.assertEqual(not_leg_not_lat[-1]["count"], march_count - legacy_count, not_leg_not_lat) - self.assertEqual( - not_leg_not_lat[-1]["ida_byte_size"], - march_size - legacy_size, - not_leg_not_lat, - ) - self.assertEqual( - not_leg_not_lat[-1]["count_cumulative"], - total_count - legacy_count, - not_leg_not_lat, - ) - self.assertEqual( - not_leg_not_lat[-1]["ida_byte_size_cumulative"], - total_size - legacy_size, - not_leg_not_lat, - ) - - -class StatisticRPCforDrafts(StatisticRPCCommon, CatalogRecordApiWriteCommon): - """ - Tests that drafts are not taken into account when calculating statistics - """ - - def test_count_datasets_api_for_drafts(self): - """ - Tests that rpc/statistics/count_datasets returns only count of published datasets - """ - response_1 = self.client.get("/rpc/v2/statistics/count_datasets").data - - self._set_dataset_as_draft(1) - self.assertEqual( - CatalogRecord.objects.get(pk=1).state, - "draft", - "Dataset with id=1 should have changed state to draft", - ) - - response_2 = self.client.get("/rpc/v2/statistics/count_datasets").data - self.assertNotEqual( - response_1["count"], - response_2["count"], - "Drafts should not be returned in count_datasets api", - ) - - def test_all_datasets_cumulative_for_drafts(self): - """ - Tests that /rpc/v2/statistics/all_datasets_cumulative returns only published datasets - """ - url = 
"/rpc/v2/statistics/all_datasets_cumulative?from_date=2019-06&to_date=2019-06" - - self._set_dataset_creation_date(1, "2019-06-15") - response_1 = self.client.get(url).data - - self._set_dataset_as_draft(1) - response_2 = self.client.get(url).data - - # ensure the counts and byte sizes are calculated without drafts - self.assertNotEqual( - response_1[0]["count"], - response_2[0]["count"], - "Count for June should reduce by one as dataset id=1 was set as draft", - ) - self.assertNotEqual( - response_1[0]["ida_byte_size"], - response_2[0]["ida_byte_size"], - "Byte size for June should reduce by one as dataset id=1 was set as draft", - ) - - def test_catalog_datasets_cumulative_for_drafts(self): - """ - Tests that /rpc/v2/statistics/catalog_datasets_cumulative returns only published datasets - """ - - url = ( - "/rpc/v2/statistics/catalog_datasets_cumulative?from_date=2019-06-01&to_date=2019-06-30" - ) - catalog = "urn:nbn:fi:att:2955e904-e3dd-4d7e-99f1-3fed446f96d3" - - self._set_dataset_creation_date(1, "2019-06-15") - self._set_cr_datacatalog(1, catalog) # Adds id=1 to catalog - - count_1 = self.client.get(url).data[catalog]["open"][0]["count"] - total_1 = self.client.get(url).data[catalog]["total"] - - self._set_dataset_as_draft(1) - - count_2 = self.client.get(url).data[catalog]["open"][0]["count"] - total_2 = self.client.get(url).data[catalog]["total"] - - # ensure the count and total are calculated without drafts - self.assertNotEqual( - count_1, - count_2, - "Count should reduce by one as dataset id=1 was set as draft", - ) - self.assertNotEqual( - total_1, - total_2, - "Total should reduce by one as dataset id=1 was set as draft", - ) - - def test_end_user_datasets_cumulative_for_drafts(self): - """ End user api should return only published data """ - url = "/rpc/v2/statistics/end_user_datasets_cumulative?from_date=2019-06-01&to_date=2019-06-30" - - self._set_dataset_creation_date(10, "2019-06-15") - count_1 = self.client.get(url).data[0]["count"] - - self._set_dataset_as_draft(10) - count_2 = self.client.get(url).data[0]["count"] - - # ensure the count are calculated without drafts - self.assertNotEqual( - count_1, - count_2, - "Count should be reduced by one after setting id=10 as draft", - ) - - def test_organization_datasets_cumulative_for_drafts(self): - """ Organization api should return only published data """ - url = "/rpc/v2/statistics/organization_datasets_cumulative?from_date=2019-06-01&to_date=2019-06-30" - - self._set_dataset_creation_date(1, "2019-06-15") - self._set_cr_organization(1, "org_2") - total_1 = self.client.get(url).data["org_2"]["total"] - - self._set_dataset_as_draft(1) - total_2 = self.client.get(url).data["org_2"]["total"] - - # ensure the totals are calculated without drafts - self.assertNotEqual(total_1, total_2, "Count be reduced by one after setting id=1 as draft") + self.assertEqual(res["count"], total_count - after_feb_count) \ No newline at end of file diff --git a/src/metax_api/tests/middleware/test_middleware_v2.py b/src/metax_api/tests/middleware/test_middleware_v2.py deleted file mode 100755 index 4b3d791d..00000000 --- a/src/metax_api/tests/middleware/test_middleware_v2.py +++ /dev/null @@ -1,206 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -from django.utils import timezone -from pytz import timezone as tz -from rest_framework import status -from rest_framework.test import APITestCase 
- -from metax_api.tests.api.rest.base.views.datasets.write import CatalogRecordApiWriteCommon -from metax_api.tests.utils import TestClassUtils -from metax_api.utils import parse_timestamp_string_to_tz_aware_datetime - -FORBIDDEN = status.HTTP_403_FORBIDDEN - - -class ApiAuthnzTest(APITestCase, TestClassUtils): - """ - Test use of HTTP Authorization header for authnz for POST, PUT, PATCH and - DELETE requests. API caller identification is performed as the first step - of each request, so any API is as good as any, even with invalid payload, - for testing purposes. - """ - - def setUp(self): - self._use_http_authorization() - - # - # - # - # read requests - # - # - # - - def test_authorization_not_required(self): - """ - GET operations are allowed for all. - """ - - # reset credentials - self.client.credentials() - - response = self.client.get("/rest/v2/datasets") - self.assertNotEqual(response.status_code, FORBIDDEN) - response = self.client.get("/rest/v2/datasets/1") - self.assertNotEqual(response.status_code, FORBIDDEN) - - def test_optional_authorizatiaon_during_get(self): - """ - If auth headers are passed during GET, the user should then be identified by them. - """ - response = self.client.get("/rest/v2/datasets") - self.assertNotEqual(response.status_code, FORBIDDEN) - response = self.client.get("/rest/v2/datasets/1") - self.assertNotEqual(response.status_code, FORBIDDEN) - - def test_optional_authorizatiaon_during_get_fails(self): - """ - If auth headers are passed during GET, the user should then be identified by them. - And if credentials are wrong, then access is forbidden - """ - self._use_http_authorization(username="nope", password="wrong") - response = self.client.get("/rest/v2/datasets") - self.assertEqual(response.status_code, FORBIDDEN) - - # - # - # - # write requests - # - # - # - - def test_authorization_ok(self): - """ - All write operations require proper authnz using HTTP Authorization header. - The following requests are invalid by their content, but none should fail to - the very first step of identifying the api caller. - """ - response = self.client.post("/rest/v2/datasets") - self.assertNotEqual(response.status_code, FORBIDDEN) - response = self.client.put("/rest/v2/datasets") - self.assertNotEqual(response.status_code, FORBIDDEN) - response = self.client.patch("/rest/v2/datasets") - self.assertNotEqual(response.status_code, FORBIDDEN) - response = self.client.delete("/rest/v2/datasets") - self.assertNotEqual(response.status_code, FORBIDDEN) - - def test_unknown_user(self): - """ - Unknown user credentials, every request should fail to the very first step - of identifying the api caller. 
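The authnz tests deleted here all hinge on injecting, or clearing, an HTTP Authorization header on DRF's test client; the _use_http_authorization helper they call wraps essentially the following. A minimal sketch with the header construction spelled out:

import base64


def basic_auth_header(username, password):
    # HTTP Basic auth is the base64 encoding of "username:password".
    token = base64.b64encode(f"{username}:{password}".encode()).decode()
    return f"Basic {token}"


# client.credentials(HTTP_AUTHORIZATION=basic_auth_header("testuser", "password"))
# client.credentials()  # called with no arguments, this clears credentials again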
- """ - self._use_http_authorization(username="other", password="pw") - response = self.client.post("/rest/v2/datasets") - self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.put("/rest/v2/datasets") - self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.patch("/rest/v2/datasets") - self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.delete("/rest/v2/datasets") - self.assertEqual(response.status_code, FORBIDDEN) - - def test_wrong_password(self): - self._use_http_authorization(password="wrongpassword") - response = self.client.post("/rest/v2/datasets") - self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.put("/rest/v2/datasets") - self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.patch("/rest/v2/datasets") - self.assertEqual(response.status_code, FORBIDDEN) - response = self.client.delete("/rest/v2/datasets") - self.assertEqual(response.status_code, FORBIDDEN) - - # - # - # - # invalid auth header - # - # All errors during the auth header processing should return 403, without - # giving more specific errors. - # - # - # - - def test_malformed_auth_header(self): - self._use_http_authorization(header_value="Basic hubbabubba") - response = self.client.post("/rest/v2/datasets") - self.assertEqual(response.status_code, FORBIDDEN) - - def test_invalid_auth_method(self): - self._use_http_authorization(header_value="NotSupported hubbabubba") - response = self.client.post("/rest/v2/datasets") - self.assertEqual(response.status_code, FORBIDDEN) - - -class ApiModifyResponseTest(CatalogRecordApiWriteCommon): - def test_catalog_record_get_last_modified_header(self): - response = self.client.get("/rest/v2/datasets/1") - self._validate_response(response) - - def test_catalog_record_post_last_modified_header(self): - response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self._validate_response(response) - - def test_catalog_record_put_last_modified_header(self): - cr = self.client.get("/rest/v2/datasets/1", format="json").data - cr["preservation_description"] = "what" - response = self.client.put("/rest/v2/datasets/1", cr, format="json") - self._validate_response(response) - - def test_catalog_record_patch_last_modified_header(self): - cr = self.client.get("/rest/v2/datasets/1", format="json").data - cr["preservation_description"] = "what" - response = self.client.patch("/rest/v2/datasets/1", cr, format="json") - self._validate_response(response) - - def test_catalog_record_delete_does_not_contain_last_modified_header(self): - response = self.client.delete("/rest/v2/datasets/1") - self.assertFalse(response.has_header("Last-Modified")) - - def test_catalog_record_bulk_create_get_last_modified_header(self): - response = self.client.post( - "/rest/v2/datasets", [self.cr_test_data, self.cr_test_data], format="json" - ) - self._validate_response(response) - - def _validate_response(self, response): - data = response.data.get("success", response.data) - obj = data[0].get("object", None) if isinstance(data, list) else data - self.assertIsNotNone(obj) - - expected_modified_str = ( - obj["date_modified"] if "date_modified" in obj else obj.get("date_created", None) - ) - expected_modified = timezone.localtime( - parse_timestamp_string_to_tz_aware_datetime(expected_modified_str), - timezone=tz("GMT"), - ) - - self.assertTrue(response.has_header("Last-Modified")) - actual_modified = timezone.localtime( - 
parse_timestamp_string_to_tz_aware_datetime(response.get("Last-Modified")), - timezone=tz("GMT"), - ) - - self.assertEqual(expected_modified, actual_modified) - - -class ApiStreamHttpResponse(CatalogRecordApiWriteCommon): - def test_no_streaming_with_paging(self): - response = self.client.get("/rest/v2/datasets?stream=true") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.streaming, False) - - def test_streaming_json(self): - response = self.client.get("/rest/v2/datasets?no_pagination=true&stream=true") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.streaming, True) - response = self.client.get("/rest/v2/files?no_pagination=true&stream=true") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.streaming, True) From f856469072882dcb6d0ec6938ff52b32783c5e22 Mon Sep 17 00:00:00 2001 From: aptiaine Date: Thu, 22 Jul 2021 14:41:25 +0300 Subject: [PATCH 044/160] Two atomic blocks added --- src/metax_api/api/rpc/v2/views/dataset_rpc.py | 35 +++++++++++-------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/src/metax_api/api/rpc/v2/views/dataset_rpc.py b/src/metax_api/api/rpc/v2/views/dataset_rpc.py index cbffd5c4..0d587969 100755 --- a/src/metax_api/api/rpc/v2/views/dataset_rpc.py +++ b/src/metax_api/api/rpc/v2/views/dataset_rpc.py @@ -7,6 +7,7 @@ import logging +from django.db import DatabaseError, transaction from django.http import Http404 from rest_framework import status from rest_framework.decorators import action @@ -71,15 +72,18 @@ def change_cumulative_state(self, request): @action(detail=False, methods=["post"], url_path="create_draft") def create_draft(self, request): + try: + with transaction.atomic(): + cr = self.get_object() - cr = self.get_object() - - cr.create_draft() + cr.create_draft() - return Response( - data={"id": cr.next_draft.id, "identifier": cr.next_draft.identifier}, - status=status.HTTP_201_CREATED, - ) + return Response( + data={"id": cr.next_draft.id, "identifier": cr.next_draft.identifier}, + status=status.HTTP_201_CREATED, + ) + except DatabaseError: + return Response({'error': 'Failed to create draft'}, status=status.HTTP_400_BAD_REQUEST) @action(detail=False, methods=["post"], url_path="create_new_version") def create_new_version(self, request): @@ -98,15 +102,18 @@ def create_new_version(self, request): @action(detail=False, methods=["post"], url_path="publish_dataset") def publish_dataset(self, request): + try: + with transaction.atomic(): + cr = self.get_object() - cr = self.get_object() - - cr.publish_dataset() + cr.publish_dataset() - return Response( - data={"preferred_identifier": cr.preferred_identifier}, - status=status.HTTP_200_OK, - ) + return Response( + data={"preferred_identifier": cr.preferred_identifier}, + status=status.HTTP_200_OK, + ) + except DatabaseError: + return Response({'error': 'Failed to publish dataset'}, status=status.HTTP_400_BAD_REQUEST) @action(detail=False, methods=["post"], url_path="merge_draft") def merge_draft(self, request): From fed7eb148f4489e0b004347b1eb680992627b7e5 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Fri, 23 Jul 2021 15:17:36 +0300 Subject: [PATCH 045/160] added functionality where total_files_byte_size field is dropped from drafts/datasets with no files/dirs --- src/metax_api/models/catalog_record.py | 3 +++ src/metax_api/models/catalog_record_v2.py | 3 +++ .../tests/api/rest/base/views/datasets/write.py | 9 +++++++++ .../api/rest/v2/views/datasets/filehandling.py | 15 
+++++++++++++-- 4 files changed, 28 insertions(+), 2 deletions(-) diff --git a/src/metax_api/models/catalog_record.py b/src/metax_api/models/catalog_record.py index a57b87fa..c6aae125 100755 --- a/src/metax_api/models/catalog_record.py +++ b/src/metax_api/models/catalog_record.py @@ -1445,6 +1445,9 @@ def _pre_create_operations(self, pid_type=None): if "remote_resources" in self.research_dataset: self._calculate_total_remote_resources_byte_size() + if not ("files" in self.research_dataset or "directories" in self.research_dataset) and "total_files_byte_size" in self.research_dataset: + self.research_dataset.pop("total_files_byte_size") + if self.cumulative_state == self.CUMULATIVE_STATE_CLOSED: raise Http400("Cannot create cumulative dataset with state closed") diff --git a/src/metax_api/models/catalog_record_v2.py b/src/metax_api/models/catalog_record_v2.py index c199bf6d..d6f97767 100755 --- a/src/metax_api/models/catalog_record_v2.py +++ b/src/metax_api/models/catalog_record_v2.py @@ -105,6 +105,9 @@ def _pre_create_operations(self): self.research_dataset["metadata_version_identifier"] = generate_uuid_identifier() self.identifier = generate_uuid_identifier() + if not ("files" in self.research_dataset or "directories" in self.research_dataset) and "total_files_byte_size" in self.research_dataset: + self.research_dataset.pop("total_files_byte_size") + if not self._save_as_draft(): self._generate_issued_date() diff --git a/src/metax_api/tests/api/rest/base/views/datasets/write.py b/src/metax_api/tests/api/rest/base/views/datasets/write.py index 8a68881e..2986b797 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/write.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/write.py @@ -3324,6 +3324,15 @@ def test_add_files_to_empty_cumulative_dataset(self): "there should be no new datasets", ) + def test_total_files_byte_size_field_is_dropped_from_datasets_with_no_files(self): + # dataset with no files/dirs does not have total_files_byte_size field + self.cr_test_data["research_dataset"].pop("files", None) + self.cr_test_data["research_dataset"].pop("directories", None) + response = self.client.post("/rest/datasets", self.cr_test_data, format="json") + cr_id = response.data["id"] + response = self.client.get(f"/rest/datasets/{cr_id}") + self.assertEqual(response.data.get("research_dataset").get("total_files_byte_size"), None) + def test_adding_files_to_cumulative_dataset_creates_no_new_versions(self): """ Tests the basic idea of cumulative dataset: add files with no new version diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py b/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py index 2d57d168..c85a7f1f 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py @@ -469,7 +469,6 @@ def _set_token_authentication(self): def _create_draft(self): self.cr_test_data["research_dataset"].pop("files", None) self.cr_test_data["research_dataset"].pop("directories", None) - self.cr_test_data["research_dataset"].pop("total_files_byte_size", None) response = self.client.post("/rest/v2/datasets?draft", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) return response.data["id"] @@ -830,10 +829,22 @@ def test_total_files_byte_size_is_updated_after_adding_files(self): ) self.assertEqual(response.data.get("files_added"), 1, response.data) self.assert_file_count(cr_id, 1) - response = 
self.client.get(f"/rest/v2/datasets/{cr_id}") self.assert_total_files_byte_size(response.data, 100) + def test_total_files_byte_size_field_is_dropped_from_drafts_with_no_files(self): + # draft with no files/dirs does not have total_files_byte_size field + cr_id = self._create_draft() + response = self.client.get(f"/rest/v2/datasets/{cr_id}") + self.assertEqual(response.data.get("research_dataset").get("total_files_byte_size"), None) + + def test_total_files_byte_size_field_is_dropped_from_datasets_with_no_files(self): + # dataset with no files/dirs does not have total_files_byte_size field + response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") + cr_id = response.data["id"] + response = self.client.get(f"/rest/v2/datasets/{cr_id}") + self.assertEqual(response.data.get("research_dataset").get("total_files_byte_size"), None) + class CatalogRecordUserMetadata(CatalogRecordApiWriteAssignFilesCommonV2): From 7623da2365099f2649ca921b10db620716148bc3 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Fri, 23 Jul 2021 15:19:22 +0300 Subject: [PATCH 046/160] fixed bug from test made for ticket CSCFAIRMETA-1064 that was already merged --- src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py b/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py index c85a7f1f..e1b49f43 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/filehandling.py @@ -818,7 +818,7 @@ def test_total_files_byte_size_is_updated_after_adding_files(self): cr_id = self._create_draft() response = self.client.get(f"/rest/v2/datasets/{cr_id}") cr_id = response.data["id"] - self.assertEqual(response.data.get("total_files_byte_size"), None) + self.assertEqual(response.data.get("research_dataset").get("total_files_byte_size"), None) # add file to dataset file_changes = {} From cc9a67cde0bf302e2a579fb18ae300b229c629de Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Tue, 27 Jul 2021 10:10:04 +0300 Subject: [PATCH 047/160] deleted unnecessary code from tests --- .gitignore | 3 +-- .../tests/api/rpc/base/views/dataset_rpc.py | 21 +++---------------- 2 files changed, 4 insertions(+), 20 deletions(-) diff --git a/.gitignore b/.gitignore index 553f38d8..2f47d8c2 100755 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ /src/__pycache__/* /src/static/* /src/.coverage +/src/coverage_data /swagger/*index.html /swagger/*/index.html .idea/ @@ -34,5 +35,3 @@ coverage.xml .hypothesis/ .pytest_cache/ pytestdebug.log - -venv diff --git a/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py b/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py index 84771c8d..2e602a6f 100755 --- a/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py +++ b/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py @@ -254,18 +254,6 @@ def test_refresh_adds_new_files(self): file_byte_size_before, file_size_after, self._single_file_byte_size * 2 ) - # freeze two files to /TestExperiment/Directory_2/Group_3 - self._freeze_new_files() - response = self.client.post(self.url % (new_version.identifier, dir_id), format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data["number_of_files_added"], 2) - - new_version = CatalogRecord.objects.get(id=response.data["new_version_created"]["id"]) - self.assertEqual( - new_version.files.count(), - 
new_version.previous_dataset_version.files.count() + 2, - ) - def test_adding_parent_dir_allows_refreshes_to_child_dirs(self): """ When parent directory is added to dataset, refreshes to child directories are also possible. @@ -398,12 +386,9 @@ def test_adding_files_from_non_assigned_dir_is_not_allowed(self): response = self.client.post("/rest/datasets", self.cr_test_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) cr_id = response.data["identifier"] - - # create another dataset so that dir /SecondExperiment/Data_Config will be created - self._add_directory(self.cr_test_data, "/SecondExperiment/Data_Config") - response = self.client.post("/rest/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - dir_id = response.data["research_dataset"]["directories"][1]["identifier"] + dir_id = Directory.objects.filter( + directory_path="/SecondExperiment/Data_Config" + ).first().identifier response = self.client.post(self.url % (cr_id, dir_id), format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) From 4d08aed9530faf82a425f45bd779278babf61a42 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Thu, 8 Jul 2021 11:09:03 +0300 Subject: [PATCH 048/160] added custom renderer to change API response content type to json --- src/metax_api/renderers/renderers.py | 5 +++++ src/metax_api/tests/api/rest/base/views/schemas/read.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/metax_api/renderers/renderers.py b/src/metax_api/renderers/renderers.py index c19a3b60..862bb054 100755 --- a/src/metax_api/renderers/renderers.py +++ b/src/metax_api/renderers/renderers.py @@ -21,6 +21,11 @@ class HTMLToJSONRenderer(renderers.JSONRenderer): media_type = "text/html" charset = "utf-8" + def render(self, data, media_type=None, renderer_context=None): + rendered_data = super().render(data, media_type, renderer_context) + renderer_context['response']['Content-Type'] = "application/json; charset=utf-8" + return rendered_data + class XMLRenderer(renderers.BaseRenderer): diff --git a/src/metax_api/tests/api/rest/base/views/schemas/read.py b/src/metax_api/tests/api/rest/base/views/schemas/read.py index 74a83f94..88122020 100755 --- a/src/metax_api/tests/api/rest/base/views/schemas/read.py +++ b/src/metax_api/tests/api/rest/base/views/schemas/read.py @@ -21,7 +21,7 @@ def test_read_schemas_list_html(self): headers = {"HTTP_ACCEPT": "text/html"} response = self.client.get("/rest/schemas", **headers) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertTrue(response._headers["content-type"][1].find("text/html") >= 0) + self.assertTrue(response._headers["content-type"][1].find("json") >= 0) def test_read_schema_retrieve_existing(self): list_response = self.client.get("/rest/schemas") From 56992bc194520f37a4dd9d861ccdc26cbe5b4032 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Mon, 12 Jul 2021 13:06:07 +0300 Subject: [PATCH 049/160] REMS documentation is now shown if REMS_ENABLED variable is set true --- config-swap-stack.yml | 1 + docker-compose.yml | 1 + docs/api/conf.py | 4 +++- docs/api/v1/datasets.rst | 2 +- docs/api/v2/datasets.rst | 2 +- 5 files changed, 7 insertions(+), 3 deletions(-) diff --git a/config-swap-stack.yml b/config-swap-stack.yml index dbb7a062..36ea2ee6 100644 --- a/config-swap-stack.yml +++ b/config-swap-stack.yml @@ -56,6 +56,7 @@ services: hostname: metax-docs environment: DOMAIN: 0.0.0.0:8008 + 
REMS_ENABLED: 'true' ports: - 8088:8000 volumes: diff --git a/docker-compose.yml b/docker-compose.yml index 09e1e661..c592c1d8 100755 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -66,6 +66,7 @@ services: hostname: metax-docs environment: DOMAIN: metax.fd-dev.csc.fi + REMS_ENABLED: 'true' ports: - 8088:8000 volumes: diff --git a/docs/api/conf.py b/docs/api/conf.py index b892e426..8cf93ade 100755 --- a/docs/api/conf.py +++ b/docs/api/conf.py @@ -31,6 +31,7 @@ domain = os.getenv("DOMAIN", "metax.fairdata.fi") branch = os.getenv("BRANCH", "master") etsin_url = os.getenv("ETSIN_URL", "etsin.fairdata.fi") +rems_enabled = os.getenv("REMS_ENABLED", 'false') # -- General configuration --------------------------------------------------- @@ -41,7 +42,7 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = [] +extensions = ['sphinx.ext.ifconfig'] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] @@ -183,5 +184,6 @@ def replace(app, docname, source): def setup(app): app.add_config_value('replacements', {}, True) + app.add_config_value('rems_enabled', 'false', 'env') app.connect('source-read', replace) app.add_css_file("custom.css") diff --git a/docs/api/v1/datasets.rst b/docs/api/v1/datasets.rst index 4b81e42a..0a7e958f 100755 --- a/docs/api/v1/datasets.rst +++ b/docs/api/v1/datasets.rst @@ -538,7 +538,7 @@ It helps to have the `research_dataset data model visualization Date: Mon, 26 Jul 2021 14:43:22 +0300 Subject: [PATCH 050/160] WIP: CSCFAIRMETA-1021, initial fix for missing index --- docs/api/conf.py | 11 +++++++++-- docs/api/v1/datasets.rst | 2 +- docs/api/v1/index.rst | 2 +- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/docs/api/conf.py b/docs/api/conf.py index 8cf93ade..c0b3b3c0 100755 --- a/docs/api/conf.py +++ b/docs/api/conf.py @@ -31,7 +31,6 @@ domain = os.getenv("DOMAIN", "metax.fairdata.fi") branch = os.getenv("BRANCH", "master") etsin_url = os.getenv("ETSIN_URL", "etsin.fairdata.fi") -rems_enabled = os.getenv("REMS_ENABLED", 'false') # -- General configuration --------------------------------------------------- @@ -77,6 +76,10 @@ "__ETSIN_ENV_BASE_URL__": f"{etsin_url}" } +tags = { + 'rems_enabled': os.getenv("REMS_ENABLED", 'false') +} + # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. 
See the documentation for @@ -181,9 +184,13 @@ def replace(app, docname, source): for key, value in app.config.replacements.items(): source[0] = source[0].replace(key, value) +def add_tags(app): + for key, value in app.config.tags.items(): + if value: + app.tags.add(key) def setup(app): app.add_config_value('replacements', {}, True) - app.add_config_value('rems_enabled', 'false', 'env') + add_tags(app) app.connect('source-read', replace) app.add_css_file("custom.css") diff --git a/docs/api/v1/datasets.rst b/docs/api/v1/datasets.rst index 0a7e958f..fd6d651b 100755 --- a/docs/api/v1/datasets.rst +++ b/docs/api/v1/datasets.rst @@ -538,7 +538,7 @@ It helps to have the `research_dataset data model visualization Date: Tue, 27 Jul 2021 11:56:08 +0300 Subject: [PATCH 051/160] fixed missing index problem and removed numeration of sections --- docs/api/conf.py | 2 +- docs/api/v2/datasets.rst | 2 +- docs/api/v2/index.rst | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/api/conf.py b/docs/api/conf.py index c0b3b3c0..27b6dd67 100755 --- a/docs/api/conf.py +++ b/docs/api/conf.py @@ -185,7 +185,7 @@ def replace(app, docname, source): source[0] = source[0].replace(key, value) def add_tags(app): - for key, value in app.config.tags.items(): + for key, value in tags.items(): if value: app.tags.add(key) diff --git a/docs/api/v2/datasets.rst b/docs/api/v2/datasets.rst index e3cd1804..0f875fcf 100755 --- a/docs/api/v2/datasets.rst +++ b/docs/api/v2/datasets.rst @@ -509,7 +509,7 @@ It helps to have the `research_dataset data model visualization Date: Tue, 27 Jul 2021 13:53:00 +0300 Subject: [PATCH 052/160] removed unused extension and changed rems_enabled from string to boolean variable --- config-swap-stack.yml | 2 +- docker-compose.yml | 2 +- docs/api/conf.py | 3 +-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/config-swap-stack.yml b/config-swap-stack.yml index 36ea2ee6..b4e176ff 100644 --- a/config-swap-stack.yml +++ b/config-swap-stack.yml @@ -56,7 +56,7 @@ services: hostname: metax-docs environment: DOMAIN: 0.0.0.0:8008 - REMS_ENABLED: 'true' + REMS_ENABLED: true ports: - 8088:8000 volumes: diff --git a/docker-compose.yml b/docker-compose.yml index c592c1d8..b4e9780e 100755 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -66,7 +66,7 @@ services: hostname: metax-docs environment: DOMAIN: metax.fd-dev.csc.fi - REMS_ENABLED: 'true' + REMS_ENABLED: true ports: - 8088:8000 volumes: diff --git a/docs/api/conf.py b/docs/api/conf.py index 27b6dd67..d52b2c26 100755 --- a/docs/api/conf.py +++ b/docs/api/conf.py @@ -41,7 +41,6 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = ['sphinx.ext.ifconfig'] # Add any paths that contain templates here, relative to this directory. 
templates_path = ["_templates"] @@ -77,7 +76,7 @@ } tags = { - 'rems_enabled': os.getenv("REMS_ENABLED", 'false') + 'rems_enabled': os.getenv("REMS_ENABLED", False) } # -- Options for HTML output ------------------------------------------------- From 35362d2e33b471dbb63157ca148d0376dfe506ca Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Tue, 27 Jul 2021 16:25:43 +0300 Subject: [PATCH 053/160] changed env variables to strings because booleans were not allowed and modified conf.py file according to comment --- config-swap-stack.yml | 2 +- docker-compose.yml | 2 +- docs/api/conf.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/config-swap-stack.yml b/config-swap-stack.yml index b4e176ff..a89850f7 100644 --- a/config-swap-stack.yml +++ b/config-swap-stack.yml @@ -56,7 +56,7 @@ services: hostname: metax-docs environment: DOMAIN: 0.0.0.0:8008 - REMS_ENABLED: true + REMS_ENABLED: "true" ports: - 8088:8000 volumes: diff --git a/docker-compose.yml b/docker-compose.yml index b4e9780e..7e37522f 100755 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -66,7 +66,7 @@ services: hostname: metax-docs environment: DOMAIN: metax.fd-dev.csc.fi - REMS_ENABLED: true + REMS_ENABLED: "true" ports: - 8088:8000 volumes: diff --git a/docs/api/conf.py b/docs/api/conf.py index d52b2c26..1b73d209 100755 --- a/docs/api/conf.py +++ b/docs/api/conf.py @@ -76,7 +76,7 @@ } tags = { - 'rems_enabled': os.getenv("REMS_ENABLED", False) + 'rems_enabled': os.getenv("REMS_ENABLED", 'False') } # -- Options for HTML output ------------------------------------------------- @@ -185,7 +185,7 @@ def replace(app, docname, source): def add_tags(app): for key, value in tags.items(): - if value: + if value.lower() in ('true'): app.tags.add(key) def setup(app): From 0ff67184e1f657e861818e872ceb4965a8b4e4e6 Mon Sep 17 00:00:00 2001 From: aptiaine Date: Thu, 29 Jul 2021 08:34:17 +0300 Subject: [PATCH 054/160] More atomic blocks added --- src/metax_api/api/rpc/v2/views/dataset_rpc.py | 23 +++++++++++-------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/src/metax_api/api/rpc/v2/views/dataset_rpc.py b/src/metax_api/api/rpc/v2/views/dataset_rpc.py index 0d587969..4ca86b70 100755 --- a/src/metax_api/api/rpc/v2/views/dataset_rpc.py +++ b/src/metax_api/api/rpc/v2/views/dataset_rpc.py @@ -87,18 +87,21 @@ def create_draft(self, request): @action(detail=False, methods=["post"], url_path="create_new_version") def create_new_version(self, request): + try: + with transaction.atomic(): + cr = self.get_object() - cr = self.get_object() - - cr.create_new_version() + cr.create_new_version() - return Response( - data={ - "id": cr.next_dataset_version.id, - "identifier": cr.next_dataset_version.identifier, - }, - status=status.HTTP_201_CREATED, - ) + return Response( + data={ + "id": cr.next_dataset_version.id, + "identifier": cr.next_dataset_version.identifier, + }, + status=status.HTTP_201_CREATED, + ) + except DatabaseError: + return Response({'error': 'Failed to create a new version'}, status=status.HTTP_400_BAD_REQUEST) @action(detail=False, methods=["post"], url_path="publish_dataset") def publish_dataset(self, request): From 3a23b5984f4e1ecc801a537227b0a4cd80ab814b Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Wed, 2 Jun 2021 17:29:34 +0300 Subject: [PATCH 055/160] wip --- src/metax_api/api/rest/v2/router.py | 5 +++-- .../api/rest/v2/serializers/api_error_serializer.py | 7 +++++++ src/metax_api/api/rest/v2/views/api_error_view.py | 7 +++++++ src/metax_api/models/api_error_model.py | 6 ++++++ 4 files 
changed, 23 insertions(+), 2 deletions(-) create mode 100644 src/metax_api/api/rest/v2/serializers/api_error_serializer.py create mode 100644 src/metax_api/api/rest/v2/views/api_error_view.py create mode 100644 src/metax_api/models/api_error_model.py diff --git a/src/metax_api/api/rest/v2/router.py b/src/metax_api/api/rest/v2/router.py index a0ea1d9a..38233004 100755 --- a/src/metax_api/api/rest/v2/router.py +++ b/src/metax_api/api/rest/v2/router.py @@ -24,13 +24,13 @@ from metax_api.api.rest.base import CustomRouter from metax_api.api.rest.base.views import ( - ApiErrorViewSet, ContractViewSet, DirectoryViewSet, FileStorageViewSet, FileViewSet, SchemaViewSet, ) +from metax_api.api.rest.v2.views import ApiErrorViewSet from .views import DataCatalogViewSet, DatasetViewSet @@ -93,7 +93,7 @@ def __init__(self, *args, **kwargs): # v2 urls, but using v1 view classes, because nothing changes router_v1 = CustomRouter(trailing_slash=False) -router_v1.register(r"apierrors/?", ApiErrorViewSet) +# router_v1.register(r"apierrors/?", ApiErrorViewSet) router_v1.register(r"contracts/?", ContractViewSet) router_v1.register(r"directories/?", DirectoryViewSet) router_v1.register(r"files/?", FileViewSet) @@ -103,6 +103,7 @@ def __init__(self, *args, **kwargs): # v2 urls, using v2 view classes with changes router_v2 = CustomRouterV2(trailing_slash=False) +router_v2.register(r"apierrors/?", ApiErrorViewSet) router_v2.register(r"datasets/?", DatasetViewSet) router_v2.register(r"datacatalogs/?", DataCatalogViewSet) router_v2.register( diff --git a/src/metax_api/api/rest/v2/serializers/api_error_serializer.py b/src/metax_api/api/rest/v2/serializers/api_error_serializer.py new file mode 100644 index 00000000..13dbfcda --- /dev/null +++ b/src/metax_api/api/rest/v2/serializers/api_error_serializer.py @@ -0,0 +1,7 @@ +from rest_framework.serializers import ModelSerializer + +class ApiErrorSerializer(ModelSerializer): + + def __init__(self, *args, **kwargs): + + super(ApiErrorSerializer, self).__init__(*args, **kwargs) \ No newline at end of file diff --git a/src/metax_api/api/rest/v2/views/api_error_view.py b/src/metax_api/api/rest/v2/views/api_error_view.py new file mode 100644 index 00000000..0eababa6 --- /dev/null +++ b/src/metax_api/api/rest/v2/views/api_error_view.py @@ -0,0 +1,7 @@ +from rest_framework.viewsets import ModelViewSet + +class ApiErrorViewSet(ModelViewSet): + + def __init__(self, *args, **kwargs): + + super(ApiErrorViewSet, self).__init__(*args, **kwargs) \ No newline at end of file diff --git a/src/metax_api/models/api_error_model.py b/src/metax_api/models/api_error_model.py new file mode 100644 index 00000000..e67799b2 --- /dev/null +++ b/src/metax_api/models/api_error_model.py @@ -0,0 +1,6 @@ +class ApiErrorModel(models.Model): + + d = models.BigAutoField(primary_key=True, editable=False) + identifier = models.CharField(max_length=200, unique=True, null=False) + error = JSONField(null=False) + date_created = models.DateTimeField() \ No newline at end of file From 85b760d78add0dbdf983ea3bfba25e9571d45a33 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Fri, 4 Jun 2021 11:07:23 +0300 Subject: [PATCH 056/160] wip2 --- src/metax_api/api/rest/v2/router.py | 1 - .../api/rest/v2/serializers/__init__.py | 1 + .../v2/serializers/api_error_serializer.py | 4 +-- src/metax_api/api/rest/v2/views/__init__.py | 1 + .../api/rest/v2/views/api_error_view.py | 16 +++++++++-- src/metax_api/migrations/0034_apierror.py | 22 +++++++++++++++ src/metax_api/models/__init__.py | 1 + src/metax_api/models/api_error.py | 
28 +++++++++++++++++++ src/metax_api/models/api_error_model.py | 6 ---- 9 files changed, 68 insertions(+), 12 deletions(-) create mode 100644 src/metax_api/migrations/0034_apierror.py create mode 100644 src/metax_api/models/api_error.py delete mode 100644 src/metax_api/models/api_error_model.py diff --git a/src/metax_api/api/rest/v2/router.py b/src/metax_api/api/rest/v2/router.py index 38233004..47b03a18 100755 --- a/src/metax_api/api/rest/v2/router.py +++ b/src/metax_api/api/rest/v2/router.py @@ -93,7 +93,6 @@ def __init__(self, *args, **kwargs): # v2 urls, but using v1 view classes, because nothing changes router_v1 = CustomRouter(trailing_slash=False) -# router_v1.register(r"apierrors/?", ApiErrorViewSet) router_v1.register(r"contracts/?", ContractViewSet) router_v1.register(r"directories/?", DirectoryViewSet) router_v1.register(r"files/?", FileViewSet) diff --git a/src/metax_api/api/rest/v2/serializers/__init__.py b/src/metax_api/api/rest/v2/serializers/__init__.py index da373ff1..0e40043a 100755 --- a/src/metax_api/api/rest/v2/serializers/__init__.py +++ b/src/metax_api/api/rest/v2/serializers/__init__.py @@ -5,5 +5,6 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT +from .api_error_serializer import ApiErrorSerializerV2 from .catalog_record_serializer import CatalogRecordSerializerV2 from .data_catalog_serializer import DataCatalogSerializerV2 diff --git a/src/metax_api/api/rest/v2/serializers/api_error_serializer.py b/src/metax_api/api/rest/v2/serializers/api_error_serializer.py index 13dbfcda..a2b2b31d 100644 --- a/src/metax_api/api/rest/v2/serializers/api_error_serializer.py +++ b/src/metax_api/api/rest/v2/serializers/api_error_serializer.py @@ -1,7 +1,7 @@ from rest_framework.serializers import ModelSerializer -class ApiErrorSerializer(ModelSerializer): +class ApiErrorSerializerV2(ModelSerializer): def __init__(self, *args, **kwargs): - super(ApiErrorSerializer, self).__init__(*args, **kwargs) \ No newline at end of file + super(ApiErrorSerializerV2, self).__init__(*args, **kwargs) \ No newline at end of file diff --git a/src/metax_api/api/rest/v2/views/__init__.py b/src/metax_api/api/rest/v2/views/__init__.py index 112f422e..10bcf92f 100755 --- a/src/metax_api/api/rest/v2/views/__init__.py +++ b/src/metax_api/api/rest/v2/views/__init__.py @@ -7,3 +7,4 @@ from .data_catalog_view import DataCatalogViewSet from .dataset_view import DatasetViewSet +from .api_error_view import ApiErrorViewSet diff --git a/src/metax_api/api/rest/v2/views/api_error_view.py b/src/metax_api/api/rest/v2/views/api_error_view.py index 0eababa6..2371d271 100644 --- a/src/metax_api/api/rest/v2/views/api_error_view.py +++ b/src/metax_api/api/rest/v2/views/api_error_view.py @@ -1,7 +1,17 @@ -from rest_framework.viewsets import ModelViewSet +from metax_api.permissions import ServicePermissions +from metax_api.api.rest.base.views import ApiErrorViewSet -class ApiErrorViewSet(ModelViewSet): +class ApiErrorViewSet(ApiErrorViewSet): + + api_type = "rest" + authentication_classes = () + permission_classes = [ServicePermissions] def __init__(self, *args, **kwargs): - super(ApiErrorViewSet, self).__init__(*args, **kwargs) \ No newline at end of file + super(ApiErrorViewSet, self).__init__(*args, **kwargs) + + # def list(self, request, *args, **kwargs): + # # hae modelista + # error_list = ApiErrorService.retrieve_error_list() + # return Response(data=error_list, status=200) \ No newline at end of file diff --git a/src/metax_api/migrations/0034_apierror.py 
b/src/metax_api/migrations/0034_apierror.py new file mode 100644 index 00000000..fa8ad746 --- /dev/null +++ b/src/metax_api/migrations/0034_apierror.py @@ -0,0 +1,22 @@ +# Generated by Django 3.1.8 on 2021-06-04 07:03 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('metax_api', '0033_auto_20210201_1551'), + ] + + operations = [ + migrations.CreateModel( + name='ApiError', + fields=[ + ('id', models.BigAutoField(editable=False, primary_key=True, serialize=False)), + ('identifier', models.CharField(max_length=200, unique=True)), + ('error', models.JSONField()), + ('date_created', models.DateTimeField()), + ], + ), + ] diff --git a/src/metax_api/models/__init__.py b/src/metax_api/models/__init__.py index 2cd13040..ebd576a7 100755 --- a/src/metax_api/models/__init__.py +++ b/src/metax_api/models/__init__.py @@ -5,6 +5,7 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT +from .api_error import ApiError from .catalog_record import AlternateRecordSet, CatalogRecord from .catalog_record_v2 import CatalogRecordV2 from .common import Common diff --git a/src/metax_api/models/api_error.py b/src/metax_api/models/api_error.py new file mode 100644 index 00000000..2748ef13 --- /dev/null +++ b/src/metax_api/models/api_error.py @@ -0,0 +1,28 @@ +from django.db.models import JSONField +from django.db import models + +class ApiError(models.Model): + + id = models.BigAutoField(primary_key=True, editable=False) + identifier = models.CharField(max_length=200, unique=True, null=False) + error = JSONField(null=False) + date_created = models.DateTimeField() + + # def save(self, *args, **kwargs): + # error_info = { + # "method": request.method, + # "user": request.user.username or "guest", + # "data": request_data, + # "headers": { + # k: v + # for k, v in request.META.items() + # if k.startswith("HTTP_") and k != "HTTP_AUTHORIZATION" + # }, + # "status_code": response.status_code, + # "response": response.data, + # "traceback": traceback.format_exc(), + # # during test case execution, RAW_URI is not set + # "url": request.META.get("RAW_URI", request.META.get("PATH_INFO", "???")), + # "identifier": "%s-%s" % (current_time[:19], str(uuid4())[:8]), + # "exception_time": current_time, + # } \ No newline at end of file diff --git a/src/metax_api/models/api_error_model.py b/src/metax_api/models/api_error_model.py deleted file mode 100644 index e67799b2..00000000 --- a/src/metax_api/models/api_error_model.py +++ /dev/null @@ -1,6 +0,0 @@ -class ApiErrorModel(models.Model): - - d = models.BigAutoField(primary_key=True, editable=False) - identifier = models.CharField(max_length=200, unique=True, null=False) - error = JSONField(null=False) - date_created = models.DateTimeField() \ No newline at end of file From 8a2708ab3eec451f02b4ce9b0fa4d54758ecec09 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Fri, 11 Jun 2021 11:31:28 +0300 Subject: [PATCH 057/160] wip3 --- .../api/rest/base/views/common_view.py | 9 +- src/metax_api/api/rest/v2/router.py | 4 +- .../v2/serializers/api_error_serializer.py | 13 ++- src/metax_api/api/rest/v2/views/__init__.py | 2 +- .../api/rest/v2/views/api_error_view.py | 83 ++++++++++++-- src/metax_api/models/api_error.py | 105 +++++++++++++++--- src/metax_api/services/api_error_service.py | 5 +- 7 files changed, 182 insertions(+), 39 deletions(-) diff --git a/src/metax_api/api/rest/base/views/common_view.py b/src/metax_api/api/rest/base/views/common_view.py index b0b771d3..d31dc43d 100755 --- 
a/src/metax_api/api/rest/base/views/common_view.py +++ b/src/metax_api/api/rest/base/views/common_view.py @@ -18,8 +18,8 @@ from metax_api.exceptions import Http400, Http403, Http500 from metax_api.permissions import EndUserPermissions, ServicePermissions +from metax_api.models import ApiError from metax_api.services import ( - ApiErrorService, CallableService, CommonService as CS, RedisCacheService, @@ -137,7 +137,9 @@ def handle_exception(self, exc): ) if type(exc) not in (Http403, Http404, PermissionDenied, MethodNotAllowed): - ApiErrorService.store_error_details(self.request, response, exc) + # ApiErrorViewSet.store_error_details(self.request, response, exc) + # luo serializer + ApiError.objects.store_error_details(self.request, response, exc) return response @@ -355,7 +357,8 @@ def _check_and_store_bulk_error(self, request, response): and save data if necessary. """ if "failed" in response.data and len(response.data["failed"]): - ApiErrorService.store_error_details(request, response, other={"bulk_request": True}) + # ApiErrorViewSet.store_error_details(request, response, other={"bulk_request": True}) + ApiError.objects.store_error_details(request, response, other={"bulk_request": True}) def get_api_name(self): """ diff --git a/src/metax_api/api/rest/v2/router.py b/src/metax_api/api/rest/v2/router.py index 47b03a18..46e703d5 100755 --- a/src/metax_api/api/rest/v2/router.py +++ b/src/metax_api/api/rest/v2/router.py @@ -30,7 +30,7 @@ FileViewSet, SchemaViewSet, ) -from metax_api.api.rest.v2.views import ApiErrorViewSet +from metax_api.api.rest.v2.views import ApiErrorViewSetV2 from .views import DataCatalogViewSet, DatasetViewSet @@ -102,7 +102,7 @@ def __init__(self, *args, **kwargs): # v2 urls, using v2 view classes with changes router_v2 = CustomRouterV2(trailing_slash=False) -router_v2.register(r"apierrors/?", ApiErrorViewSet) +router_v2.register(r"apierrors/?", ApiErrorViewSetV2) router_v2.register(r"datasets/?", DatasetViewSet) router_v2.register(r"datacatalogs/?", DataCatalogViewSet) router_v2.register( diff --git a/src/metax_api/api/rest/v2/serializers/api_error_serializer.py b/src/metax_api/api/rest/v2/serializers/api_error_serializer.py index a2b2b31d..95e389f6 100644 --- a/src/metax_api/api/rest/v2/serializers/api_error_serializer.py +++ b/src/metax_api/api/rest/v2/serializers/api_error_serializer.py @@ -1,7 +1,14 @@ from rest_framework.serializers import ModelSerializer -class ApiErrorSerializerV2(ModelSerializer): +from metax_api.models import ApiError - def __init__(self, *args, **kwargs): +class ApiErrorSerializerV2(ModelSerializer): + class Meta: + model = ApiError - super(ApiErrorSerializerV2, self).__init__(*args, **kwargs) \ No newline at end of file + fields = ( + "id", + "identifier", + "error", + "date_created" + ) \ No newline at end of file diff --git a/src/metax_api/api/rest/v2/views/__init__.py b/src/metax_api/api/rest/v2/views/__init__.py index 10bcf92f..eeb06648 100755 --- a/src/metax_api/api/rest/v2/views/__init__.py +++ b/src/metax_api/api/rest/v2/views/__init__.py @@ -7,4 +7,4 @@ from .data_catalog_view import DataCatalogViewSet from .dataset_view import DatasetViewSet -from .api_error_view import ApiErrorViewSet +from .api_error_view import ApiErrorViewSetV2 diff --git a/src/metax_api/api/rest/v2/views/api_error_view.py b/src/metax_api/api/rest/v2/views/api_error_view.py index 2371d271..0f9941ab 100644 --- a/src/metax_api/api/rest/v2/views/api_error_view.py +++ b/src/metax_api/api/rest/v2/views/api_error_view.py @@ -1,17 +1,78 @@ -from 
metax_api.permissions import ServicePermissions +# This file is part of the Metax API service +# +# Copyright 2017-2018 Ministry of Education and Culture, Finland +# +# :author: CSC - IT Center for Science Ltd., Espoo Finland +# :license: MIT + +import logging + +from django.http import Http404 +from rest_framework.decorators import action +from rest_framework.response import Response + from metax_api.api.rest.base.views import ApiErrorViewSet +from metax_api.models import ApiError +from metax_api.permissions import ServicePermissions -class ApiErrorViewSet(ApiErrorViewSet): +from ..serializers.api_error_serializer import ApiErrorSerializerV2 + +_logger = logging.getLogger(__name__) + +class ApiErrorViewSetV2(ApiErrorViewSet): - api_type = "rest" - authentication_classes = () permission_classes = [ServicePermissions] + queryset = ApiError.objects.all() + serializer_class = ApiErrorSerializerV2 + + # do views need serializing? + + @action(detail=False, methods=["post"], url_path="flush") + def flush_errors(self, request): + """ + Delete all errors from database. + """ + _logger.info("%s called by %s" % (request.META["PATH_INFO"], request.user.username)) + errors = ApiError.objects.all() + errors_deleted_count = len(errors) + errors.delete() + return Response(data={"errors_deleted": errors_deleted_count}, status=200) + + def destroy(self, request, *args, **kwargs): + """ + Delete a single error from database. + """ + _logger.info("DELETE %s called by %s" % (request.META["PATH_INFO"], request.user.username)) + error = ApiError.objects.get(identifier=kwargs["pk"]) + error.delete() + return Response(status=204) - def __init__(self, *args, **kwargs): + def retrieve(self, request, *args, **kwargs): + """ + Retrieve complete data about a single error. + """ + try: + error_details = ApiError.objects.get(identifier=kwargs["pk"]) + except: + raise Http404 + return Response(data=error_details, status=200) - super(ApiErrorViewSet, self).__init__(*args, **kwargs) - - # def list(self, request, *args, **kwargs): - # # hae modelista - # error_list = ApiErrorService.retrieve_error_list() - # return Response(data=error_list, status=200) \ No newline at end of file + def list(self, request, *args, **kwargs): + """ + List all errors. Data is cleaned up a bit for easier browsing. 
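+        Long response and traceback values are truncated to 200 characters; request "data" and "headers" are dropped.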
+ """ + errors = ApiError.objects.all() + error_list = [] + for error in errors: + error_details = error.error + error_details.pop("data", None) + error_details.pop("headers", None) + if len(str(error_details["response"])) > 200: + error_details["response"] = ( + "%s ...(first 200 characters)" % str(error_details["response"])[:200] + ) + error_details["traceback"] = ( + "(last 200 characters) ...%s" % error_details["traceback"][-200:] + ) + error_list.append(error) + return Response(data=error_list, status=200) \ No newline at end of file diff --git a/src/metax_api/models/api_error.py b/src/metax_api/models/api_error.py index 2748ef13..516adf34 100644 --- a/src/metax_api/models/api_error.py +++ b/src/metax_api/models/api_error.py @@ -1,6 +1,92 @@ +# This file is part of the Metax API service +# +# Copyright 2017-2018 Ministry of Education and Culture, Finland +# +# :author: CSC - IT Center for Science Ltd., Espoo Finland +# :license: MIT + +import logging +import traceback +from uuid import uuid4 + from django.db.models import JSONField from django.db import models +from metax_api.utils import executing_test_case, get_tz_aware_now_without_micros, json_logger + +# from rest_framework.parsers import JSONParser + +_logger = logging.getLogger(__name__) + +class ApiErrorManager(models.Manager): + def store_error_details(self, request, response, exception=None, other={}): + """ + Store error and request details to database. + """ + current_time = str(get_tz_aware_now_without_micros()).replace(" ", "T") + + if request.method in ("POST", "PUT", "PATCH"): + # cast possible datetime objects to strings, because those cant be json-serialized... + request_data = request.data + for date_field in ("date_modified", "date_created"): + if isinstance(request_data, list): + for item in request_data: + if isinstance(item, dict) and date_field in item: + item[date_field] = str(item[date_field]) + elif isinstance(request_data, dict) and date_field in request_data: + request_data[date_field] = str(request_data[date_field]) + else: + pass + else: + request_data = None + + error_info = { + "method": request.method, + "user": request.user.username or "guest", + "data": request_data, + "headers": { + k: v + for k, v in request.META.items() + if k.startswith("HTTP_") and k != "HTTP_AUTHORIZATION" + }, + "status_code": response.status_code, + "response": response.data, + "traceback": traceback.format_exc(), + # during test case execution, RAW_URI is not set + "url": request.META.get("RAW_URI", request.META.get("PATH_INFO", "???")), + "identifier": "%s-%s" % (current_time[:19], str(uuid4())[:8]), + "exception_time": current_time, + } + + if other: + # may contain info that the request was a bulk operation + error_info["other"] = {k: v for k, v in other.items()} + if "bulk_request" in other: + error_info["other"]["data_row_count"] = len(request_data) + + try: + error = self.create(identifier=error_info["identifier"], error=error_info, date_created=current_time) + error.save() + return error + except: + _logger.exception("Failed to save error info...") + else: + response.data["error_identifier"] = error_info["identifier"] + + if response.status_code == 500: + + json_logger.error( + event="api_exception", + error={ + "error_identifier": error_info["identifier"], + "status_code": response.status_code, + "traceback": error_info["traceback"], + }, + ) + + if executing_test_case(): + response.data["traceback"] = traceback.format_exc() + class ApiError(models.Model): id = models.BigAutoField(primary_key=True, editable=False) @@ 
-8,21 +94,4 @@ class ApiError(models.Model):
     error = JSONField(null=False)
     date_created = models.DateTimeField()
 
-    # def save(self, *args, **kwargs):
-    #     error_info = {
-    #         "method": request.method,
-    #         "user": request.user.username or "guest",
-    #         "data": request_data,
-    #         "headers": {
-    #             k: v
-    #             for k, v in request.META.items()
-    #             if k.startswith("HTTP_") and k != "HTTP_AUTHORIZATION"
-    #         },
-    #         "status_code": response.status_code,
-    #         "response": response.data,
-    #         "traceback": traceback.format_exc(),
-    #         # during test case execution, RAW_URI is not set
-    #         "url": request.META.get("RAW_URI", request.META.get("PATH_INFO", "???")),
-    #         "identifier": "%s-%s" % (current_time[:19], str(uuid4())[:8]),
-    #         "exception_time": current_time,
-    #     }
\ No newline at end of file
+    objects = ApiErrorManager()
\ No newline at end of file
diff --git a/src/metax_api/services/api_error_service.py b/src/metax_api/services/api_error_service.py
index a15b1a94..0a37caea 100755
--- a/src/metax_api/services/api_error_service.py
+++ b/src/metax_api/services/api_error_service.py
@@ -54,6 +54,9 @@ def retrieve_error_list():
         error_files = listdir(settings.ERROR_FILES_PATH)
         error_list = []
 
+        # serializer to representation
+        # apierrorlistserializer, apierrorsingleserializer
+
         for ef in error_files:
             with open("%s/%s" % (settings.ERROR_FILES_PATH, ef), "r") as f:
                 error_details = json_load(f)
@@ -124,7 +127,7 @@ def store_error_details(request, response, exception=None, other={}):
         except:
             _logger.exception("Failed to save error info...")
        else:
-
+            # if no exception occurred
             response.data["error_identifier"] = error_info["identifier"]
 
         if response.status_code == 500:
From 642233a536ad63f2a952c20f6191beb42d4414a8 Mon Sep 17 00:00:00 2001
From: Tommi Pulli
Date: Wed, 23 Jun 2021 13:07:00 +0300
Subject: [PATCH 058/160] CSCFAIRMETA-1093: Fix rabbitmq to use connection for
 each request

- Prevents issues with multiple parallel requests that are publishing to
  RabbitMQ

---
 src/metax_api/services/rabbitmq_service.py | 36 +++++++++++++---------
 1 file changed, 22 insertions(+), 14 deletions(-)

diff --git a/src/metax_api/services/rabbitmq_service.py b/src/metax_api/services/rabbitmq_service.py
index 1ef10e90..ffd36507 100755
--- a/src/metax_api/services/rabbitmq_service.py
+++ b/src/metax_api/services/rabbitmq_service.py
@@ -28,11 +28,13 @@ def __init__(self):
             self._settings["USER"], self._settings["PASSWORD"]
         )
         self._hosts = self._settings["HOSTS"]
-        self._connection = None
 
     def _connect(self):
-        if self._connection and self._connection.is_open:
-            return
+        """
+        Creates and returns a new BlockingConnection for the caller. Creating a new connection enables multiple
+        threads (i.e. requests) to access RabbitMQ in parallel using synchronous connections. Even though initializing
+        a new connection for each request is slow and has some overhead, thread-safety is more important.
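+        Callers are responsible for closing the returned connection when done.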
+ """ # Connection retries are needed as long as there is no load balancer in front of rabbitmq-server VMs sleep_time = 1 @@ -51,7 +53,7 @@ def _connect(self): kwarg_params["virtual_host"] = self._settings["VHOST"] conn_params = pika.ConnectionParameters(host, **kwarg_params) - self._connection = pika.BlockingConnection(conn_params) + connection = pika.BlockingConnection(conn_params) except Exception as e: _logger.error( @@ -59,9 +61,9 @@ def _connect(self): ) sleep(sleep_time) else: - self._channel = self._connection.channel() _logger.info("RabbitMQ connected to %s" % host) - break + + return connection else: raise Exception("Unable to connect to RabbitMQ") @@ -79,7 +81,8 @@ def publish(self, body, routing_key="", exchange=None, persistent=True): otherwise messages not retrieved by clients before restart will be lost. (still is not 100 % guaranteed to persist!) """ - self._connect() + connection = self._connect() + channel = connection.channel() self._validate_publish_params(routing_key, exchange) additional_args = {} @@ -95,7 +98,7 @@ def publish(self, body, routing_key="", exchange=None, persistent=True): for message in messages: if isinstance(message, dict): message = json_dumps(message, cls=DjangoJSONEncoder) - self._channel.basic_publish( + channel.basic_publish( body=message, routing_key=routing_key, exchange=exchange, @@ -106,7 +109,7 @@ def publish(self, body, routing_key="", exchange=None, persistent=True): _logger.error("Unable to publish message to RabbitMQ") raise finally: - self._connection.close() + connection.close() def init_exchanges(self): """ @@ -114,18 +117,23 @@ def init_exchanges(self): an error will occur if an exchange existed, and it is being re-declared with different settings. In that case the exchange has to be manually removed first, which can result in lost messages. 
""" - self._connect() + connection = self._connect() + + if connection is None: + return + + channel = connection.channel() try: for exchange in self._settings["EXCHANGES"]: - self._channel.exchange_declare( + channel.exchange_declare( exchange["NAME"], exchange_type=exchange["TYPE"], durable=exchange["DURABLE"], ) for queue in exchange.get("QUEUES", []): # declare queues in settings - self._channel.queue_declare(queue["NAME"], durable=exchange["DURABLE"]) - self._channel.queue_bind( + channel.queue_declare(queue["NAME"], durable=exchange["DURABLE"]) + channel.queue_bind( queue["NAME"], exchange["NAME"], queue.get("ROUTING_KEY") ) except Exception as e: @@ -133,7 +141,7 @@ def init_exchanges(self): _logger.exception("Failed to initialize RabbitMQ exchanges") raise finally: - self._connection.close() + connection.close() def _validate_publish_params(self, routing_key, exchange_name): """ From 59bb1f592898ccac379ef0a288fc0db3bf507f4d Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Wed, 23 Jun 2021 13:29:32 +0300 Subject: [PATCH 059/160] CSCFAIRMETA-1093: Add ApiError handling from rabbitmq - ApiErrors are sent to rabbitmq queue that is consumed later to create the ApiErrors to db --- .../api/rest/base/views/common_view.py | 9 +++- .../v2/serializers/api_error_serializer.py | 52 ++++++++++++++++++- src/metax_api/onappstart.py | 1 + src/metax_api/services/rabbitmq_service.py | 30 ++++++++++- src/metax_api/settings/components/rabbitmq.py | 10 ++++ src/metax_api/signals/__init__.py | 1 + src/metax_api/signals/request_finished.py | 10 ++++ 7 files changed, 110 insertions(+), 3 deletions(-) create mode 100644 src/metax_api/signals/__init__.py create mode 100644 src/metax_api/signals/request_finished.py diff --git a/src/metax_api/api/rest/base/views/common_view.py b/src/metax_api/api/rest/base/views/common_view.py index d31dc43d..31f4ec27 100755 --- a/src/metax_api/api/rest/base/views/common_view.py +++ b/src/metax_api/api/rest/base/views/common_view.py @@ -16,12 +16,14 @@ from rest_framework.views import set_rollback from rest_framework.viewsets import ModelViewSet +from metax_api.api.rest.v2.serializers import ApiErrorSerializerV2 from metax_api.exceptions import Http400, Http403, Http500 from metax_api.permissions import EndUserPermissions, ServicePermissions from metax_api.models import ApiError from metax_api.services import ( CallableService, CommonService as CS, + RabbitMQService as rabbitmq, RedisCacheService, ) @@ -139,7 +141,12 @@ def handle_exception(self, exc): if type(exc) not in (Http403, Http404, PermissionDenied, MethodNotAllowed): # ApiErrorViewSet.store_error_details(self.request, response, exc) # luo serializer - ApiError.objects.store_error_details(self.request, response, exc) + try: + error_json = ApiErrorSerializerV2.to_rabbitmq_json(self.request, response) + response.data["error_identifier"] = error_json["identifier"] + rabbitmq.publish(error_json, exchange="apierrors") + except Exception as e: + _logger.error(f"could not send api error to rabbitmq. 
Error: {e}") return response diff --git a/src/metax_api/api/rest/v2/serializers/api_error_serializer.py b/src/metax_api/api/rest/v2/serializers/api_error_serializer.py index 95e389f6..9960bde1 100644 --- a/src/metax_api/api/rest/v2/serializers/api_error_serializer.py +++ b/src/metax_api/api/rest/v2/serializers/api_error_serializer.py @@ -1,6 +1,11 @@ +import traceback +from uuid import uuid4 + from rest_framework.serializers import ModelSerializer from metax_api.models import ApiError +from metax_api.utils import get_tz_aware_now_without_micros + class ApiErrorSerializerV2(ModelSerializer): class Meta: @@ -11,4 +16,49 @@ class Meta: "identifier", "error", "date_created" - ) \ No newline at end of file + ) + + @staticmethod + def to_rabbitmq_json(request, response, other={}): + current_time = str(get_tz_aware_now_without_micros()).replace(" ", "T") + + if request.method in ("POST", "PUT", "PATCH"): + # cast possible datetime objects to strings, because those cant be json-serialized... + request_data = request.data + for date_field in ("date_modified", "date_created"): + if isinstance(request_data, list): + for item in request_data: + if isinstance(item, dict) and date_field in item: + item[date_field] = str(item[date_field]) + elif isinstance(request_data, dict) and date_field in request_data: + request_data[date_field] = str(request_data[date_field]) + else: + pass + else: + request_data = None + + error_info = { + "method": request.method, + "user": request.user.username or "guest", + "data": request_data, + "headers": { + k: v + for k, v in request.META.items() + if k.startswith("HTTP_") and k != "HTTP_AUTHORIZATION" + }, + "status_code": response.status_code, + "response": response.data, + "traceback": traceback.format_exc(), + # during test case execution, RAW_URI is not set + "url": request.META.get("RAW_URI", request.META.get("PATH_INFO", "???")), + "identifier": "%s-%s" % (current_time[:19], str(uuid4())[:8]), + "exception_time": current_time, + } + + if other: + # may contain info that the request was a bulk operation + error_info["other"] = {k: v for k, v in other.items()} + if "bulk_request" in other: + error_info["other"]["data_row_count"] = len(request_data) + + return error_info diff --git a/src/metax_api/onappstart.py b/src/metax_api/onappstart.py index 47816fc7..2a0346c6 100755 --- a/src/metax_api/onappstart.py +++ b/src/metax_api/onappstart.py @@ -40,6 +40,7 @@ def ready(self): # pragma: no cover # because the "django apps" have not been loaded yet. 
import json + import metax_api.signals # noqa from metax_api.services import RabbitMQService as rabbitmq from metax_api.services.redis_cache_service import RedisClient diff --git a/src/metax_api/services/rabbitmq_service.py b/src/metax_api/services/rabbitmq_service.py index ffd36507..5140dfa2 100755 --- a/src/metax_api/services/rabbitmq_service.py +++ b/src/metax_api/services/rabbitmq_service.py @@ -7,13 +7,15 @@ import logging import random -from json import dumps as json_dumps +from json import dumps as json_dumps, loads from time import sleep import pika +from django.db import DatabaseError from django.conf import settings from django.core.serializers.json import DjangoJSONEncoder +from metax_api.models import ApiError from metax_api.utils.utils import executing_test_case _logger = logging.getLogger(__name__) @@ -111,6 +113,29 @@ def publish(self, body, routing_key="", exchange=None, persistent=True): finally: connection.close() + def consume_api_errors(self): + connection = self._connect() + channel = connection.channel() + + try: + for method, _, body in channel.consume("metax-apierrors", inactivity_timeout=1): + if method is None and body is None: + channel.cancel() + break + try: + error = loads(body) + ApiError.objects.create(identifier=error["identifier"], error=error) + except DatabaseError as e: + _logger.error("cannot create API Error. Discarding..") + _logger.debug(f"error: {e}") + finally: + channel.basic_ack(method.delivery_tag) + except Exception as e: + _logger.error(e) + finally: + _logger.debug("All ApiErrors were handled") + connection.close() + def init_exchanges(self): """ Declare the exchanges specified in settings. Re-declaring existing exchanges does no harm, but @@ -178,6 +203,9 @@ def publish(self, body, routing_key="", exchange="datasets", persistent=True): def init_exchanges(self, *args, **kwargs): pass + def consume_api_errors(self): + pass + if executing_test_case(): RabbitMQService = _RabbitMQServiceDummy() diff --git a/src/metax_api/settings/components/rabbitmq.py b/src/metax_api/settings/components/rabbitmq.py index 4e4a2f37..f4d04108 100755 --- a/src/metax_api/settings/components/rabbitmq.py +++ b/src/metax_api/settings/components/rabbitmq.py @@ -23,6 +23,16 @@ } ], }, + { + "NAME": "apierrors", + "TYPE": "fanout", + "DURABLE": True, + "QUEUES": [ + { + "NAME": "metax-apierrors" + } + ] + } ], } RABBIT_MQ_USE_VHOST = env("RABBIT_MQ_USE_VHOST") diff --git a/src/metax_api/signals/__init__.py b/src/metax_api/signals/__init__.py new file mode 100644 index 00000000..9bb77660 --- /dev/null +++ b/src/metax_api/signals/__init__.py @@ -0,0 +1 @@ +from .request_finished import * \ No newline at end of file diff --git a/src/metax_api/signals/request_finished.py b/src/metax_api/signals/request_finished.py new file mode 100644 index 00000000..29020c8d --- /dev/null +++ b/src/metax_api/signals/request_finished.py @@ -0,0 +1,10 @@ +from django.core.signals import request_finished +from django.dispatch import receiver + +from metax_api.services.rabbitmq_service import RabbitMQService + + +@receiver(request_finished) +def handle_exceptions(sender, **kwargs): + # Request has already been sent back so stuff done here are not prolonging the request handling + RabbitMQService.consume_api_errors() From 9322504db84fb8981648553e5b38a0afb14f656d Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Wed, 23 Jun 2021 13:33:31 +0300 Subject: [PATCH 060/160] CSCFAIRMETA-1093: [FIX] add default value for ApiError date created --- src/metax_api/migrations/0034_apierror.py | 3 ++- 
src/metax_api/models/api_error.py | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/metax_api/migrations/0034_apierror.py b/src/metax_api/migrations/0034_apierror.py index fa8ad746..7329870a 100644 --- a/src/metax_api/migrations/0034_apierror.py +++ b/src/metax_api/migrations/0034_apierror.py @@ -1,5 +1,6 @@ # Generated by Django 3.1.8 on 2021-06-04 07:03 +from datetime import datetime from django.db import migrations, models @@ -16,7 +17,7 @@ class Migration(migrations.Migration): ('id', models.BigAutoField(editable=False, primary_key=True, serialize=False)), ('identifier', models.CharField(max_length=200, unique=True)), ('error', models.JSONField()), - ('date_created', models.DateTimeField()), + ('date_created', models.DateTimeField(default=datetime.now)), ], ), ] diff --git a/src/metax_api/models/api_error.py b/src/metax_api/models/api_error.py index 516adf34..f986d97e 100644 --- a/src/metax_api/models/api_error.py +++ b/src/metax_api/models/api_error.py @@ -92,6 +92,6 @@ class ApiError(models.Model): id = models.BigAutoField(primary_key=True, editable=False) identifier = models.CharField(max_length=200, unique=True, null=False) error = JSONField(null=False) - date_created = models.DateTimeField() + date_created = models.DateTimeField(default=get_tz_aware_now_without_micros) - objects = ApiErrorManager() \ No newline at end of file + objects = ApiErrorManager() From 34df64dd1aa94ceb4b739adbcd76724883673381 Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Tue, 29 Jun 2021 14:49:16 +0300 Subject: [PATCH 061/160] CSCFAIRMETA-1093: add rabbitmq mocking for apierrors --- .../api/rest/base/views/apierrors/read.py | 40 +++++++++++++++---- 1 file changed, 33 insertions(+), 7 deletions(-) diff --git a/src/metax_api/tests/api/rest/base/views/apierrors/read.py b/src/metax_api/tests/api/rest/base/views/apierrors/read.py index a3338f29..640eb65d 100755 --- a/src/metax_api/tests/api/rest/base/views/apierrors/read.py +++ b/src/metax_api/tests/api/rest/base/views/apierrors/read.py @@ -5,14 +5,14 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT import logging -from os import makedirs -from shutil import rmtree +from unittest.mock import patch +from uuid import uuid4 -from django.conf import settings from django.core.management import call_command from rest_framework import status from rest_framework.test import APITestCase +from metax_api.models import ApiError from metax_api.tests.utils import TestClassUtils, test_data_file_path, testcase_log_console _logger = logging.getLogger(__name__) @@ -34,10 +34,35 @@ def setUpClass(cls): def setUp(self): super(ApiErrorReadBasicTests, self).setUp() - rmtree(settings.ERROR_FILES_PATH, ignore_errors=True) - makedirs(settings.ERROR_FILES_PATH) self._use_http_authorization(username="metax") + def mock_api_error_consume(self): + """ + ApiErrors are created when fetching Rabbitmq queue but when running testcases Rabbitmq is not available. + This mocks the consume part while the publishing part is actually not doing anything. 
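+        The sample error below mirrors the structure produced by ApiErrorSerializerV2.request_to_json.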
+ """ + error = { + "method": "POST", + "user": "metax", + "data": { "metadata_owner_org": "abc-org-123" }, + "headers": { + "HTTP_COOKIE": "" + }, + "status_code": 400, + "response": { + "data_catalog": [ + "ErrorDetail(string=This field is required., code=required)" + ] + }, + "traceback": + "Traceback(most recent call last): File/usr/local/lib/python3.8/site-packages/rest_framework/views.py", + "url": "/rest/datasets", + "identifier": f"2021-06-29T11:10:54-{str(uuid4())[:8]}", + "exception_time": "2021-06-29T11:10:54+00:00" + } + + ApiError.objects.create(identifier=error["identifier"], error=error) + def _assert_fields_presence(self, response): """ Check presence and absence of some key information. @@ -70,6 +95,7 @@ def test_list_errors(self): response = self.client.get("/rest/apierrors") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) + @patch("metax_api.services.rabbitmq_service._RabbitMQServiceDummy.consume_api_errors", mock_api_error_consume) def test_get_error_details(self): cr_1 = self.client.get("/rest/datasets/1").data cr_1.pop("id") @@ -81,11 +107,11 @@ def test_get_error_details(self): self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # list errors in order to get error identifier - response = self.client.get("/rest/apierrors") + response = self.client.get("/rest/v2/apierrors") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual("identifier" in response.data[0], True, response.data) - response = self.client.get("/rest/apierrors/%s" % response.data[0]["identifier"]) + response = self.client.get("/rest/v2/apierrors/%s" % response.data[0]["identifier"]) self._assert_fields_presence(response) self.assertEqual( "data_catalog" in response.data["response"], True, response.data["response"] From 72b663944bce9982e4b74aed9e6f32bc8bff8164 Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Tue, 29 Jun 2021 16:43:40 +0300 Subject: [PATCH 062/160] CSCFAIRMETA-1093: Fix apierror assertions to match the model --- .../api/rest/base/views/apierrors/read.py | 33 ++++++++----------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/src/metax_api/tests/api/rest/base/views/apierrors/read.py b/src/metax_api/tests/api/rest/base/views/apierrors/read.py index 640eb65d..4f9ada13 100755 --- a/src/metax_api/tests/api/rest/base/views/apierrors/read.py +++ b/src/metax_api/tests/api/rest/base/views/apierrors/read.py @@ -63,19 +63,19 @@ def mock_api_error_consume(self): ApiError.objects.create(identifier=error["identifier"], error=error) - def _assert_fields_presence(self, response): + def _assert_fields_presence(self, error_json): """ Check presence and absence of some key information. 
""" - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual("data" in response.data, True, response.data) - self.assertEqual("response" in response.data, True, response.data) - self.assertEqual("traceback" in response.data, True, response.data) - self.assertEqual("url" in response.data, True, response.data) + self.assertEqual("identifier" in error_json, True, error_json) + self.assertEqual("data" in error_json, True, error_json) + self.assertEqual("response" in error_json, True, error_json) + self.assertEqual("traceback" in error_json, True, error_json) + self.assertEqual("url" in error_json, True, error_json) self.assertEqual( - "HTTP_AUTHORIZATION" in response.data["headers"], + "HTTP_AUTHORIZATION" in error_json["headers"], False, - response.data["headers"], + error_json["headers"], ) def test_list_errors(self): @@ -107,20 +107,13 @@ def test_get_error_details(self): self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) # list errors in order to get error identifier - response = self.client.get("/rest/v2/apierrors") + response = self.client.get("/rest/v2/apierrors/1") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual("identifier" in response.data[0], True, response.data) - response = self.client.get("/rest/v2/apierrors/%s" % response.data[0]["identifier"]) - self._assert_fields_presence(response) - self.assertEqual( - "data_catalog" in response.data["response"], True, response.data["response"] - ) - self.assertEqual( - response.data["data"]["research_dataset"]["title"]["en"], - "Abc", - response.data["data"]["research_dataset"]["title"], - ) + error_json = response.data["error"] + + self._assert_fields_presence(error_json) + self.assertTrue("data_catalog" in error_json["response"], error_json["response"]) @testcase_log_console(_logger) def test_delete_error_details(self): From aeb0c8ba5e6c53d3df496835437e9fe8cb4634ed Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Thu, 1 Jul 2021 11:47:37 +0300 Subject: [PATCH 063/160] fixed api error handling for v2, added tests and deleted v1 api error files --- src/metax_api/api/rest/base/router.py | 4 +- src/metax_api/api/rest/base/views/__init__.py | 1 - .../api/rest/base/views/api_error_view.py | 84 ---------- .../api/rest/base/views/common_view.py | 13 +- .../v2/serializers/api_error_serializer.py | 9 +- .../api/rest/v2/views/api_error_view.py | 69 +++++++-- src/metax_api/models/api_error.py | 81 +--------- src/metax_api/services/__init__.py | 1 - src/metax_api/services/api_error_service.py | 145 ------------------ src/metax_api/services/rabbitmq_service.py | 2 +- .../{base => v2}/views/apierrors/__init__.py | 0 .../rest/{base => v2}/views/apierrors/read.py | 81 ++++++---- 12 files changed, 123 insertions(+), 367 deletions(-) delete mode 100755 src/metax_api/api/rest/base/views/api_error_view.py delete mode 100755 src/metax_api/services/api_error_service.py rename src/metax_api/tests/api/rest/{base => v2}/views/apierrors/__init__.py (100%) rename src/metax_api/tests/api/rest/{base => v2}/views/apierrors/read.py (67%) diff --git a/src/metax_api/api/rest/base/router.py b/src/metax_api/api/rest/base/router.py index 146ff0a1..742103a3 100755 --- a/src/metax_api/api/rest/base/router.py +++ b/src/metax_api/api/rest/base/router.py @@ -23,7 +23,6 @@ from rest_framework.routers import DefaultRouter, Route from .views import ( - ApiErrorViewSet, ContractViewSet, DataCatalogViewSet, DatasetViewSet, @@ -32,6 +31,7 @@ FileViewSet, SchemaViewSet, ) +from 
metax_api.api.rest.v2.views import ApiErrorViewSetV2 class CustomRouter(DefaultRouter): @@ -67,7 +67,7 @@ def get_default_basename(self, viewset): router = CustomRouter(trailing_slash=False) -router.register(r"apierrors/?", ApiErrorViewSet) +router.register(r"apierrors/?", ApiErrorViewSetV2) router.register(r"contracts/?", ContractViewSet) router.register(r"datasets/?", DatasetViewSet) router.register(r"datacatalogs/?", DataCatalogViewSet) diff --git a/src/metax_api/api/rest/base/views/__init__.py b/src/metax_api/api/rest/base/views/__init__.py index 760b837e..d5c62dd9 100755 --- a/src/metax_api/api/rest/base/views/__init__.py +++ b/src/metax_api/api/rest/base/views/__init__.py @@ -5,7 +5,6 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT -from .api_error_view import ApiErrorViewSet from .common_view import CommonViewSet from .contract_view import ContractViewSet from .data_catalog_view import DataCatalogViewSet diff --git a/src/metax_api/api/rest/base/views/api_error_view.py b/src/metax_api/api/rest/base/views/api_error_view.py deleted file mode 100755 index 1fd58da1..00000000 --- a/src/metax_api/api/rest/base/views/api_error_view.py +++ /dev/null @@ -1,84 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -import logging - -from django.http import Http404 -from rest_framework.decorators import action -from rest_framework.response import Response - -from metax_api.exceptions import Http403, Http501 -from metax_api.services import ApiErrorService - -from ..serializers import FileSerializer -from .common_view import CommonViewSet - -""" -An API to browse error files and retrieve complete error details. - -Allows only reading and deleting by user 'metax', inaccessible to everyone else. - -Not connected to the DB in any way, deals directly with files saved in the designated -error file location. -""" - - -_logger = logging.getLogger(__name__) - - -class ApiErrorViewSet(CommonViewSet): - - filter_backends = () - - # this serves no purpose, but strangely in local dev browsable api for /rest/apierrors/pid - # works fine, while in metax-test it will throw an error complaining about a missing serializer. 
- serializer_class = FileSerializer - - def initial(self, request, *args, **kwargs): - if request.user.username != "metax": - raise Http403 - return super(ApiErrorViewSet, self).initial(request, *args, **kwargs) - - def get_queryset(self): - return self.list(None) - - def retrieve(self, request, *args, **kwargs): - try: - error_details = ApiErrorService.retrieve_error_details(kwargs["pk"]) - except: - raise Http404 - return Response(data=error_details, status=200) - - def list(self, request, *args, **kwargs): - error_list = ApiErrorService.retrieve_error_list() - return Response(data=error_list, status=200) - - def destroy(self, request, *args, **kwargs): - _logger.info("DELETE %s called by %s" % (request.META["PATH_INFO"], request.user.username)) - ApiErrorService.remove_error_file(kwargs["pk"]) - return Response(status=204) - - @action(detail=False, methods=["post"], url_path="flush") - def flush_errors(self, request): - _logger.info("%s called by %s" % (request.META["PATH_INFO"], request.user.username)) - files_deleted_count = ApiErrorService.flush_errors() - return Response(data={"files_deleted": files_deleted_count}, status=200) - - def update(self, request, *args, **kwargs): - raise Http501() - - def update_bulk(self, request, *args, **kwargs): - raise Http501() - - def partial_update(self, request, *args, **kwargs): - raise Http501() - - def partial_update_bulk(self, request, *args, **kwargs): - raise Http501() - - def create(self, request, *args, **kwargs): - raise Http501() diff --git a/src/metax_api/api/rest/base/views/common_view.py b/src/metax_api/api/rest/base/views/common_view.py index 31f4ec27..ce31ee14 100755 --- a/src/metax_api/api/rest/base/views/common_view.py +++ b/src/metax_api/api/rest/base/views/common_view.py @@ -19,7 +19,6 @@ from metax_api.api.rest.v2.serializers import ApiErrorSerializerV2 from metax_api.exceptions import Http400, Http403, Http500 from metax_api.permissions import EndUserPermissions, ServicePermissions -from metax_api.models import ApiError from metax_api.services import ( CallableService, CommonService as CS, @@ -139,10 +138,8 @@ def handle_exception(self, exc): ) if type(exc) not in (Http403, Http404, PermissionDenied, MethodNotAllowed): - # ApiErrorViewSet.store_error_details(self.request, response, exc) - # luo serializer try: - error_json = ApiErrorSerializerV2.to_rabbitmq_json(self.request, response) + error_json = ApiErrorSerializerV2.request_to_json(self.request, response) response.data["error_identifier"] = error_json["identifier"] rabbitmq.publish(error_json, exchange="apierrors") except Exception as e: @@ -364,8 +361,12 @@ def _check_and_store_bulk_error(self, request, response): and save data if necessary. """ if "failed" in response.data and len(response.data["failed"]): - # ApiErrorViewSet.store_error_details(request, response, other={"bulk_request": True}) - ApiError.objects.store_error_details(request, response, other={"bulk_request": True}) + try: + error_json = ApiErrorSerializerV2.request_to_json(self.request, response, other={"bulk_request": True}) + response.data["error_identifier"] = error_json["identifier"] + rabbitmq.publish(error_json, exchange="apierrors") + except Exception as e: + _logger.error(f"could not send api error to rabbitmq. 
Error: {e}") def get_api_name(self): """ diff --git a/src/metax_api/api/rest/v2/serializers/api_error_serializer.py b/src/metax_api/api/rest/v2/serializers/api_error_serializer.py index 9960bde1..3e052674 100644 --- a/src/metax_api/api/rest/v2/serializers/api_error_serializer.py +++ b/src/metax_api/api/rest/v2/serializers/api_error_serializer.py @@ -1,3 +1,10 @@ +# This file is part of the Metax API service +# +# Copyright 2017-2018 Ministry of Education and Culture, Finland +# +# :author: CSC - IT Center for Science Ltd., Espoo Finland +# :license: MIT + import traceback from uuid import uuid4 @@ -19,7 +26,7 @@ class Meta: ) @staticmethod - def to_rabbitmq_json(request, response, other={}): + def request_to_json(request, response, other={}): current_time = str(get_tz_aware_now_without_micros()).replace(" ", "T") if request.method in ("POST", "PUT", "PATCH"): diff --git a/src/metax_api/api/rest/v2/views/api_error_view.py b/src/metax_api/api/rest/v2/views/api_error_view.py index 0f9941ab..39fa9c12 100644 --- a/src/metax_api/api/rest/v2/views/api_error_view.py +++ b/src/metax_api/api/rest/v2/views/api_error_view.py @@ -8,10 +8,12 @@ import logging from django.http import Http404 +from rest_framework import status from rest_framework.decorators import action from rest_framework.response import Response -from metax_api.api.rest.base.views import ApiErrorViewSet +from metax_api.api.rest.base.views import CommonViewSet +from metax_api.exceptions import Http403, Http501 from metax_api.models import ApiError from metax_api.permissions import ServicePermissions @@ -19,13 +21,16 @@ _logger = logging.getLogger(__name__) -class ApiErrorViewSetV2(ApiErrorViewSet): +class ApiErrorViewSetV2(CommonViewSet): permission_classes = [ServicePermissions] queryset = ApiError.objects.all() serializer_class = ApiErrorSerializerV2 - # do views need serializing? + def initial(self, request, *args, **kwargs): + if request.user.username != "metax": + raise Http403 + return super().initial(request, *args, **kwargs) @action(detail=False, methods=["post"], url_path="flush") def flush_errors(self, request): @@ -43,19 +48,35 @@ def destroy(self, request, *args, **kwargs): Delete a single error from database. """ _logger.info("DELETE %s called by %s" % (request.META["PATH_INFO"], request.user.username)) - error = ApiError.objects.get(identifier=kwargs["pk"]) - error.delete() - return Response(status=204) + try: + error = ApiError.objects.get(pk=kwargs["pk"]) + error.delete() + return Response(status=204) + except: + pass + try: + error = ApiError.objects.get(identifier=kwargs["pk"]) + error.delete() + return Response(status=204) + except: + raise Http404 def retrieve(self, request, *args, **kwargs): """ Retrieve complete data about a single error. 
""" + try: + error_details = ApiError.objects.get(pk=kwargs["pk"]) + serializer = ApiErrorSerializerV2(error_details) + return Response(data=serializer.data, status=200) + except: + pass try: error_details = ApiError.objects.get(identifier=kwargs["pk"]) + serializer = ApiErrorSerializerV2(error_details) + return Response(data=serializer.data, status=200) except: raise Http404 - return Response(data=error_details, status=200) def list(self, request, *args, **kwargs): """ @@ -67,12 +88,32 @@ def list(self, request, *args, **kwargs): error_details = error.error error_details.pop("data", None) error_details.pop("headers", None) - if len(str(error_details["response"])) > 200: - error_details["response"] = ( - "%s ...(first 200 characters)" % str(error_details["response"])[:200] + if error_details.get("response") is not None: + if len(str(error_details["response"])) > 200: + print(error_details["response"]) + error_details["response"] = ( + "%s ...(first 200 characters)" % str(error_details["response"])[:200] + ) + print(error_details["response"]) + if error_details.get("traceback") is not None: + error_details["traceback"] = ( + "(last 200 characters) ...%s" % error_details["traceback"][-200:] ) - error_details["traceback"] = ( - "(last 200 characters) ...%s" % error_details["traceback"][-200:] - ) error_list.append(error) - return Response(data=error_list, status=200) \ No newline at end of file + serializer = ApiErrorSerializerV2(error_list, many=True) + return Response(data=serializer.data, status=status.HTTP_200_OK) + + def update(self, request, *args, **kwargs): + raise Http501() + + def update_bulk(self, request, *args, **kwargs): + raise Http501() + + def partial_update(self, request, *args, **kwargs): + raise Http501() + + def partial_update_bulk(self, request, *args, **kwargs): + raise Http501() + + def create(self, request, *args, **kwargs): + raise Http501() \ No newline at end of file diff --git a/src/metax_api/models/api_error.py b/src/metax_api/models/api_error.py index f986d97e..a465a6e5 100644 --- a/src/metax_api/models/api_error.py +++ b/src/metax_api/models/api_error.py @@ -6,92 +6,17 @@ # :license: MIT import logging -import traceback -from uuid import uuid4 -from django.db.models import JSONField from django.db import models +from django.db.models import JSONField -from metax_api.utils import executing_test_case, get_tz_aware_now_without_micros, json_logger - -# from rest_framework.parsers import JSONParser +from metax_api.utils import get_tz_aware_now_without_micros _logger = logging.getLogger(__name__) -class ApiErrorManager(models.Manager): - def store_error_details(self, request, response, exception=None, other={}): - """ - Store error and request details to database. - """ - current_time = str(get_tz_aware_now_without_micros()).replace(" ", "T") - - if request.method in ("POST", "PUT", "PATCH"): - # cast possible datetime objects to strings, because those cant be json-serialized... 
- request_data = request.data - for date_field in ("date_modified", "date_created"): - if isinstance(request_data, list): - for item in request_data: - if isinstance(item, dict) and date_field in item: - item[date_field] = str(item[date_field]) - elif isinstance(request_data, dict) and date_field in request_data: - request_data[date_field] = str(request_data[date_field]) - else: - pass - else: - request_data = None - - error_info = { - "method": request.method, - "user": request.user.username or "guest", - "data": request_data, - "headers": { - k: v - for k, v in request.META.items() - if k.startswith("HTTP_") and k != "HTTP_AUTHORIZATION" - }, - "status_code": response.status_code, - "response": response.data, - "traceback": traceback.format_exc(), - # during test case execution, RAW_URI is not set - "url": request.META.get("RAW_URI", request.META.get("PATH_INFO", "???")), - "identifier": "%s-%s" % (current_time[:19], str(uuid4())[:8]), - "exception_time": current_time, - } - - if other: - # may contain info that the request was a bulk operation - error_info["other"] = {k: v for k, v in other.items()} - if "bulk_request" in other: - error_info["other"]["data_row_count"] = len(request_data) - - try: - error = self.create(identifier=error_info["identifier"], error=error_info, date_created=current_time) - error.save() - return error - except: - _logger.exception("Failed to save error info...") - else: - response.data["error_identifier"] = error_info["identifier"] - - if response.status_code == 500: - - json_logger.error( - event="api_exception", - error={ - "error_identifier": error_info["identifier"], - "status_code": response.status_code, - "traceback": error_info["traceback"], - }, - ) - - if executing_test_case(): - response.data["traceback"] = traceback.format_exc() - class ApiError(models.Model): id = models.BigAutoField(primary_key=True, editable=False) identifier = models.CharField(max_length=200, unique=True, null=False) error = JSONField(null=False) - date_created = models.DateTimeField(default=get_tz_aware_now_without_micros) - - objects = ApiErrorManager() + date_created = models.DateTimeField(default=get_tz_aware_now_without_micros) \ No newline at end of file diff --git a/src/metax_api/services/__init__.py b/src/metax_api/services/__init__.py index f699acc0..63bc45af 100755 --- a/src/metax_api/services/__init__.py +++ b/src/metax_api/services/__init__.py @@ -5,7 +5,6 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT -from .api_error_service import ApiErrorService from .auth_service import AuthService from .callable_service import CallableService from .catalog_record_service import CatalogRecordService diff --git a/src/metax_api/services/api_error_service.py b/src/metax_api/services/api_error_service.py deleted file mode 100755 index 0a37caea..00000000 --- a/src/metax_api/services/api_error_service.py +++ /dev/null @@ -1,145 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -import logging -import traceback -from json import dump as json_dump, load as json_load -from os import listdir, remove as remove_file -from uuid import uuid4 - -from django.conf import settings - -from metax_api.utils import executing_test_case, get_tz_aware_now_without_micros, json_logger - -_logger = logging.getLogger(__name__) - - -class ApiErrorService: - @staticmethod - def flush_errors(): - """ - Delete all error files. 
- """ - error_files = listdir(settings.ERROR_FILES_PATH) - file_count = len(error_files) - for ef in error_files: - remove_file("%s/%s" % (settings.ERROR_FILES_PATH, ef)) - return file_count - - @staticmethod - def remove_error_file(error_identifier): - """ - Delete a single error file. - """ - remove_file("%s/%s.json" % (settings.ERROR_FILES_PATH, error_identifier)) - - @staticmethod - def retrieve_error_details(error_identifier): - """ - Retrieve complete data about a single error - """ - with open("%s/%s.json" % (settings.ERROR_FILES_PATH, error_identifier), "r") as f: - return json_load(f) - - @staticmethod - def retrieve_error_list(): - """ - List all error files in the designated error location. Data is cleaned up a bit - for easier browsing. - """ - error_files = listdir(settings.ERROR_FILES_PATH) - error_list = [] - - # serializer to representation - # apierrorlistserializer, apierrorsingleserializer - - for ef in error_files: - with open("%s/%s" % (settings.ERROR_FILES_PATH, ef), "r") as f: - error_details = json_load(f) - error_details.pop("data", None) - error_details.pop("headers", None) - if len(str(error_details["response"])) > 200: - error_details["response"] = ( - "%s ...(first 200 characters)" % str(error_details["response"])[:200] - ) - error_details["traceback"] = ( - "(last 200 characters) ...%s" % error_details["traceback"][-200:] - ) - error_list.append(error_details) - return error_list - - @staticmethod - def store_error_details(request, response, exception=None, other={}): - """ - Store error and request details to disk to specified error file location. - """ - current_time = str(get_tz_aware_now_without_micros()).replace(" ", "T") - - if request.method in ("POST", "PUT", "PATCH"): - # cast possible datetime objects to strings, because those cant be json-serialized... 
- request_data = request.data - for date_field in ("date_modified", "date_created"): - if isinstance(request_data, list): - for item in request_data: - if isinstance(item, dict) and date_field in item: - item[date_field] = str(item[date_field]) - elif isinstance(request_data, dict) and date_field in request_data: - request_data[date_field] = str(request_data[date_field]) - else: - pass - else: - request_data = None - - error_info = { - "method": request.method, - "user": request.user.username or "guest", - "data": request_data, - "headers": { - k: v - for k, v in request.META.items() - if k.startswith("HTTP_") and k != "HTTP_AUTHORIZATION" - }, - "status_code": response.status_code, - "response": response.data, - "traceback": traceback.format_exc(), - # during test case execution, RAW_URI is not set - "url": request.META.get("RAW_URI", request.META.get("PATH_INFO", "???")), - "identifier": "%s-%s" % (current_time[:19], str(uuid4())[:8]), - "exception_time": current_time, - } - - if other: - # may contain info that the request was a bulk operation - error_info["other"] = {k: v for k, v in other.items()} - if "bulk_request" in other: - error_info["other"]["data_row_count"] = len(request_data) - - try: - with open( - "%s/%s.json" % (settings.ERROR_FILES_PATH, error_info["identifier"]), - "w", - ) as f: - json_dump(error_info, f) - except: - _logger.exception("Failed to save error info...") - else: - # jos ei mene exceptiin - response.data["error_identifier"] = error_info["identifier"] - - if response.status_code == 500: - - json_logger.error( - event="api_exception", - error={ - "error_identifier": error_info["identifier"], - "status_code": response.status_code, - "traceback": error_info["traceback"], - }, - ) - - if executing_test_case(): - response.data["traceback"] = traceback.format_exc() diff --git a/src/metax_api/services/rabbitmq_service.py b/src/metax_api/services/rabbitmq_service.py index 5140dfa2..9984b079 100755 --- a/src/metax_api/services/rabbitmq_service.py +++ b/src/metax_api/services/rabbitmq_service.py @@ -124,6 +124,7 @@ def consume_api_errors(self): break try: error = loads(body) + print(error) ApiError.objects.create(identifier=error["identifier"], error=error) except DatabaseError as e: _logger.error("cannot create API Error. 
Discarding..") @@ -206,7 +207,6 @@ def init_exchanges(self, *args, **kwargs): def consume_api_errors(self): pass - if executing_test_case(): RabbitMQService = _RabbitMQServiceDummy() else: diff --git a/src/metax_api/tests/api/rest/base/views/apierrors/__init__.py b/src/metax_api/tests/api/rest/v2/views/apierrors/__init__.py similarity index 100% rename from src/metax_api/tests/api/rest/base/views/apierrors/__init__.py rename to src/metax_api/tests/api/rest/v2/views/apierrors/__init__.py diff --git a/src/metax_api/tests/api/rest/base/views/apierrors/read.py b/src/metax_api/tests/api/rest/v2/views/apierrors/read.py similarity index 67% rename from src/metax_api/tests/api/rest/base/views/apierrors/read.py rename to src/metax_api/tests/api/rest/v2/views/apierrors/read.py index 4f9ada13..ce8965e7 100755 --- a/src/metax_api/tests/api/rest/base/views/apierrors/read.py +++ b/src/metax_api/tests/api/rest/v2/views/apierrors/read.py @@ -58,9 +58,12 @@ def mock_api_error_consume(self): "Traceback(most recent call last): File/usr/local/lib/python3.8/site-packages/rest_framework/views.py", "url": "/rest/datasets", "identifier": f"2021-06-29T11:10:54-{str(uuid4())[:8]}", - "exception_time": "2021-06-29T11:10:54+00:00" + "exception_time": "2021-06-29T11:10:54+00:00", + "other": { + "bulk_request": True, + "data_row_count": 2 + } } - ApiError.objects.create(identifier=error["identifier"], error=error) def _assert_fields_presence(self, error_json): @@ -78,36 +81,40 @@ def _assert_fields_presence(self, error_json): error_json["headers"], ) + @patch("metax_api.services.rabbitmq_service._RabbitMQServiceDummy.consume_api_errors", mock_api_error_consume) def test_list_errors(self): """ Each requesting resulting in an error should leave behind one API error entry. """ - cr_1 = self.client.get("/rest/datasets/1").data + cr_1 = self.client.get("/rest/v2/datasets/1").data cr_1.pop("id") cr_1.pop("identifier") cr_1.pop("data_catalog") # causes an error - response = self.client.post("/rest/datasets", cr_1, format="json") + response = self.client.post("/rest/v2/datasets", cr_1, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - response = self.client.post("/rest/datasets", cr_1, format="json") + response = self.client.post("/rest/v2/datasets", cr_1, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - response = self.client.get("/rest/apierrors") + response = self.client.get("/rest/v2/apierrors") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) @patch("metax_api.services.rabbitmq_service._RabbitMQServiceDummy.consume_api_errors", mock_api_error_consume) def test_get_error_details(self): - cr_1 = self.client.get("/rest/datasets/1").data + self.apierror_identifier = f"2021-06-29T11:10:54-{str(uuid4())[:8]}" + + cr_1 = self.client.get("/rest/v2/datasets/1").data cr_1.pop("id") cr_1.pop("identifier") cr_1.pop("data_catalog") # causes an error cr_1["research_dataset"]["title"] = {"en": "Abc"} - response = self.client.post("/rest/datasets", cr_1, format="json") + response = self.client.post("/rest/v2/datasets", cr_1, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - # list errors in order to get error identifier - response = self.client.get("/rest/v2/apierrors/1") + error = ApiError.objects.last() + + response = self.client.get(f"/rest/v2/apierrors/{error.identifier}") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) error_json = response.data["error"] @@ -115,72 +122,78 @@ 
def test_get_error_details(self): self._assert_fields_presence(error_json) self.assertTrue("data_catalog" in error_json["response"], error_json["response"]) + @patch("metax_api.services.rabbitmq_service._RabbitMQServiceDummy.consume_api_errors", mock_api_error_consume) @testcase_log_console(_logger) def test_delete_error_details(self): - cr_1 = self.client.get("/rest/datasets/1").data + cr_1 = self.client.get("/rest/v2/datasets/1").data cr_1.pop("id") cr_1.pop("identifier") cr_1.pop("data_catalog") # causes an error - response = self.client.post("/rest/datasets", cr_1, format="json") + response = self.client.post("/rest/v2/datasets", cr_1, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - response = self.client.get("/rest/apierrors") - response = self.client.delete("/rest/apierrors/%s" % response.data[0]["identifier"]) + error = ApiError.objects.last() + + response = self.client.delete(f"/rest/v2/apierrors/{error.identifier}") self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) - response = self.client.get("/rest/apierrors") + response = self.client.get("/rest/v2/apierrors") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) + @patch("metax_api.services.rabbitmq_service._RabbitMQServiceDummy.consume_api_errors", mock_api_error_consume) @testcase_log_console(_logger) def test_delete_all_error_details(self): - cr_1 = self.client.get("/rest/datasets/1").data + cr_1 = self.client.get("/rest/v2/datasets/1").data cr_1.pop("id") cr_1.pop("identifier") cr_1.pop("data_catalog") # causes an error - response = self.client.post("/rest/datasets", cr_1, format="json") + response = self.client.post("/rest/v2/datasets", cr_1, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - response = self.client.post("/rest/datasets", cr_1, format="json") + response = self.client.post("/rest/v2/datasets", cr_1, format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) # ensure something was produced... - response = self.client.get("/rest/apierrors") + response = self.client.get("/rest/v2/apierrors") - response = self.client.post("/rest/apierrors/flush") + response = self.client.post("/rest/v2/apierrors/flush") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get("/rest/apierrors") + response = self.client.get("/rest/v2/apierrors") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) + @patch("metax_api.services.rabbitmq_service._RabbitMQServiceDummy.consume_api_errors", mock_api_error_consume) def test_bulk_operation_produces_error_entry(self): """ Ensure also bulk operations produce error entries. 
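+        The stored error should carry the bulk metadata in "other":
+        a bulk_request flag and the data_row_count of the failed rows.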
""" - cr_1 = self.client.get("/rest/datasets/1").data + cr_1 = self.client.get("/rest/v2/datasets/1").data cr_1.pop("id") cr_1.pop("identifier") cr_1.pop("data_catalog") # causes an error - response = self.client.post("/rest/datasets", [cr_1, cr_1], format="json") + response = self.client.post("/rest/v2/datasets", [cr_1, cr_1], format="json") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - response = self.client.get("/rest/apierrors") + response = self.client.get("/rest/v2/apierrors") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - response = self.client.get("/rest/apierrors/%s" % response.data[0]["identifier"]) - self._assert_fields_presence(response) - self.assertEqual("other" in response.data, True, response.data) - self.assertEqual("bulk_request" in response.data["other"], True, response.data) - self.assertEqual("data_row_count" in response.data["other"], True, response.data) + error = ApiError.objects.last() + + self._assert_fields_presence(error.error) + + self.assertEqual("other" in error.error, True, response.data) + self.assertEqual("bulk_request" in error.error["other"], True, response.data) + self.assertEqual("data_row_count" in error.error["other"], True, response.data) def test_api_permitted_only_to_metax_user(self): # uses testuser by default self._use_http_authorization() - response = self.client.get("/rest/apierrors") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - response = self.client.get("/rest/apierrors/123") + response = self.client.get("/rest/v2/apierrors") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - response = self.client.delete("/rest/apierrors/123") + response = self.client.get("/rest/v2/apierrors/123") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) - response = self.client.post("/rest/apierrors/flush_errors") + response = self.client.delete("/rest/v2/apierrors/123") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) + response = self.client.post("/rest/v2/apierrors/flush_errors") + self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) \ No newline at end of file From 56e6a4e2ee78e00243f38a392aea233ea0409a8d Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Mon, 2 Aug 2021 17:37:07 +0300 Subject: [PATCH 064/160] bug fix: published datasets no longer are saved as apierrors. also rewrote some code in apierrorview file --- .../api/rest/v2/views/api_error_view.py | 35 ++++++++++--------- src/metax_api/models/catalog_record.py | 3 +- src/metax_api/services/rabbitmq_service.py | 1 - src/metax_api/settings/components/rabbitmq.py | 3 ++ 4 files changed, 23 insertions(+), 19 deletions(-) diff --git a/src/metax_api/api/rest/v2/views/api_error_view.py b/src/metax_api/api/rest/v2/views/api_error_view.py index 39fa9c12..c0e477d4 100644 --- a/src/metax_api/api/rest/v2/views/api_error_view.py +++ b/src/metax_api/api/rest/v2/views/api_error_view.py @@ -48,34 +48,35 @@ def destroy(self, request, *args, **kwargs): Delete a single error from database. 
""" _logger.info("DELETE %s called by %s" % (request.META["PATH_INFO"], request.user.username)) + + pk = kwargs["pk"] + error: ApiError try: - error = ApiError.objects.get(pk=kwargs["pk"]) - error.delete() - return Response(status=204) - except: - pass - try: - error = ApiError.objects.get(identifier=kwargs["pk"]) + error = ApiError.objects.filter(pk=int(pk)).first() + except ValueError: + error = ApiError.objects.filter(identifier=pk).first() + + if error: error.delete() return Response(status=204) - except: + else: raise Http404 def retrieve(self, request, *args, **kwargs): """ Retrieve complete data about a single error. """ + pk = kwargs["pk"] + error: ApiError try: - error_details = ApiError.objects.get(pk=kwargs["pk"]) - serializer = ApiErrorSerializerV2(error_details) - return Response(data=serializer.data, status=200) - except: - pass - try: - error_details = ApiError.objects.get(identifier=kwargs["pk"]) - serializer = ApiErrorSerializerV2(error_details) + error = ApiError.objects.filter(pk=int(pk)).first() + except ValueError: + error = ApiError.objects.filter(identifier=pk).first() + + if error: + serializer = ApiErrorSerializerV2(error) return Response(data=serializer.data, status=200) - except: + else: raise Http404 def list(self, request, *args, **kwargs): diff --git a/src/metax_api/models/catalog_record.py b/src/metax_api/models/catalog_record.py index c6aae125..04ed6a00 100755 --- a/src/metax_api/models/catalog_record.py +++ b/src/metax_api/models/catalog_record.py @@ -3088,7 +3088,8 @@ def __call__(self): try: for exchange in settings.RABBITMQ["EXCHANGES"]: - rabbitmq.publish(cr_json, routing_key=self.routing_key, exchange=exchange["NAME"]) + if exchange["EXC_TYPE"] == "dataset": + rabbitmq.publish(cr_json, routing_key=self.routing_key, exchange=exchange["NAME"]) except: # note: if we'd like to let the request be a success even if this operation fails, # we could simply not raise an exception here. diff --git a/src/metax_api/services/rabbitmq_service.py b/src/metax_api/services/rabbitmq_service.py index 9984b079..0e541693 100755 --- a/src/metax_api/services/rabbitmq_service.py +++ b/src/metax_api/services/rabbitmq_service.py @@ -124,7 +124,6 @@ def consume_api_errors(self): break try: error = loads(body) - print(error) ApiError.objects.create(identifier=error["identifier"], error=error) except DatabaseError as e: _logger.error("cannot create API Error. 
Discarding..") diff --git a/src/metax_api/settings/components/rabbitmq.py b/src/metax_api/settings/components/rabbitmq.py index f4d04108..d6f78f7a 100755 --- a/src/metax_api/settings/components/rabbitmq.py +++ b/src/metax_api/settings/components/rabbitmq.py @@ -9,12 +9,14 @@ { "NAME": "datasets", "TYPE": "direct", + "EXC_TYPE": "dataset", # make rabbitmq remember queues after restarts "DURABLE": True, }, { "NAME": "TTV-datasets", "TYPE": "fanout", + "EXC_TYPE": "dataset", "DURABLE": True, "QUEUES": [ { @@ -26,6 +28,7 @@ { "NAME": "apierrors", "TYPE": "fanout", + "EXC_TYPE": "other", "DURABLE": True, "QUEUES": [ { From cfb4a65e730e68eceee6158e110ac6f36f267ca6 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Mon, 9 Aug 2021 18:10:22 +0300 Subject: [PATCH 065/160] Deleted objects are now saved to database using post delete signal --- .../migrations/0035_auto_20210804_1607.py | 29 ++++++++++++ .../0036_remove_deletedobject_identifier.py | 17 +++++++ .../migrations/0037_auto_20210811_1037.py | 18 ++++++++ src/metax_api/models/__init__.py | 1 + src/metax_api/models/catalog_record.py | 4 ++ src/metax_api/models/catalog_record_v2.py | 4 ++ src/metax_api/models/deleted_object.py | 18 ++++++++ src/metax_api/models/file.py | 7 ++- src/metax_api/signals/__init__.py | 1 + src/metax_api/signals/post_delete.py | 27 +++++++++++ src/metax_api/tests/models/__init__.py | 1 + src/metax_api/tests/models/signals.py | 46 +++++++++++++++++++ 12 files changed, 171 insertions(+), 2 deletions(-) create mode 100644 src/metax_api/migrations/0035_auto_20210804_1607.py create mode 100644 src/metax_api/migrations/0036_remove_deletedobject_identifier.py create mode 100644 src/metax_api/migrations/0037_auto_20210811_1037.py create mode 100644 src/metax_api/models/deleted_object.py create mode 100644 src/metax_api/signals/post_delete.py create mode 100644 src/metax_api/tests/models/signals.py diff --git a/src/metax_api/migrations/0035_auto_20210804_1607.py b/src/metax_api/migrations/0035_auto_20210804_1607.py new file mode 100644 index 00000000..56b89cce --- /dev/null +++ b/src/metax_api/migrations/0035_auto_20210804_1607.py @@ -0,0 +1,29 @@ +# Generated by Django 3.1.12 on 2021-08-04 13:07 + +from django.db import migrations, models +import metax_api.utils.utils + + +class Migration(migrations.Migration): + + dependencies = [ + ('metax_api', '0034_apierror'), + ] + + operations = [ + migrations.CreateModel( + name='DeletedObject', + fields=[ + ('id', models.BigAutoField(editable=False, primary_key=True, serialize=False)), + ('identifier', models.CharField(max_length=200, unique=True)), + ('model_name', models.CharField(max_length=200)), + ('object_data', models.JSONField()), + ('date_deleted', models.DateTimeField(default=metax_api.utils.utils.get_tz_aware_now_without_micros)), + ], + ), + migrations.AlterField( + model_name='apierror', + name='date_created', + field=models.DateTimeField(default=metax_api.utils.utils.get_tz_aware_now_without_micros), + ), + ] diff --git a/src/metax_api/migrations/0036_remove_deletedobject_identifier.py b/src/metax_api/migrations/0036_remove_deletedobject_identifier.py new file mode 100644 index 00000000..136bef0f --- /dev/null +++ b/src/metax_api/migrations/0036_remove_deletedobject_identifier.py @@ -0,0 +1,17 @@ +# Generated by Django 3.1.12 on 2021-08-06 10:11 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('metax_api', '0035_auto_20210804_1607'), + ] + + operations = [ + migrations.RemoveField( + model_name='deletedobject', + 
name='identifier', + ), + ] diff --git a/src/metax_api/migrations/0037_auto_20210811_1037.py b/src/metax_api/migrations/0037_auto_20210811_1037.py new file mode 100644 index 00000000..b7798fc2 --- /dev/null +++ b/src/metax_api/migrations/0037_auto_20210811_1037.py @@ -0,0 +1,18 @@ +# Generated by Django 3.1.12 on 2021-08-11 07:37 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('metax_api', '0036_remove_deletedobject_identifier'), + ] + + operations = [ + migrations.AlterField( + model_name='deletedobject', + name='id', + field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'), + ), + ] diff --git a/src/metax_api/models/__init__.py b/src/metax_api/models/__init__.py index ebd576a7..ec08fb44 100755 --- a/src/metax_api/models/__init__.py +++ b/src/metax_api/models/__init__.py @@ -11,6 +11,7 @@ from .common import Common from .contract import Contract from .data_catalog import DataCatalog +from .deleted_object import DeletedObject from .directory import Directory from .file import File from .file_storage import FileStorage diff --git a/src/metax_api/models/catalog_record.py b/src/metax_api/models/catalog_record.py index 04ed6a00..7f303595 100755 --- a/src/metax_api/models/catalog_record.py +++ b/src/metax_api/models/catalog_record.py @@ -1155,6 +1155,10 @@ def _path_included_in_previous_metadata_version(self, project, path): ) def delete(self, *args, **kwargs): + if kwargs.get("hard"): + super().delete() + return self.id + if self.state == self.STATE_DRAFT: _logger.info("Deleting draft dataset %s permanently" % self.identifier) diff --git a/src/metax_api/models/catalog_record_v2.py b/src/metax_api/models/catalog_record_v2.py index d6f97767..ceecef15 100755 --- a/src/metax_api/models/catalog_record_v2.py +++ b/src/metax_api/models/catalog_record_v2.py @@ -83,6 +83,10 @@ def save(self, *args, **kwargs): self._post_update_operations() def delete(self, *args, **kwargs): + if kwargs.get("hard"): + super().delete(*args, **kwargs) + return self.id + if self.next_draft: self.next_draft.delete() self.next_draft = None diff --git a/src/metax_api/models/deleted_object.py b/src/metax_api/models/deleted_object.py new file mode 100644 index 00000000..f73f38c5 --- /dev/null +++ b/src/metax_api/models/deleted_object.py @@ -0,0 +1,18 @@ +# This file is part of the Metax API service +# +# Copyright 2017-2018 Ministry of Education and Culture, Finland +# +# :author: CSC - IT Center for Science Ltd., Espoo Finland +# :license: MIT + +from django.db import models +from django.db.models import JSONField + +from metax_api.utils import get_tz_aware_now_without_micros + + +class DeletedObject(models.Model): + + model_name = models.CharField(max_length=200) + object_data = JSONField(null=False) + date_deleted = models.DateTimeField(default=get_tz_aware_now_without_micros) \ No newline at end of file diff --git a/src/metax_api/models/file.py b/src/metax_api/models/file.py index 2f577619..9cbad0c6 100755 --- a/src/metax_api/models/file.py +++ b/src/metax_api/models/file.py @@ -91,5 +91,8 @@ def __repr__(self): self.file_path, ) - def delete(self): - super(File, self).remove() + def delete(self, *args, **kwargs): + if kwargs.get("hard"): + super().delete() + else: + super(File, self).remove() diff --git a/src/metax_api/signals/__init__.py b/src/metax_api/signals/__init__.py index 9bb77660..3c389606 100644 --- a/src/metax_api/signals/__init__.py +++ b/src/metax_api/signals/__init__.py @@ -1 +1,2 @@ +from .post_delete 
import *
from .request_finished import *
\ No newline at end of file
diff --git a/src/metax_api/signals/post_delete.py b/src/metax_api/signals/post_delete.py
new file mode 100644
index 00000000..1ff39f75
--- /dev/null
+++ b/src/metax_api/signals/post_delete.py
@@ -0,0 +1,27 @@
+import json
+import logging
+
+from django.db.models.signals import post_delete
+from django.dispatch import receiver
+from django.core.serializers.json import DjangoJSONEncoder
+from django.forms.models import model_to_dict
+from ..models import DeletedObject
+
+_logger = logging.getLogger(__name__)
+
+@receiver(post_delete)
+def deleted_object_receiver(instance, *args, **kwargs):
+    try:
+        model_type = instance._meta.model.__name__
+        # stringify datetime fields so the model_to_dict output stays json-serializable
+        if hasattr(instance, "_initial_data") and instance._initial_data.get("date_created"):
+            instance._initial_data["date_created"] = instance._initial_data["date_created"].strftime("%m/%d/%Y, %H:%M:%S")
+        if getattr(instance, "date_created", None):
+            instance.date_created = instance.date_created.strftime("%m/%d/%Y, %H:%M:%S")
+        if getattr(instance, "date_modified", None):
+            instance.date_modified = instance.date_modified.strftime("%m/%d/%Y, %H:%M:%S")
+        instance = model_to_dict(instance)
+        deleted_object_json = json.dumps(instance, cls=DjangoJSONEncoder)
+        DeletedObject.objects.create(model_name=model_type, object_data=deleted_object_json)
+    except Exception as e:
+        _logger.error("cannot save Deleted Object. Discarding..")
+        _logger.debug(f"error: {e}")
\ No newline at end of file
diff --git a/src/metax_api/tests/models/__init__.py b/src/metax_api/tests/models/__init__.py
index 44f3e738..d8b29da6 100755
--- a/src/metax_api/tests/models/__init__.py
+++ b/src/metax_api/tests/models/__init__.py
@@ -14,3 +14,4 @@
 from .data_catalog import DataCatalogModelTests
 from .directory import DirectoryModelTests
 from .file import FileModelBasicTest, FileManagerTests
+from .signals import SignalTests
diff --git a/src/metax_api/tests/models/signals.py b/src/metax_api/tests/models/signals.py
new file mode 100644
index 00000000..548474ce
--- /dev/null
+++ b/src/metax_api/tests/models/signals.py
@@ -0,0 +1,46 @@
+# This file is part of the Metax API service
+#
+# Copyright 2017-2018 Ministry of Education and Culture, Finland
+#
+# :author: CSC - IT Center for Science Ltd., Espoo Finland
+# :license: MIT
+
+from datetime import date
+
+from django.core.management import call_command
+from django.test import TestCase
+
+from metax_api.models import CatalogRecord, CatalogRecordV2, DeletedObject, Directory, File
+from metax_api.tests.utils import TestClassUtils, test_data_file_path
+
+
+class SignalTests(TestCase, TestClassUtils):
+
+    def setUp(self):
+        call_command("loaddata", test_data_file_path, verbosity=0)
+        self.today = date.today().strftime("%d/%m/%Y")
+
+    def test_deleting_catalog_record_creates_new_deleted_object(self):
+        # test that deleting CatalogRecord object creates a new deleted object
+        CatalogRecord.objects_unfiltered.get(pk=1).delete(hard=True)
+        deleted_object = DeletedObject.objects.last()
+        self.assertEqual(deleted_object.model_name, "CatalogRecord")
+        self.assertEqual(deleted_object.date_deleted.strftime("%d/%m/%Y"), self.today)
+
+        # test that deleting CatalogRecordV2 object creates a new deleted object
+        CatalogRecordV2.objects_unfiltered.get(pk=2).delete(hard=True)
+        deleted_object_v2 = DeletedObject.objects.last()
+        self.assertEqual(deleted_object_v2.model_name, "CatalogRecordV2")
+        self.assertEqual(deleted_object_v2.date_deleted.strftime("%d/%m/%Y"), self.today)
+
+    def test_deleting_file_creates_new_deleted_object(self):
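+        # a hard delete of a File should also be recorded as a DeletedObject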
File.objects.get(pk=1).delete(hard=True)
+        deleted_object = DeletedObject.objects.last()
+        self.assertEqual(deleted_object.model_name, "File")
+        self.assertEqual(deleted_object.date_deleted.strftime("%d/%m/%Y"), self.today)
+
+    def test_deleting_directory_creates_new_deleted_object(self):
+        Directory.objects.get(pk=1).delete()
+        deleted_object = DeletedObject.objects.last()
+        self.assertEqual(deleted_object.model_name, "Directory")
+        self.assertEqual(deleted_object.date_deleted.strftime("%d/%m/%Y"), self.today)
\ No newline at end of file

From e9215dcf952e4dcfa04d495c3b784f66d53c2be2 Mon Sep 17 00:00:00 2001
From: Sini Saarinen
Date: Thu, 2 Sep 2021 11:53:41 +0300
Subject: [PATCH 066/160] removed the editor field from the database and
 schemas, as well as all related swagger documentation, tests etc.

---
 .../rest/base/api_schemas/catalogrecord.json  | 36 ------
 .../rest/base/schemas/att_dataset_schema.json | 53 ----------
 .../schemas/harvester_dataset_schema.json     | 53 ----------
 .../rest/base/schemas/ida_dataset_schema.json | 53 ----------
 .../serializers/catalog_record_serializer.py  |  1 -
 .../v2/api_schemas/ida_dataset_schema.json    | 53 ----------
 .../serializers/catalog_record_serializer.py  |  3 -
 .../0038_remove_catalogrecord_editor.py       | 17 ++++
 src/metax_api/models/catalog_record.py        | 23 +----
 src/metax_api/models/catalog_record_v2.py     | 14 ---
 .../services/catalog_record_service.py        | 10 --
 src/metax_api/services/file_service.py        |  2 +-
 src/metax_api/swagger/v1/swagger.yaml         |  4 -
 src/metax_api/swagger/v2/swagger.yaml         |  4 -
 .../api/rest/base/views/datasets/read.py      | 26 -----
 .../api/rest/base/views/datasets/write.py     | 43 ---------
 .../api/rest/v2/views/datasets/drafts.py      |  1 -
 .../tests/api/rpc/v2/views/dataset_rpc.py     |  1 -
 .../tests/testdata/generate_test_data.py      | 14 ---
 src/metax_api/tests/testdata/test_data.json   | 96 ------------------
 src/metax_api/tests/utils.py                  |  2 -
 21 files changed, 19 insertions(+), 490 deletions(-)
 create mode 100644 src/metax_api/migrations/0038_remove_catalogrecord_editor.py

diff --git a/src/metax_api/api/rest/base/api_schemas/catalogrecord.json b/src/metax_api/api/rest/base/api_schemas/catalogrecord.json
index 91a8c6ec..8e25e74f 100755
--- a/src/metax_api/api/rest/base/api_schemas/catalogrecord.json
+++ b/src/metax_api/api/rest/base/api_schemas/catalogrecord.json
@@ -133,12 +133,6 @@
             "maxItems":1,
             "type":"string"
         },
-        "editor":{
-            "title":"Editor",
-            "description":"The editor that is used to modify the record.",
-            "type":"object",
-            "$ref":"#/definitions/Editor"
-        },
         "dataset_version_set":{
             "title":"Dataset version set",
             "description":"Information about the different related dataset versions.",
@@ -270,36 +264,6 @@
             "metadata_provider_user"
         ]
     },
-    "Editor":{
-        "title":"Editor",
-        "description":"An editor object, holds information related to an editor that is used to modify the record. Can contain other fields that are useful to the editor.",
-        "properties":{
-            "creator_id":{
-                "title":"Creator id",
-                "description":"Id of the creator in the editor.
May (currently) be the same value as user_created in CatalogRecord, if the requestor has set that value.", - "type":"string" - }, - "owner_id":{ - "title":"Owner id", - "description":"Id of the owner in the editor", - "type":"string" - }, - "record_id":{ - "title":"Record id", - "description":"Internal id of the record in the editor's system", - "type":"string" - }, - "identifier":{ - "title":"identifier", - "description":"Id of the editor, for example: qvain", - "type":"string" - } - }, - "required":[ - "owner_id", - "identifier" - ] - }, "CatalogRecordVersion":{ "title":"CatalogRecord Version", "description":"Information about various versions of a CatalogRecord.", diff --git a/src/metax_api/api/rest/base/schemas/att_dataset_schema.json b/src/metax_api/api/rest/base/schemas/att_dataset_schema.json index 5047bcbd..a0e67f41 100755 --- a/src/metax_api/api/rest/base/schemas/att_dataset_schema.json +++ b/src/metax_api/api/rest/base/schemas/att_dataset_schema.json @@ -252,14 +252,6 @@ "type":"object", "$ref":"#/definitions/CatalogRecord" }, - "editor":{ - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#editor", - "title":"Editor", - "description":"Software or service that is used to modify the catalog record and the dataset", - "@type":"@id", - "type":"object", - "$ref":"#/definitions/Editor" - }, "identifier":{ "@id":"http://uri.suomi.fi/datamodel/ns/mrd#identifier", "title":"Identifier", @@ -465,51 +457,6 @@ ], "additionalProperties": false }, - "Editor":{ - "title":"Editor", - "type":"object", - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#Editor", - "description":"Software or service that is used to modify the catalog record and the dataset.", - "minProperties":1, - "properties":{ - "identifier":{ - "@id":"http://purl.org/dc/terms/identifier", - "title":"Identifier", - "description":"Identifier of the editor, such as Qvain or harvester.", - "enum":[ - "QVAIN" - ], - "@type":"http://www.w3.org/2001/XMLSchema#string", - "minLength":1, - "type":"string" - }, - "owner_id":{ - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#owner_identifier", - "title":"Owner identifier", - "description":"owner of the resource", - "@type":"http://www.w3.org/2001/XMLSchema#string", - "minLength":1, - "type":"string" - }, - "creator_id":{ - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#creator_identifier", - "title":"Creator identifier", - "description":"creator of the resource", - "@type":"http://www.w3.org/2001/XMLSchema#string", - "minLength":1, - "type":"string" - }, - "record_id":{ - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#recordIdentifier", - "title":"Record identifier", - "description":"local record identifier", - "@type":"http://www.w3.org/2001/XMLSchema#string", - "minLength":1, - "type":"string" - } - }, - "additionalProperties": false - }, "EntityRelation":{ "title":"Entity relation", "type":"object", diff --git a/src/metax_api/api/rest/base/schemas/harvester_dataset_schema.json b/src/metax_api/api/rest/base/schemas/harvester_dataset_schema.json index 55a099e6..cc047bcb 100755 --- a/src/metax_api/api/rest/base/schemas/harvester_dataset_schema.json +++ b/src/metax_api/api/rest/base/schemas/harvester_dataset_schema.json @@ -253,14 +253,6 @@ "type":"object", "$ref":"#/definitions/CatalogRecord" }, - "editor":{ - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#editor", - "title":"Editor", - "description":"Software or service that is used to modify the catalog record and the dataset", - "@type":"@id", - "type":"object", - "$ref":"#/definitions/Editor" - }, "identifier":{ 
"@id":"http://uri.suomi.fi/datamodel/ns/mrd#identifier", "title":"Identifier", @@ -514,51 +506,6 @@ ], "additionalProperties": false }, - "Editor":{ - "title":"Editor", - "type":"object", - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#Editor", - "description":"Software or service that is used to modify the catalog record and the dataset.", - "minProperties":1, - "properties":{ - "identifier":{ - "@id":"http://purl.org/dc/terms/identifier", - "title":"Identifier", - "description":"Identifier of the editor, such as Qvain or harvester.", - "enum":[ - "QVAIN" - ], - "@type":"http://www.w3.org/2001/XMLSchema#string", - "minLength":1, - "type":"string" - }, - "owner_id":{ - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#owner_identifier", - "title":"Owner identifier", - "description":"owner of the resource", - "@type":"http://www.w3.org/2001/XMLSchema#string", - "minLength":1, - "type":"string" - }, - "creator_id":{ - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#creator_identifier", - "title":"Creator identifier", - "description":"creator of the resource", - "@type":"http://www.w3.org/2001/XMLSchema#string", - "minLength":1, - "type":"string" - }, - "record_id":{ - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#recordIdentifier", - "title":"Record identifier", - "description":"local record identifier", - "@type":"http://www.w3.org/2001/XMLSchema#string", - "minLength":1, - "type":"string" - } - }, - "additionalProperties": true - }, "EntityRelation":{ "title":"Entity relation", "type":"object", diff --git a/src/metax_api/api/rest/base/schemas/ida_dataset_schema.json b/src/metax_api/api/rest/base/schemas/ida_dataset_schema.json index 810b92e1..3e59da3c 100755 --- a/src/metax_api/api/rest/base/schemas/ida_dataset_schema.json +++ b/src/metax_api/api/rest/base/schemas/ida_dataset_schema.json @@ -252,14 +252,6 @@ "type":"object", "$ref":"#/definitions/CatalogRecord" }, - "editor":{ - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#editor", - "title":"Editor", - "description":"Software or service that is used to modify the catalog record and the dataset", - "@type":"@id", - "type":"object", - "$ref":"#/definitions/Editor" - }, "identifier":{ "@id":"http://uri.suomi.fi/datamodel/ns/mrd#identifier", "title":"Identifier", @@ -490,51 +482,6 @@ "identifier" ] }, - "Editor":{ - "title":"Editor", - "type":"object", - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#Editor", - "description":"Software or service that is used to modify the catalog record and the dataset.", - "minProperties":1, - "properties":{ - "identifier":{ - "@id":"http://purl.org/dc/terms/identifier", - "title":"Identifier", - "description":"Identifier of the editor, such as Qvain or harvester.", - "enum":[ - "QVAIN" - ], - "@type":"http://www.w3.org/2001/XMLSchema#string", - "minLength":1, - "type":"string" - }, - "owner_id":{ - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#owner_identifier", - "title":"Owner identifier", - "description":"owner of the resource", - "@type":"http://www.w3.org/2001/XMLSchema#string", - "minLength":1, - "type":"string" - }, - "creator_id":{ - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#creator_identifier", - "title":"Creator identifier", - "description":"creator of the resource", - "@type":"http://www.w3.org/2001/XMLSchema#string", - "minLength":1, - "type":"string" - }, - "record_id":{ - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#recordIdentifier", - "title":"Record identifier", - "description":"local record identifier", - "@type":"http://www.w3.org/2001/XMLSchema#string", - "minLength":1, - "type":"string" - } - }, - 
"additionalProperties": true - }, "EntityRelation":{ "title":"Entity relation", "type":"object", diff --git a/src/metax_api/api/rest/base/serializers/catalog_record_serializer.py b/src/metax_api/api/rest/base/serializers/catalog_record_serializer.py index acc6d675..c262c020 100755 --- a/src/metax_api/api/rest/base/serializers/catalog_record_serializer.py +++ b/src/metax_api/api/rest/base/serializers/catalog_record_serializer.py @@ -79,7 +79,6 @@ class Meta: "mets_object_identifier", "state", "use_doi_for_published", - "editor", "cumulative_state", "date_cumulation_started", "date_cumulation_ended", diff --git a/src/metax_api/api/rest/v2/api_schemas/ida_dataset_schema.json b/src/metax_api/api/rest/v2/api_schemas/ida_dataset_schema.json index f2337862..b315e05e 100755 --- a/src/metax_api/api/rest/v2/api_schemas/ida_dataset_schema.json +++ b/src/metax_api/api/rest/v2/api_schemas/ida_dataset_schema.json @@ -252,14 +252,6 @@ "type":"object", "$ref":"#/definitions/CatalogRecord" }, - "editor":{ - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#editor", - "title":"Editor", - "description":"Software or service that is used to modify the catalog record and the dataset", - "@type":"@id", - "type":"object", - "$ref":"#/definitions/Editor" - }, "identifier":{ "@id":"http://uri.suomi.fi/datamodel/ns/mrd#identifier", "title":"Identifier", @@ -490,51 +482,6 @@ "identifier" ] }, - "Editor":{ - "title":"Editor", - "type":"object", - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#Editor", - "description":"Software or service that is used to modify the catalog record and the dataset.", - "minProperties":1, - "properties":{ - "identifier":{ - "@id":"http://purl.org/dc/terms/identifier", - "title":"Identifier", - "description":"Identifier of the editor, such as Qvain or harvester.", - "enum":[ - "QVAIN" - ], - "@type":"http://www.w3.org/2001/XMLSchema#string", - "minLength":1, - "type":"string" - }, - "owner_id":{ - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#owner_identifier", - "title":"Owner identifier", - "description":"owner of the resource", - "@type":"http://www.w3.org/2001/XMLSchema#string", - "minLength":1, - "type":"string" - }, - "creator_id":{ - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#creator_identifier", - "title":"Creator identifier", - "description":"creator of the resource", - "@type":"http://www.w3.org/2001/XMLSchema#string", - "minLength":1, - "type":"string" - }, - "record_id":{ - "@id":"http://uri.suomi.fi/datamodel/ns/mrd#recordIdentifier", - "title":"Record identifier", - "description":"local record identifier", - "@type":"http://www.w3.org/2001/XMLSchema#string", - "minLength":1, - "type":"string" - } - }, - "additionalProperties": true - }, "EntityRelation":{ "title":"Entity relation", "type":"object", diff --git a/src/metax_api/api/rest/v2/serializers/catalog_record_serializer.py b/src/metax_api/api/rest/v2/serializers/catalog_record_serializer.py index f893a414..dd60e9af 100755 --- a/src/metax_api/api/rest/v2/serializers/catalog_record_serializer.py +++ b/src/metax_api/api/rest/v2/serializers/catalog_record_serializer.py @@ -57,7 +57,6 @@ def is_valid(self, raise_exception=False): self.initial_data["data_catalog"] = DFT_CATALOG self.initial_data.pop("draft_of", None) - self.initial_data.pop("editor", None) self.initial_data.pop("next_draft", None) super().is_valid(raise_exception=raise_exception) @@ -86,8 +85,6 @@ def to_representation(self, instance): else: del res["next_draft"] - res.pop("editor", None) - return res def validate_research_dataset_files(self, value): diff --git 
a/src/metax_api/migrations/0038_remove_catalogrecord_editor.py b/src/metax_api/migrations/0038_remove_catalogrecord_editor.py new file mode 100644 index 00000000..12f78871 --- /dev/null +++ b/src/metax_api/migrations/0038_remove_catalogrecord_editor.py @@ -0,0 +1,17 @@ +# Generated by Django 3.1.12 on 2021-08-30 08:49 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('metax_api', '0037_auto_20210811_1037'), + ] + + operations = [ + migrations.RemoveField( + model_name='catalogrecord', + name='editor', + ), + ] diff --git a/src/metax_api/models/catalog_record.py b/src/metax_api/models/catalog_record.py index 7f303595..a8b76cfb 100755 --- a/src/metax_api/models/catalog_record.py +++ b/src/metax_api/models/catalog_record.py @@ -345,11 +345,6 @@ class CatalogRecord(Common): max_length=200, null=False, help_text="Non-modifiable after creation" ) - editor = JSONField( - null=True, - help_text="Editor specific fields, such as owner_id, modified, record_identifier", - ) - preservation_dataset_version = models.OneToOneField( "self", on_delete=models.DO_NOTHING, @@ -564,9 +559,7 @@ def user_is_owner(self, request): _logger.debug("request.user.username = %s", request.user.username) raise Http404 - if self.editor and "owner_id" in self.editor: - return request.user.username == self.editor["owner_id"] - elif self.metadata_provider_user: + if self.metadata_provider_user: return request.user.username == self.metadata_provider_user # note: once access control plans evolve, user_created may not be a legit field ever @@ -2365,20 +2358,6 @@ def _create_new_dataset_version(self): old_version.research_dataset = deepcopy(old_version._initial_data["research_dataset"]) old_version.next_dataset_version = new_version - if new_version.editor: - # some of the old editor fields cant be true in the new version, so keep - # only the ones that make sense. it is up to the editor, to update other fields - # they see as relevant. we also dont want null values in there - old_editor = deepcopy(new_version.editor) - new_version.editor = {} - if "owner_id" in old_editor: - new_version.editor["owner_id"] = old_editor["owner_id"] - if "creator_id" in old_editor: - new_version.editor["creator_id"] = old_editor["creator_id"] - if "identifier" in old_editor: - # todo this probably does not make sense... ? - new_version.editor["identifier"] = old_editor["identifier"] - super(Common, new_version).save() new_version.calculate_directory_byte_sizes_and_file_counts() diff --git a/src/metax_api/models/catalog_record_v2.py b/src/metax_api/models/catalog_record_v2.py index ceecef15..b9ee5895 100755 --- a/src/metax_api/models/catalog_record_v2.py +++ b/src/metax_api/models/catalog_record_v2.py @@ -1411,20 +1411,6 @@ def _create_new_dataset_version(self): old_version.dataset_version_set.records.add(new_version) old_version.next_dataset_version = new_version - if new_version.editor: - # some of the old editor fields cant be true in the new version, so keep - # only the ones that make sense. it is up to the editor, to update other fields - # they see as relevant. we also dont want null values in there - old_editor = deepcopy(new_version.editor) - new_version.editor = {} - if "owner_id" in old_editor: - new_version.editor["owner_id"] = old_editor["owner_id"] - if "creator_id" in old_editor: - new_version.editor["creator_id"] = old_editor["creator_id"] - if "identifier" in old_editor: - # todo this probably does not make sense... ? 
- new_version.editor["identifier"] = old_editor["identifier"] - # v2 api successfully invoked, change the api version to prevent further updates on v1 api self._set_api_version() diff --git a/src/metax_api/services/catalog_record_service.py b/src/metax_api/services/catalog_record_service.py index 98638fcf..bc4ba1d3 100755 --- a/src/metax_api/services/catalog_record_service.py +++ b/src/metax_api/services/catalog_record_service.py @@ -85,19 +85,9 @@ def get_queryset_search_params(cls, request): "curator": [{"identifier": request.query_params["curator"]}] } - if request.query_params.get("owner_id", False): - queryset_search_params["editor__contains"] = { - "owner_id": request.query_params["owner_id"] - } - if request.query_params.get("user_created", False): queryset_search_params["user_created"] = request.query_params["user_created"] - if request.query_params.get("editor", False): - queryset_search_params["editor__contains"] = { - "identifier": request.query_params["editor"] - } - if request.query_params.get("metadata_provider_user", False): queryset_search_params["metadata_provider_user"] = request.query_params[ "metadata_provider_user" diff --git a/src/metax_api/services/file_service.py b/src/metax_api/services/file_service.py index a1ebd0db..ad2d9617 100755 --- a/src/metax_api/services/file_service.py +++ b/src/metax_api/services/file_service.py @@ -753,7 +753,7 @@ def _get_cr_if_relevant(cls, cr_identifier, directory, request): try: cr = CatalogRecord.objects.only( - "id", "_directory_data", "editor", "user_created", "research_dataset" + "id", "_directory_data", "user_created", "research_dataset" ).get(**cr_params) except CatalogRecord.DoesNotExist: # raise 400 instead of 404, to distinguish from the error diff --git a/src/metax_api/swagger/v1/swagger.yaml b/src/metax_api/swagger/v1/swagger.yaml index f230d356..d4ad458b 100755 --- a/src/metax_api/swagger/v1/swagger.yaml +++ b/src/metax_api/swagger/v1/swagger.yaml @@ -953,10 +953,6 @@ paths: description: A specific filter that targets the following fields; research_dataset['title'], research_dataset['curator'][n]['name'], contract['contract_json']['title']. Restricted to permitted users. required: false type: string - - name: editor - in: query - description: Identifier of the editor used to modify the record, i.e. qvain. - type: string - name: data_catalog in: query description: Filter by data catalog urn identifier diff --git a/src/metax_api/swagger/v2/swagger.yaml b/src/metax_api/swagger/v2/swagger.yaml index 29060a1d..6f297233 100755 --- a/src/metax_api/swagger/v2/swagger.yaml +++ b/src/metax_api/swagger/v2/swagger.yaml @@ -958,10 +958,6 @@ paths: description: A specific filter that targets the following fields; research_dataset['title'], research_dataset['curator'][n]['name'], contract['contract_json']['title']. Restricted to permitted users. required: false type: string - - name: editor - in: query - description: Identifier of the editor used to modify the record, i.e. qvain. 
- type: string - name: data_catalog in: query description: Filter by data catalog urn identifier diff --git a/src/metax_api/tests/api/rest/base/views/datasets/read.py b/src/metax_api/tests/api/rest/base/views/datasets/read.py index 75034792..e4bb502d 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/read.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/read.py @@ -750,15 +750,6 @@ def test_read_catalog_record_search_by_curator_and_state_not_found(self): self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data["results"]), 0) - def test_read_catalog_record_search_by_owner_id(self): - cr = CatalogRecord.objects.get(pk=1) - cr.editor = {"owner_id": "123"} - cr.save() - response = self.client.get("/rest/datasets?owner_id=123") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data["results"]), 1) - self.assertEqual(response.data["results"][0]["editor"]["owner_id"], "123") - def test_read_catalog_record_search_by_creator_id(self): cr = CatalogRecord.objects.get(pk=1) cr.user_created = "123" @@ -768,23 +759,6 @@ def test_read_catalog_record_search_by_creator_id(self): self.assertEqual(len(response.data["results"]), 1) self.assertEqual(response.data["results"][0]["user_created"], "123") - def test_read_catalog_record_search_by_editor(self): - response = self.client.get("/rest/datasets?editor=mspaint") - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data["count"], 0) - - response = self.client.get("/rest/datasets?editor=qvain") - self.assertEqual(response.status_code, status.HTTP_200_OK) - qvain_records_count = response.data["count"] - self.assertEqual(qvain_records_count > 0, True) - - response = self.client.get("/rest/datasets") - self.assertNotEqual( - response.data["count"], - qvain_records_count, - "looks like filtering had no effect", - ) - def test_read_catalog_record_search_by_metadata_provider_user(self): response = self.client.get("/rest/datasets?metadata_provider_user=123") self.assertEqual(response.status_code, status.HTTP_200_OK) diff --git a/src/metax_api/tests/api/rest/base/views/datasets/write.py b/src/metax_api/tests/api/rest/base/views/datasets/write.py index 2986b797..7f7405d5 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/write.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/write.py @@ -480,15 +480,6 @@ def test_create_catalog_record_list_error_all_fail(self): self.assertEqual(len(response.data["success"]), 0) self.assertEqual(len(response.data["failed"]), 2) - def test_create_catalog_record_editor_field_is_optional(self): - response = self.client.post("/rest/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - new = response.data - new["research_dataset"]["title"]["en"] = "updated title" - new.pop("editor") - response = self.client.put("/rest/datasets/%d" % new["id"], new, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - def test_parameter_migration_override_preferred_identifier_when_creating(self): """ Normally, when saving to att/ida catalogs, providing a custom preferred_identifier is not @@ -4232,7 +4223,6 @@ def _set_cr_owner_to_token_user(self, cr_id): cr = CatalogRecord.objects.get(pk=cr_id) cr.user_created = self.token["CSCUserName"] cr.metadata_provider_user = self.token["CSCUserName"] - cr.editor = None # pretend the record was created by user directly cr.force_save() def 
_set_cr_to_permitted_catalog(self, cr_id): @@ -4256,7 +4246,6 @@ def test_user_can_create_dataset(self): self.cr_test_data["data_catalog"] = END_USER_ALLOWED_DATA_CATALOGS[0] # ida self.cr_test_data["contract"] = 1 - self.cr_test_data["editor"] = {"nope": "discarded by metax"} self.cr_test_data["preservation_description"] = "discarded by metax" self.cr_test_data["preservation_reason_description"] = "discarded by metax" self.cr_test_data["preservation_state"] = 10 @@ -4276,7 +4265,6 @@ def test_user_can_create_dataset(self): self.assertEqual(response.data["metadata_provider_org"], metadata_provider_org) self.assertEqual(response.data["metadata_owner_org"], metadata_owner_org) self.assertEqual("contract" in response.data, False) - self.assertEqual("editor" in response.data, False) self.assertEqual("preservation_description" in response.data, False) self.assertEqual("preservation_reason_description" in response.data, False) self.assertEqual(response.data["preservation_state"], 0) @@ -4334,7 +4322,6 @@ def test_owner_can_edit_dataset(self): # research_dataset is the only permitted field to edit modified_data["research_dataset"]["value"] = 112233 modified_data["contract"] = 1 - modified_data["editor"] = {"nope": "discarded by metax"} modified_data["preservation_description"] = "discarded by metax" modified_data["preservation_reason_description"] = "discarded by metax" modified_data["preservation_state"] = 10 @@ -4348,7 +4335,6 @@ def test_owner_can_edit_dataset(self): # none of these should have been affected self.assertEqual("contract" in response.data, False) - self.assertEqual("editor" in response.data, False) self.assertEqual("preservation_description" in response.data, False) self.assertEqual("preservation_reason_description" in response.data, False) self.assertEqual(response.data["preservation_state"], 0) @@ -4364,7 +4350,6 @@ def test_owner_can_edit_datasets_only_in_permitted_catalogs(self): self.cr_test_data["data_catalog"] = 1 self.cr_test_data["user_created"] = self.token["CSCUserName"] self.cr_test_data["metadata_provider_user"] = self.token["CSCUserName"] - self.cr_test_data.pop("editor", None) self._use_http_authorization() # create cr as a service-user response = self.client.post("/rest/datasets", self.cr_test_data, format="json") @@ -4379,33 +4364,6 @@ def test_owner_can_edit_datasets_only_in_permitted_catalogs(self): ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) - @responses.activate - def test_owner_can_edit_dataset_check_perms_from_editor_field(self): - """ - Ensure end user perms are also checked from the field 'editor', which may be - set by .e.g. qvain. 
- """ - self.cr_test_data["data_catalog"] = END_USER_ALLOWED_DATA_CATALOGS[0] - self.cr_test_data[ - "user_created" - ] = "editor field is checked before this field, so should be ok" - self.cr_test_data["editor"] = {"owner_id": self.token["CSCUserName"]} - - self._use_http_authorization() # create cr as a service-user to ensure editor-field is set - - response = self.client.post("/rest/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - self._use_http_authorization(method="bearer", token=self.token) - response = self.client.get("/rest/datasets/%d" % response.data["id"], format="json") - modified_data = response.data - modified_data["research_dataset"]["value"] = 112233 - - response = self.client.put( - "/rest/datasets/%d" % response.data["id"], modified_data, format="json" - ) - self.assertEqual(response.status_code, status.HTTP_200_OK) - @responses.activate def test_other_users_cant_edit_dataset(self): """ @@ -4562,7 +4520,6 @@ def test_external_service_can_not_read_all_metadata_in_other_catalog(self): cr.user_created = "#### Some owner who is not you ####" cr.metadata_provider_user = "#### Some owner who is not you ####" cr.data_catalog = dc2 - cr.editor = None cr.research_dataset["access_rights"]["access_type"]["identifier"] = ACCESS_TYPES[ "restricted" ] diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py b/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py index 00b5c7a9..710ecfef 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py @@ -72,7 +72,6 @@ def _set_cr_owner_and_state(self, cr_id, state, owner): cr.state = state cr.user_created = owner cr.metadata_provider_user = owner - cr.editor = None # pretend the record was created by user directly cr.data_catalog_id = DataCatalog.objects.get( catalog_json__identifier=END_USER_ALLOWED_DATA_CATALOGS[0] ).id diff --git a/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py b/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py index d4382b12..d89f0c29 100755 --- a/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py +++ b/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py @@ -237,7 +237,6 @@ def test_authorization(self): # change owner, try again. should be OK cr = CatalogRecordV2.objects.get(pk=1) cr.metadata_provider_user = self.token["CSCUserName"] - cr.editor = None cr.force_save() response = self.client.post( diff --git a/src/metax_api/tests/testdata/generate_test_data.py b/src/metax_api/tests/testdata/generate_test_data.py index 4b4931e8..6729396a 100755 --- a/src/metax_api/tests/testdata/generate_test_data.py +++ b/src/metax_api/tests/testdata/generate_test_data.py @@ -651,10 +651,6 @@ def generate_catalog_records( new["fields"]["data_catalog"] = data_catalog_id new["fields"]["date_modified"] = "2017-09-23T10:07:22Z" new["fields"]["date_created"] = "2017-05-23T10:07:22Z" - new["fields"]["editor"] = { - "owner_id": catalog_records_owner_ids[j], - "creator_id": catalog_records_owner_ids[owner_idx], - } new["fields"]["research_dataset"]["metadata_version_identifier"] = generate_test_identifier( cr_type, len(test_data_list) + 1, urn=False @@ -772,9 +768,6 @@ def generate_alt_catalog_records(test_data_list): # # create a couple of alternate records for record with id 10 # - # note, these alt records wont have an editor-field set, since they presumably - # originated to metax from somewhere else than qvain (were harvested). 
- # print("generating alternate catalog records...") alternate_record_set = { "fields": {}, @@ -832,13 +825,6 @@ def set_qvain_info_to_records(catalog_record_list): continue if cr["fields"]["data_catalog"] not in (1, 2): continue - cr["fields"]["editor"] = { - "owner_id": catalog_records_owner_ids[owner_idx], - "creator_id": catalog_records_owner_ids[owner_idx], - "identifier": "qvain", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9%03d" - % cr["pk"], # 3 leading zeroes to preserve length - } owner_idx += 1 if owner_idx >= total_qvain_users: owner_idx = 0 diff --git a/src/metax_api/tests/testdata/test_data.json b/src/metax_api/tests/testdata/test_data.json index dbe75ff1..c3802a10 100755 --- a/src/metax_api/tests/testdata/test_data.json +++ b/src/metax_api/tests/testdata/test_data.json @@ -5727,12 +5727,6 @@ "dataset_version_set": 1, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "053bffbcc41edad4853bea91fc42ea18", - "identifier": "qvain", - "owner_id": "053bffbcc41edad4853bea91fc42ea18", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9001" - }, "files": [ 1, 2 @@ -5888,12 +5882,6 @@ "dataset_version_set": 2, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "053d18ecb29e752cb7a35cd77b34f5fd", - "identifier": "qvain", - "owner_id": "053d18ecb29e752cb7a35cd77b34f5fd", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9002" - }, "files": [ 3, 4 @@ -6049,12 +6037,6 @@ "dataset_version_set": 3, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "05593961536b76fa825281ccaedd4d4f", - "identifier": "qvain", - "owner_id": "05593961536b76fa825281ccaedd4d4f", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9003" - }, "files": [ 5, 6 @@ -6210,12 +6192,6 @@ "dataset_version_set": 4, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "055ea4dade5ab2145954f56d4b51cef0", - "identifier": "qvain", - "owner_id": "055ea4dade5ab2145954f56d4b51cef0", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9004" - }, "files": [ 7, 8 @@ -6371,12 +6347,6 @@ "dataset_version_set": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "055ea531a6cac569425bed94459266ee", - "identifier": "qvain", - "owner_id": "055ea531a6cac569425bed94459266ee", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9005" - }, "files": [ 9, 10 @@ -6532,12 +6502,6 @@ "dataset_version_set": 6, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "053bffbcc41edad4853bea91fc42ea18", - "identifier": "qvain", - "owner_id": "053bffbcc41edad4853bea91fc42ea18", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9006" - }, "files": [ 11, 12 @@ -6693,12 +6657,6 @@ "dataset_version_set": 7, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "053d18ecb29e752cb7a35cd77b34f5fd", - "identifier": "qvain", - "owner_id": "053d18ecb29e752cb7a35cd77b34f5fd", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9007" - }, "files": [ 13, 14 @@ -6854,12 +6812,6 @@ "dataset_version_set": 8, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "05593961536b76fa825281ccaedd4d4f", - "identifier": "qvain", - "owner_id": "05593961536b76fa825281ccaedd4d4f", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9008" - }, 
"files": [ 15, 16 @@ -7000,12 +6952,6 @@ "dataset_version_set": 9, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "055ea4dade5ab2145954f56d4b51cef0", - "identifier": "qvain", - "owner_id": "055ea4dade5ab2145954f56d4b51cef0", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9009" - }, "files": [ 17, 18 @@ -7132,12 +7078,6 @@ "dataset_version_set": 10, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "055ea531a6cac569425bed94459266ee", - "identifier": "qvain", - "owner_id": "055ea531a6cac569425bed94459266ee", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9010" - }, "files": [ 19, 20 @@ -7261,12 +7201,6 @@ "dataset_version_set": 11, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor": { - "creator_id": "053bffbcc41edad4853bea91fc42ea18", - "identifier": "qvain", - "owner_id": "053bffbcc41edad4853bea91fc42ea18", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9011" - }, "files": [ 1, 2, @@ -8218,12 +8152,6 @@ "dataset_version_set": 12, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor": { - "creator_id": "053d18ecb29e752cb7a35cd77b34f5fd", - "identifier": "qvain", - "owner_id": "053d18ecb29e752cb7a35cd77b34f5fd", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9012" - }, "files": [ 1, 2, @@ -9175,12 +9103,6 @@ "dataset_version_set": 13, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor": { - "creator_id": "05593961536b76fa825281ccaedd4d4f", - "identifier": "qvain", - "owner_id": "05593961536b76fa825281ccaedd4d4f", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9013" - }, "files": [ 22, 23, @@ -11577,10 +11499,6 @@ "data_catalog": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor": { - "creator_id": "053bffbcc41edad4853bea91fc42ea18", - "owner_id": "053bffbcc41edad4853bea91fc42ea18" - }, "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9624", "metadata_owner_org": "abc-org-123", "metadata_provider_org": "abc-org-123", @@ -12515,10 +12433,6 @@ "data_catalog": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor": { - "creator_id": "053bffbcc41edad4853bea91fc42ea18", - "owner_id": "053d18ecb29e752cb7a35cd77b34f5fd" - }, "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9625", "metadata_owner_org": "abc-org-123", "metadata_provider_org": "abc-org-123", @@ -13453,10 +13367,6 @@ "data_catalog": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor": { - "creator_id": "053bffbcc41edad4853bea91fc42ea18", - "owner_id": "05593961536b76fa825281ccaedd4d4f" - }, "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9626", "metadata_owner_org": "abc-org-123", "metadata_provider_org": "abc-org-123", @@ -14394,12 +14304,6 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "055ea4dade5ab2145954f56d4b51cef0", - "identifier": "qvain", - "owner_id": "055ea4dade5ab2145954f56d4b51cef0", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9027" - }, "files": [ 19, 20 diff --git a/src/metax_api/tests/utils.py b/src/metax_api/tests/utils.py index 1b5b7992..6fd25b6a 100755 --- a/src/metax_api/tests/utils.py +++ b/src/metax_api/tests/utils.py @@ -257,8 +257,6 @@ def _get_object_from_test_data(self, model_name, requested_index=0): def 
_create_cr_for_owner(self, pk_for_template_cr, data): self.token = get_test_oidc_token() - if "editor" in data: - data.pop("editor", None) data["user_created"] = self.token["CSCUserName"] data["metadata_provider_user"] = self.token["CSCUserName"] data["metadata_provider_org"] = self.token["schacHomeOrganization"] From 5436c59404f1a485b6fed361e51ef27d489606fc Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Mon, 2 Aug 2021 11:06:14 +0300 Subject: [PATCH 067/160] added functionality to fetch PAS templates --- docs/api/v1/datasets.rst | 2 +- docs/api/v2/datasets.rst | 2 +- .../api/rpc/base/views/dataset_rpc.py | 12 +++++++--- src/metax_api/swagger/v1/swagger.yaml | 4 ++-- src/metax_api/swagger/v2/swagger.yaml | 4 ++-- .../tests/api/rpc/base/views/dataset_rpc.py | 23 +++++++++++++++++++ 6 files changed, 38 insertions(+), 9 deletions(-) diff --git a/docs/api/v1/datasets.rst b/docs/api/v1/datasets.rst index fd6d651b..53115af3 100755 --- a/docs/api/v1/datasets.rst +++ b/docs/api/v1/datasets.rst @@ -615,7 +615,7 @@ When services interact with Metax, services have the additional responsibility o Retrieve minimal valid dataset template ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The API ``GET /rpc/datasets/get_minimal_dataset_template`` returns a valid minimal dataset, that can be used as-is to create a dataset into Metax. +The API ``GET /rpc/datasets/get_minimal_dataset_template`` returns a valid minimal dataset that can be used as-is to create a dataset in Metax. A PAS template can be fetched with type ``enduser_pas``. .. code-block:: python diff --git a/docs/api/v2/datasets.rst b/docs/api/v2/datasets.rst index 0f875fcf..5281c8ff 100755 --- a/docs/api/v2/datasets.rst +++ b/docs/api/v2/datasets.rst @@ -586,7 +586,7 @@ When services accounts interact with Metax, services have the additional respons Retrieve minimal valid dataset template ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The API ``GET /rpc/datasets/get_minimal_dataset_template`` returns a valid minimal dataset, that can be used as-is to create a dataset into Metax. +The API ``GET /rpc/datasets/get_minimal_dataset_template`` returns a valid minimal dataset that can be used as-is to create a dataset in Metax. A PAS template can be fetched with type ``enduser_pas``. .. code-block:: python diff --git a/src/metax_api/api/rpc/base/views/dataset_rpc.py b/src/metax_api/api/rpc/base/views/dataset_rpc.py index 2450c615..c66fb3f1 100755 --- a/src/metax_api/api/rpc/base/views/dataset_rpc.py +++ b/src/metax_api/api/rpc/base/views/dataset_rpc.py @@ -37,11 +37,11 @@ class DatasetRPC(CommonRPC): @action(detail=False, methods=["get"], url_path="get_minimal_dataset_template") def get_minimal_dataset_template(self, request): - if request.query_params.get("type", None) not in ["service", "enduser"]: + if request.query_params.get("type", None) not in ["service", "enduser", "service_pas", "enduser_pas"]: raise Http400( { "detail": [ - "query param 'type' missing or wrong. please specify ?type= as one of: service, enduser" + "query param 'type' missing or wrong. 
please specify ?type= as one of: service, enduser, service_pas, enduser_pas" ] } ) @@ -51,7 +51,13 @@ def get_minimal_dataset_template(self, request): example_ds["data_catalog"] = django_settings.END_USER_ALLOWED_DATA_CATALOGS[0] - if request.query_params["type"] == "enduser": + if request.query_params["type"].endswith("_pas"): + if 'issued' not in example_ds['research_dataset']: + example_ds['research_dataset']['issued'] = '2019-01-01' + if 'publisher' not in example_ds['research_dataset']: + example_ds['research_dataset']['publisher'] = example_ds['research_dataset']['creator'][0] + + if request.query_params["type"].startswith("enduser"): example_ds.pop("metadata_provider_org", None) example_ds.pop("metadata_provider_user", None) diff --git a/src/metax_api/swagger/v1/swagger.yaml b/src/metax_api/swagger/v1/swagger.yaml index d4ad458b..efbc2f82 100755 --- a/src/metax_api/swagger/v1/swagger.yaml +++ b/src/metax_api/swagger/v1/swagger.yaml @@ -1621,11 +1621,11 @@ paths: /rpc/datasets/get_minimal_dataset_template: get: summary: Get minimal dataset template. - description: Get minimal dataset template that can be used to create datasets. Service and End Users have different kind of templates. The returned template can be used as-is when creating a dataset into Metax. + description: Get minimal dataset template that can be used to create datasets. Service and End Users have different kinds of templates. It is also possible to fetch PAS templates for Service and End Users. The returned template can be used as-is when creating a dataset in Metax. parameters: - name: type in: query - description: Type of dataset template to retrieve. Accepted values: service, enduser. + description: Type of dataset template to retrieve. Accepted values: service, enduser, service_pas, enduser_pas. required: true type: string responses: diff --git a/src/metax_api/swagger/v2/swagger.yaml b/src/metax_api/swagger/v2/swagger.yaml index 6f297233..58c8b7c5 100755 --- a/src/metax_api/swagger/v2/swagger.yaml +++ b/src/metax_api/swagger/v2/swagger.yaml @@ -1810,11 +1810,11 @@ paths: /rpc/v2/datasets/get_minimal_dataset_template: get: summary: Get minimal dataset template. - description: Get minimal dataset template that can be used to create datasets. Service and End Users have different kind of templates. The returned template can be used as-is when creating a dataset into Metax. + description: Get minimal dataset template that can be used to create datasets. Service and End Users have different kinds of templates. It is also possible to fetch PAS templates for Service and End Users. The returned template can be used as-is when creating a dataset in Metax. parameters: - name: type in: query - description: Type of dataset template to retrieve. Accepted values: service, enduser, service_pas, enduser_pas. 
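A minimal usage sketch of the PAS template types added in this patch (illustrative only: the host and bearer token are placeholders and the ``requests`` dependency is an assumption; the endpoint paths, the ``issued``/``publisher`` defaults, and the end user behaviour follow the docs, dataset_rpc.py, and test changes in this commit):

    import requests

    METAX = "https://metax.example.org"  # placeholder host

    # Fetch the PAS-flavoured end user template. Per the dataset_rpc.py hunk,
    # research_dataset.issued and research_dataset.publisher get default values
    # when the base template does not define them.
    template = requests.get(
        f"{METAX}/rpc/datasets/get_minimal_dataset_template",
        params={"type": "enduser_pas"},
    ).json()
    assert template["research_dataset"]["issued"] == "2019-01-01"

    # End user templates omit metadata_provider_org/_user (Metax derives them
    # from the token), and the template is valid as-is for creating a dataset.
    response = requests.post(
        f"{METAX}/rest/datasets",
        json=template,
        headers={"Authorization": "Bearer <token>"},  # placeholder token
    )
    response.raise_for_status()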
required: true type: string responses: diff --git a/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py b/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py index 2e602a6f..ab033762 100755 --- a/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py +++ b/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py @@ -65,6 +65,29 @@ def test_get_minimal_dataset_template(self): response = self.client.post("/rest/datasets", response.data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) + # test minimal dataset for PAS service use + response = self.client.get("/rpc/datasets/get_minimal_dataset_template?type=service_pas") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertTrue("metadata_provider_org" in response.data) + self.assertTrue("metadata_provider_user" in response.data) + self.assertEqual(response.data["research_dataset"]["issued"], "2019-01-01") + self.assertEqual(response.data["research_dataset"]["publisher"], response.data["research_dataset"]["creator"][0]) + self._use_http_authorization(username="testuser") + response = self.client.post("/rest/datasets", response.data, format="json") + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + + # test minimal dataset for PAS end user use + response = self.client.get("/rpc/datasets/get_minimal_dataset_template?type=enduser_pas") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertTrue("metadata_provider_org" not in response.data) + self.assertTrue("metadata_provider_user" not in response.data) + self.assertEqual(response.data["research_dataset"]["issued"], "2019-01-01") + self.assertEqual(response.data["research_dataset"]["publisher"], response.data["research_dataset"]["creator"][0]) + self._use_http_authorization(method="bearer", token=get_test_oidc_token()) + self._mock_token_validation_succeeds() + response = self.client.post("/rest/datasets", response.data, format="json") + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + def test_set_preservation_identifier(self): self._set_http_authorization("service") From b9c8079f7579ef1d88004fd81c5d57b77bb550e4 Mon Sep 17 00:00:00 2001 From: Toni Date: Tue, 7 Sep 2021 16:28:56 +0300 Subject: [PATCH 068/160] add test_view for quick manual testing --- src/metax_api/templates/test_view.html | 6 ++++++ src/metax_api/urls.py | 2 ++ src/metax_api/views/test_view.py | 15 +++++++++++++++ 3 files changed, 23 insertions(+) create mode 100644 src/metax_api/templates/test_view.html create mode 100644 src/metax_api/views/test_view.py diff --git a/src/metax_api/templates/test_view.html b/src/metax_api/templates/test_view.html new file mode 100644 index 00000000..442c3d03 --- /dev/null +++ b/src/metax_api/templates/test_view.html @@ -0,0 +1,6 @@ + + + +

Test view page

+ +{{ cr.state }} diff --git a/src/metax_api/urls.py b/src/metax_api/urls.py index 0259e75a..5da93149 100755 --- a/src/metax_api/urls.py +++ b/src/metax_api/urls.py @@ -31,6 +31,7 @@ from metax_api.api.rpc.base.router import api_urlpatterns as rpc_api_v1 from metax_api.api.rpc.v2.router import api_urlpatterns as rpc_api_v2 from metax_api.views.router import view_urlpatterns +from metax_api.views.test_view import TestView v1_urls = [ url("", include(view_urlpatterns)), @@ -60,4 +61,5 @@ if django_settings.DEBUG: urlpatterns += [ path("__debug__/", include(debug_toolbar.urls)), + path("test_view/", TestView.as_view(), name="test_view") ] diff --git a/src/metax_api/views/test_view.py b/src/metax_api/views/test_view.py new file mode 100644 index 00000000..b5303d4f --- /dev/null +++ b/src/metax_api/views/test_view.py @@ -0,0 +1,15 @@ +from django.views.generic import TemplateView + +from metax_api.models import CatalogRecord + + +class TestView(TemplateView): + template_name = "test_view.html" + + def get_context_data(self, **kwargs): + context = super().get_context_data(**kwargs) # .filter(cast__in=self.request.user) + + cr = CatalogRecord.objects.first() + + context["cr"] = cr + return context \ No newline at end of file From bc7b60bc9cfa3d1e21c2ba8149b0ca6a25f75594 Mon Sep 17 00:00:00 2001 From: Toni Date: Tue, 7 Sep 2021 16:39:25 +0300 Subject: [PATCH 069/160] fix debug toolbar not showing in the template --- src/metax_api/templates/test_view.html | 8 ++++++-- src/metax_api/urls.py | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/metax_api/templates/test_view.html b/src/metax_api/templates/test_view.html index 442c3d03..489966ac 100644 --- a/src/metax_api/templates/test_view.html +++ b/src/metax_api/templates/test_view.html @@ -1,6 +1,10 @@ - - + +

Test view page

{{ cr.state }} + + + + diff --git a/src/metax_api/urls.py b/src/metax_api/urls.py index 5da93149..85cdf746 100755 --- a/src/metax_api/urls.py +++ b/src/metax_api/urls.py @@ -60,6 +60,6 @@ if django_settings.DEBUG: urlpatterns += [ + path("test_view/", TestView.as_view(), name="test_view"), path("__debug__/", include(debug_toolbar.urls)), - path("test_view/", TestView.as_view(), name="test_view") ] From be4262ae1c8b222893ca64f13bcb5ba7f5487514 Mon Sep 17 00:00:00 2001 From: Toni Date: Wed, 8 Sep 2021 08:47:07 +0300 Subject: [PATCH 070/160] change matomo-url, add matomo script snippet to swagger html template --- docs/api/v1/_templates/layout.html | 2 +- docs/api/v2/_templates/layout.html | 2 +- src/metax_api/utils/convert_yaml_to_html.py | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/api/v1/_templates/layout.html b/docs/api/v1/_templates/layout.html index e51f202a..24c77670 100644 --- a/docs/api/v1/_templates/layout.html +++ b/docs/api/v1/_templates/layout.html @@ -2,6 +2,6 @@ {% block extrahead %} - + {% endblock %} \ No newline at end of file diff --git a/docs/api/v2/_templates/layout.html b/docs/api/v2/_templates/layout.html index e51f202a..24c77670 100644 --- a/docs/api/v2/_templates/layout.html +++ b/docs/api/v2/_templates/layout.html @@ -2,6 +2,6 @@ {% block extrahead %} - + {% endblock %} \ No newline at end of file diff --git a/src/metax_api/utils/convert_yaml_to_html.py b/src/metax_api/utils/convert_yaml_to_html.py index 2b7bb7e5..cb3c919a 100644 --- a/src/metax_api/utils/convert_yaml_to_html.py +++ b/src/metax_api/utils/convert_yaml_to_html.py @@ -33,6 +33,7 @@ background: #fafafa; } + From 2bebc95fbb08dc5e98a5484b23be6c28c4ec7d8a Mon Sep 17 00:00:00 2001 From: Toni Date: Wed, 8 Sep 2021 15:01:22 +0300 Subject: [PATCH 071/160] create the datamigration --- .../migrations/0039_auto_20210908_1151.py | 87 +++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 src/metax_api/migrations/0039_auto_20210908_1151.py diff --git a/src/metax_api/migrations/0039_auto_20210908_1151.py b/src/metax_api/migrations/0039_auto_20210908_1151.py new file mode 100644 index 00000000..fd09268d --- /dev/null +++ b/src/metax_api/migrations/0039_auto_20210908_1151.py @@ -0,0 +1,87 @@ +# Generated by Django 3.1.13 on 2021-09-08 08:51 +import json + +from django.db import migrations + +import logging + +logger = logging.getLogger(__name__) + +datasets = [ + "1acea3cc-c245-4f6d-80ee-f9c6a6c907be", + "72dc2050-70bc-4f46-9fc7-b3ac2bd58bdc", + "9341625d-ebbb-4625-a2db-6c0424cdcf85", + "4a66600c-79b2-4dcf-a497-69540b4d0817", + "51c7c6c4-0129-4856-be71-072582882cb5", + "56ab6296-9bc9-41b3-95ea-b4655aa7978b", + "66cc0770-08f9-4df3-ac6f-b65a15f5e73e", +] + +""" +Changes: + +metadata_provider_user: tuokalma +to -> +metadata_provider_user: ksuomine + +access_granter: +name: Alma Tuokko +email: alma.tuokko@helsinki.fi +userid: tuokalma +to -> +access_granter: +name: Karina Lukin +email: karina.lukin@helsinki.fi +userid: ksuomine +""" + +def change_metadata_provider_user(apps, schema_editor): + CatalogRecord = apps.get_model('metax_api', 'CatalogRecord') + for cr in CatalogRecord.objects.filter(identifier__in=datasets): + try: + if cr.metadata_provider_user: + provider_usr = json.loads(json.dumps(cr.metadata_provider_user)) + changed_usr = provider_usr.replace("tuokalma", "ksuomine") + cr.metadata_provider_user = changed_usr + + if cr.access_granter: + access_granter = json.loads(json.dumps(cr.access_granter)) + changed_granter = access_granter.replace("Alma Tuokko", "Karina 
Lukin") + changed_granter = changed_granter.replace("alma.tuokko@helsinki.fi", "karina.lukin@helsinki.fi") + changed_granter = changed_granter.replace("tuokalma", "ksuomine") + cr.access_granter = changed_granter + + cr.save() + except Exception as e: + logger.error(e) + + +def revert(apps, schema_editor): + CatalogRecord = apps.get_model('metax_api', 'CatalogRecord') + for cr in CatalogRecord.objects.filter(identifier__in=datasets): + try: + if cr.metadata_provider_user: + provider_usr = json.loads(json.dumps(cr.metadata_provider_user)) + changed_usr = provider_usr.replace("ksuomine", "tuokalma") + cr.metadata_provider_user = changed_usr + + if cr.access_granter: + access_granter = json.loads(json.dumps(cr.access_granter)) + changed_granter = access_granter.replace("Karina Lukin", "Alma Tuokko") + changed_granter = changed_granter.replace("karina.lukin@helsinki.fi", "alma.tuokko@helsinki.fi") + changed_granter = changed_granter.replace("ksuomine", "tuokalma") + cr.access_granter = changed_granter + + cr.save() + except Exception as e: + logger.error(e) + +class Migration(migrations.Migration): + + dependencies = [ + ('metax_api', '0038_remove_catalogrecord_editor'), + ] + + operations = [ + migrations.RunPython(change_metadata_provider_user, revert), + ] \ No newline at end of file From 92e7a29465f9185a9826dca379be8d3cacdab96a Mon Sep 17 00:00:00 2001 From: Jori Niemi <3295718+tahme@users.noreply.github.com> Date: Mon, 13 Sep 2021 09:44:22 +0300 Subject: [PATCH 072/160] CSCFAIRMETA-1103: Support listing datasets by projects --- .../api/rest/base/views/common_view.py | 13 +++++- .../services/catalog_record_service.py | 32 ++++++++++++++ src/metax_api/swagger/v1/swagger.yaml | 5 +++ .../api/rest/base/views/datasets/read.py | 42 +++++++++++++++++++ 4 files changed, 91 insertions(+), 1 deletion(-) diff --git a/src/metax_api/api/rest/base/views/common_view.py b/src/metax_api/api/rest/base/views/common_view.py index ce31ee14..14de18ca 100755 --- a/src/metax_api/api/rest/base/views/common_view.py +++ b/src/metax_api/api/rest/base/views/common_view.py @@ -170,6 +170,7 @@ def get_queryset(self): """ additional_filters = {} q_filters = [] + deduplicated_q_filters = [] CS.set_if_modified_since_filter(self.request, additional_filters) @@ -180,6 +181,10 @@ def get_queryset(self): # Q-filter objects, which can contain more complex filter options such as OR-clauses q_filters = additional_filters.pop("q_filters") + if "deduplicated_q_filters" in additional_filters: + # Q-filter objects that may produce duplicate results + deduplicated_q_filters = additional_filters.pop("deduplicated_q_filters") + if CS.get_boolean_query_param(self.request, "removed"): additional_filters.update({"removed": True}) self.queryset = self.queryset_unfiltered @@ -200,7 +205,13 @@ def get_queryset(self): # if no fields is relation, select_related will be made empty. self.select_related = [rel for rel in self.select_related if rel in self.fields] - queryset = super().get_queryset().filter(*q_filters, **additional_filters) + queryset = super().get_queryset() + if deduplicated_q_filters: + # run filters that may produce duplicates and deduplicate the results. deduplicating just the ids + # in a subquery is faster than deduplicating the full results when there are a lot of duplicates. 
+ id_query = queryset.filter(*deduplicated_q_filters).values("id").distinct() + queryset = queryset.filter(id__in=id_query) + queryset = queryset.filter(*q_filters, **additional_filters) if self.request.META["REQUEST_METHOD"] in WRITE_OPERATIONS: # for update operations, do not select relations in the original queryset diff --git a/src/metax_api/services/catalog_record_service.py b/src/metax_api/services/catalog_record_service.py index bc4ba1d3..2251cf53 100755 --- a/src/metax_api/services/catalog_record_service.py +++ b/src/metax_api/services/catalog_record_service.py @@ -25,6 +25,7 @@ remove_keys_recursively, ) +from .auth_service import AuthService from .common_service import CommonService from .datacite_service import DataciteService from .file_service import FileService @@ -108,6 +109,9 @@ def get_queryset_search_params(cls, request): if request.query_params.get("pas_filter", False): cls.set_pas_filter(queryset_search_params, request) + if request.query_params.get("projects", False): + cls.set_projects_filter(queryset_search_params, request) + if CommonService.has_research_agent_query_params(request): cls.set_actor_filters(queryset_search_params, request) @@ -280,6 +284,34 @@ def set_pas_filter(queryset_search_params, request): else: queryset_search_params["q_filters"] = [q_filter] + @staticmethod + def set_projects_filter(queryset_search_params, request): + """ + Filter datasets that belong to any project in comma-separated projects list. + + A dataset belongs to the projects of the files it contains. Because this is a many-to-many + relationship that can return duplicate datasets, it's necessary to remove non-distinct values + from query results. + """ + projects = request.query_params.get("projects").split(",") + + # non-service users can only query their own projects + if not request.user.is_service: + user_projects = [] + if request.user.username != "": + user_projects = AuthService.get_user_projects(request) + if not set(projects).issubset(user_projects): + raise Http403({"detail": ["User is not member of project"]}) + + q_filter = Q(files__project_identifier__in=projects) + if "deduplicated_q_filters" in queryset_search_params: + queryset_search_params["deduplicated_q_filters"].append(q_filter) + else: + queryset_search_params["deduplicated_q_filters"] = [q_filter] + + return queryset_search_params + + @staticmethod def populate_file_details(cr_json, request): """ diff --git a/src/metax_api/swagger/v1/swagger.yaml b/src/metax_api/swagger/v1/swagger.yaml index efbc2f82..ff34d797 100755 --- a/src/metax_api/swagger/v1/swagger.yaml +++ b/src/metax_api/swagger/v1/swagger.yaml @@ -993,6 +993,11 @@ paths: description: Returns datasets that can be edited with certain api version. 
Possible values are 1 and 2 type: integer required: false + - name: projects + in: query + description: Filter datasets with a comma-separated list of IDA projects + required: false + type: string - $ref: "#/parameters/fields" - $ref: "#/parameters/include_legacy" responses: diff --git a/src/metax_api/tests/api/rest/base/views/datasets/read.py b/src/metax_api/tests/api/rest/base/views/datasets/read.py index e4bb502d..e8fe9e2e 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/read.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/read.py @@ -836,6 +836,48 @@ def test_read_catalog_record_search_by_data_catalog_id(self): self.assertEqual(len(response.data["results"]), 1) self.assertEqual(response.data["results"][0]["data_catalog"]["identifier"], dc_id) + def test_filter_by_projects_for_service(self): + """Filter datasets by projects. Services can access all projects.""" + user = settings.API_TEST_USER + self._use_http_authorization(username=user["username"], password=user["password"]) + response = self.client.get("/rest/datasets?projects=project_x&pagination=false") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data), 14) + + response = self.client.get("/rest/datasets?projects=research_project_112&pagination=false") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data), 1) + + response = self.client.get( + "/rest/datasets?projects=research_project_112,project_x&pagination=false" + ) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data), 15) + + response = self.client.get("/rest/datasets?projects=no_datasets_here&pagination=false") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data), 0) + + @responses.activate + def test_filter_by_projects_for_end_user(self): + """Filter datasets by projects. 
End users can only access their own projects.""" + self._mock_token_validation_succeeds() + self._use_http_authorization( + method="bearer", + token={"group_names": ["IDA01:project_x", "IDA01:no_datasets_here"], "CSCUserName": "testi"} + ) + + response = self.client.get("/rest/datasets?projects=project_x&pagination=false") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data), 14) + + response = self.client.get("/rest/datasets?projects=no_datasets_here&pagination=false") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data), 0) + + response = self.client.get("/rest/datasets?projects=research_project_112&pagination=false") + self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) + def test_filter_by_deprecated(self): cr = CatalogRecord.objects.get(pk=1) cr.deprecated = True From 1762bcae16494f3135750ed02eee77b42b5a9353 Mon Sep 17 00:00:00 2001 From: Toni Date: Tue, 14 Sep 2021 12:00:46 +0300 Subject: [PATCH 073/160] fix illegal migration method, manage commands on static branch update --- .gitlab-ci.yml | 10 ++++++++++ src/metax_api/migrations/0033_auto_20210201_1551.py | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0de37e73..c184fae9 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -59,6 +59,16 @@ update_proxy: when: always - when: never +update_run_manage_cmd: + stage: update + environment: $CI_COMMIT_REF_NAME + script: + - ansible-playbook -i $ANSIBLE_INVENTORY $MANAGE_PLAYBOOK -e "build_id=$CI_COMMIT_SHORT_SHA" + rules: + - if: $CI_COMMIT_BRANCH =~ /^(demo|stable|staging|test)$/ + when: always + - when: never + clean_previous_build: stage: clean_build environment: $CI_COMMIT_REF_NAME diff --git a/src/metax_api/migrations/0033_auto_20210201_1551.py b/src/metax_api/migrations/0033_auto_20210201_1551.py index e90ea617..14f47595 100755 --- a/src/metax_api/migrations/0033_auto_20210201_1551.py +++ b/src/metax_api/migrations/0033_auto_20210201_1551.py @@ -11,7 +11,7 @@ def recount_directory_files(apps, schema_editor): logger.info(f"found {dirs_with_no_files.count()} directories without files") aff_rows = 0 for dir in dirs_with_no_files: - dir.calculate_byte_size_and_file_count() + # dir.calculate_byte_size_and_file_count() aff_rows += 1 logger.info(f"migration 0033 complete with {aff_rows} affected rows") From 695735d6c040fb5826ca11bb0a5240033eb68b4f Mon Sep 17 00:00:00 2001 From: Toni Date: Tue, 14 Sep 2021 12:19:59 +0300 Subject: [PATCH 074/160] multiple update steps are not allowed in gitlab pipelines. 
Merge update stages --- .gitlab-ci.yml | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c184fae9..02300332 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -49,20 +49,11 @@ clean_gitlab_env: when: manual - when: never -update_proxy: +update_metax: stage: update environment: $CI_COMMIT_REF_NAME script: - ansible-playbook -i $ANSIBLE_INVENTORY $UPDATE_PROXY_PLAYBOOK -e "build_id=$CI_COMMIT_SHORT_SHA" - rules: - - if: $CI_COMMIT_BRANCH =~ /^(demo|stable|staging|test)$/ - when: always - - when: never - -update_run_manage_cmd: - stage: update - environment: $CI_COMMIT_REF_NAME - script: - ansible-playbook -i $ANSIBLE_INVENTORY $MANAGE_PLAYBOOK -e "build_id=$CI_COMMIT_SHORT_SHA" rules: - if: $CI_COMMIT_BRANCH =~ /^(demo|stable|staging|test)$/ when: always - when: never From 50278c155c42cb8af281afe898777f3ea3cb2c4a Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Tue, 6 Jul 2021 13:29:21 +0300 Subject: [PATCH 075/160] WIP: CSCFAIRMETA-987, initial commit for new api - Count_files returns the count and byte size of files that are part of a given project and belong to a published dataset - The project(s) are given as a comma-separated list in the query parameter - WIP: removed parameter for files and datasets and tests --- .../api/rpc/base/views/statistic_rpc.py | 9 +++++++++ src/metax_api/services/statistic_service.py | 17 ++++++++++++++++- .../settings/components/access_control.py | 1 + 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/src/metax_api/api/rpc/base/views/statistic_rpc.py b/src/metax_api/api/rpc/base/views/statistic_rpc.py index 41e85f71..66df119a 100755 --- a/src/metax_api/api/rpc/base/views/statistic_rpc.py +++ b/src/metax_api/api/rpc/base/views/statistic_rpc.py @@ -136,3 +136,12 @@ def organization_datasets_cumulative(self, request): @action(detail=False, methods=["get"], url_path="unused_files") def unused_files(self, request): return Response(StatisticService.unused_files()) + + @action(detail=False, methods=["get"], url_path="count_files") + def count_files(self, request): + if not request.query_params.get("projects"): + raise Http400("projects parameter is required") + + params = { "projects": list(CS.get_list_query_param(request, "projects")) } + + return Response(StatisticService.count_files(**params)) diff --git a/src/metax_api/services/statistic_service.py b/src/metax_api/services/statistic_service.py index 71d5af7b..3af0f076 100755 --- a/src/metax_api/services/statistic_service.py +++ b/src/metax_api/services/statistic_service.py @@ -9,9 +9,11 @@ from django.conf import settings from django.db import connection +from django.db.models import Count, Sum +from django.db.models.functions import Coalesce from metax_api.exceptions import Http400 -from metax_api.models import CatalogRecord, DataCatalog +from metax_api.models import CatalogRecord, DataCatalog, File _logger = logging.getLogger(__name__) @@ -607,3 +609,16 @@ def unused_files(cls): _logger.info("Done retrieving total counts") return file_stats + + @classmethod + def count_files(cls, projects): + file_query = File.objects_unfiltered.filter() + + # "record" is defined on CatalogRecord to enable lookups from File to CatalogRecord + file_query = file_query.filter(project_identifier__in=projects) \ + .filter(record__state="published") \ + .values("id", "byte_size") \ + .distinct() + + # Coalesce is required to provide a default value + return file_query.aggregate(count=Count("id"), byte_size=Coalesce(Sum("byte_size"), 0)) diff --git 
a/src/metax_api/settings/components/access_control.py b/src/metax_api/settings/components/access_control.py index 202459c2..387c0c90 100755 --- a/src/metax_api/settings/components/access_control.py +++ b/src/metax_api/settings/components/access_control.py @@ -146,6 +146,7 @@ def __lt__(self, other): api_permissions.rpc.statistics.all_datasets_cumulative.use = [Role.ALL] api_permissions.rpc.statistics.catalog_datasets_cumulative.use = [Role.ALL] api_permissions.rpc.statistics.count_datasets.use = [Role.ALL] +api_permissions.rpc.statistics.count_files.use = [Role.ALL] api_permissions.rpc.statistics.deprecated_datasets_cumulative.use = [Role.ALL] api_permissions.rpc.statistics.end_user_datasets_cumulative.use = [Role.ALL] api_permissions.rpc.statistics.harvested_datasets_cumulative.use = [Role.ALL] From 4d136c29745877de73d02b327c8377dcb64457e0 Mon Sep 17 00:00:00 2001 From: aptiaine Date: Fri, 9 Jul 2021 09:00:31 +0300 Subject: [PATCH 076/160] Counting project files --- src/metax_api/api/rpc/base/views/statistic_rpc.py | 5 ++++- src/metax_api/services/statistic_service.py | 12 ++++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/metax_api/api/rpc/base/views/statistic_rpc.py b/src/metax_api/api/rpc/base/views/statistic_rpc.py index 66df119a..96c4713f 100755 --- a/src/metax_api/api/rpc/base/views/statistic_rpc.py +++ b/src/metax_api/api/rpc/base/views/statistic_rpc.py @@ -142,6 +142,9 @@ def count_files(self, request): if not request.query_params.get("projects"): raise Http400("projects parameter is required") - params = { "projects": list(CS.get_list_query_param(request, "projects")) } + params = { + "projects": list(CS.get_list_query_param(request, "projects")), + "removed": request.query_params.get("removed", None), + } return Response(StatisticService.count_files(**params)) diff --git a/src/metax_api/services/statistic_service.py b/src/metax_api/services/statistic_service.py index 3af0f076..392bf591 100755 --- a/src/metax_api/services/statistic_service.py +++ b/src/metax_api/services/statistic_service.py @@ -6,7 +6,7 @@ # :license: MIT import logging - +from collections import OrderedDict from django.conf import settings from django.db import connection from django.db.models import Count, Sum @@ -611,12 +611,16 @@ def unused_files(cls): return file_stats @classmethod - def count_files(cls, projects): + def count_files(cls, projects, removed=None): + kwargs = OrderedDict() file_query = File.objects_unfiltered.filter() + kwargs['project_identifier__in'] = projects + kwargs['record__state'] = "published" + if removed is not None: + kwargs['removed'] = removed # "record" is defined for CatalogRecord to enable lookups from Files to CatalogRecord - file_query = file_query.filter(project_identifier__in=projects) \ - .filter(record__state="published") \ + file_query = file_query.filter(**kwargs) \ .values("id", "byte_size") \ .distinct() From 0147d51fcd89da1a5ffe7a9991cf028cf5a615c3 Mon Sep 17 00:00:00 2001 From: aptiaine Date: Wed, 14 Jul 2021 09:13:25 +0300 Subject: [PATCH 077/160] Tests for counting files feature --- src/metax_api/services/statistic_service.py | 2 +- src/metax_api/tests/api/rpc/v2/views/statistic_rpc.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/metax_api/services/statistic_service.py b/src/metax_api/services/statistic_service.py index 392bf591..8e79d66a 100755 --- a/src/metax_api/services/statistic_service.py +++ b/src/metax_api/services/statistic_service.py @@ -618,7 +618,7 @@ def count_files(cls, projects, 
removed=None): kwargs['project_identifier__in'] = projects kwargs['record__state'] = "published" if removed is not None: - kwargs['removed'] = removed + kwargs['removed'] = False if removed == 'false' else True # "record" is defined for CatalogRecord to enable lookups from Files to CatalogRecord file_query = file_query.filter(**kwargs) \ .values("id", "byte_size") \ diff --git a/src/metax_api/tests/api/rpc/v2/views/statistic_rpc.py b/src/metax_api/tests/api/rpc/v2/views/statistic_rpc.py index 4bededc0..840125a9 100755 --- a/src/metax_api/tests/api/rpc/v2/views/statistic_rpc.py +++ b/src/metax_api/tests/api/rpc/v2/views/statistic_rpc.py @@ -9,10 +9,12 @@ from django.conf import settings from django.core.management import call_command +from django.db.models import Count, Sum +from django.db.models.functions import Coalesce from rest_framework import status from rest_framework.test import APITestCase -from metax_api.models import CatalogRecord, DataCatalog +from metax_api.models import CatalogRecord, DataCatalog, File from metax_api.models.catalog_record import ACCESS_TYPES from metax_api.tests.api.rest.base.views.datasets.write import CatalogRecordApiWriteCommon from metax_api.tests.utils import TestClassUtils, test_data_file_path From be80c90f5ffcada5fdc610116eacd285c743b3fe Mon Sep 17 00:00:00 2001 From: aptiaine Date: Fri, 16 Jul 2021 11:45:27 +0300 Subject: [PATCH 078/160] Query method changed --- src/metax_api/services/statistic_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/metax_api/services/statistic_service.py b/src/metax_api/services/statistic_service.py index 8e79d66a..21d547e7 100755 --- a/src/metax_api/services/statistic_service.py +++ b/src/metax_api/services/statistic_service.py @@ -613,7 +613,7 @@ def unused_files(cls): @classmethod def count_files(cls, projects, removed=None): kwargs = OrderedDict() - file_query = File.objects_unfiltered.filter() + file_query = File.objects_unfiltered.all() kwargs['project_identifier__in'] = projects kwargs['record__state'] = "published" From 646d6908a33a66fc05c8850969446026481f0cfd Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Fri, 13 Aug 2021 16:01:45 +0300 Subject: [PATCH 079/160] added new mimetypes and deleted ids as requested --- .../local-refdata/file_format_version.json | 71 +++++++++++-------- 1 file changed, 43 insertions(+), 28 deletions(-) diff --git a/src/metax_api/tasks/refdata/refdata_indexer/resources/local-refdata/file_format_version.json b/src/metax_api/tasks/refdata/refdata_indexer/resources/local-refdata/file_format_version.json index a7140c51..b0af37db 100755 --- a/src/metax_api/tasks/refdata/refdata_indexer/resources/local-refdata/file_format_version.json +++ b/src/metax_api/tasks/refdata/refdata_indexer/resources/local-refdata/file_format_version.json @@ -3,7 +3,6 @@ {"id": "file_format_version_application_epub+zip_2.0.1", "input_file_format": "application/epub+zip", "output_format_version": "2.0.1", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_epub+zip_2.0.1"}, {"id": "file_format_version_application_epub+zip_3.0.0", "input_file_format": "application/epub+zip", "output_format_version": "3.0.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_epub+zip_3.0.0"}, {"id": "file_format_version_application_epub+zip_3.0.1", "input_file_format": "application/epub+zip", "output_format_version": "3.0.1", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_epub+zip_3.0.1"}, - {"id": 
"file_format_version_application_epub+zip_3.1", "input_file_format": "application/epub+zip", "output_format_version": "3.1", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_epub+zip_3.1"}, {"id": "file_format_version_application_xhtml+xml_1.0", "input_file_format": "application/xhtml+xml", "output_format_version": "1.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_xhtml+xml_1.0"}, {"id": "file_format_version_application_xhtml+xml_1.1", "input_file_format": "application/xhtml+xml", "output_format_version": "1.1", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_xhtml+xml_1.1"}, {"id": "file_format_version_text_xml_1.0", "input_file_format": "text/xml", "output_format_version": "1.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/text_xml_1.0"}, @@ -51,36 +50,11 @@ {"id": "file_format_version_image_jp2", "input_file_format": "image/jp2", "output_format_version": "", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/image_jp2"}, {"id": "file_format_version_image_tiff_6.0", "input_file_format": "image/tiff", "output_format_version": "6.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/image_tiff_6.0"}, {"id": "file_format_version_image_png_1.2", "input_file_format": "image/png", "output_format_version": "1.2", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/image_png_1.2"}, - {"id": "file_format_version_application_x-internet-archive_1.0", "input_file_format": "application/x-internet-archive", "output_format_version": "1.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_x-internet-archive_1.0"}, - {"id": "file_format_version_application_x-internet-archive_1.1", "input_file_format": "application/x-internet-archive", "output_format_version": "1.1", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_x-internet-archive_1.1"}, - {"id": "file_format_version_application_warc_0.17", "input_file_format": "application/warc", "output_format_version": "0.17", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_warc_0.17"}, {"id": "file_format_version_application_warc_1.0", "input_file_format": "application/warc", "output_format_version": "1.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_warc_1.0"}, {"id": "file_format_version_image_tiff_1.0", "input_file_format": "image/tiff", "output_format_version": "1.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/image_tiff_1.0"}, {"id": "file_format_version_application_gml+xml_3.2.1", "input_file_format": "application/gml+xml", "output_format_version": "3.2.1", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_gml+xml_3.2.1"}, {"id": "file_format_version_application_vnd.google-earth.kml+xml_2.3", "input_file_format": "application/vnd.google-earth.kml+xml", "output_format_version": "2.3", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.google-earth.kml+xml_2.3"}, {"id": "file_format_version_application_x-spss-por", "input_file_format": "application/x-spss-por", "output_format_version": "", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_x-spss-por"}, - {"id": "file_format_version_application_msword_8.0", "input_file_format": "application/msword", "output_format_version": "8.0", "uri": 
"http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_msword_8.0"}, - {"id": "file_format_version_application_msword_8.5", "input_file_format": "application/msword", "output_format_version": "8.5", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_msword_8.5"}, - {"id": "file_format_version_application_msword_9.0", "input_file_format": "application/msword", "output_format_version": "9.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_msword_9.0"}, - {"id": "file_format_version_application_msword_10.0", "input_file_format": "application/msword", "output_format_version": "10.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_msword_10.0"}, - {"id": "file_format_version_application_msword_11.0", "input_file_format": "application/msword", "output_format_version": "11.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_msword_11.0"}, - {"id": "file_format_version_application_vnd.openxmlformats-officedocument.wordprocessingml.document_12.0", "input_file_format": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", "output_format_version": "12.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.openxmlformats-officedocument.wordprocessingml.document_12.0"}, - {"id": "file_format_version_application_vnd.openxmlformats-officedocument.wordprocessingml.document_14.0", "input_file_format": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", "output_format_version": "14.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.openxmlformats-officedocument.wordprocessingml.document_14.0"}, - {"id": "file_format_version_application_vnd.openxmlformats-officedocument.wordprocessingml.document_15.0", "input_file_format": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", "output_format_version": "15.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.openxmlformats-officedocument.wordprocessingml.document_15.0"}, - {"id": "file_format_version_application_vnd.ms-excel_8.0", "input_file_format": "application/vnd.ms-excel", "output_format_version": "8.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.ms-excel_8.0"}, - {"id": "file_format_version_application_vnd.ms-excel_9.0", "input_file_format": "application/vnd.ms-excel", "output_format_version": "9.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.ms-excel_9.0"}, - {"id": "file_format_version_application_vnd.ms-excel_10.0", "input_file_format": "application/vnd.ms-excel", "output_format_version": "10.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.ms-excel_10.0"}, - {"id": "file_format_version_application_vnd.ms-excel_11.0", "input_file_format": "application/vnd.ms-excel", "output_format_version": "11.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.ms-excel_11.0"}, - {"id": "file_format_version_application_vnd.openxmlformats-officedocument.spreadsheetml.sheet_12.0", "input_file_format": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", "output_format_version": "12.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.openxmlformats-officedocument.spreadsheetml.sheet_12.0"}, - {"id": 
"file_format_version_application_vnd.openxmlformats-officedocument.spreadsheetml.sheet_14.0", "input_file_format": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", "output_format_version": "14.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.openxmlformats-officedocument.spreadsheetml.sheet_14.0"}, - {"id": "file_format_version_application_vnd.openxmlformats-officedocument.spreadsheetml.sheet_15.0", "input_file_format": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", "output_format_version": "15.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.openxmlformats-officedocument.spreadsheetml.sheet_15.0"}, - {"id": "file_format_version_application_vnd.ms-powerpoint_8.0", "input_file_format": "application/vnd.ms-powerpoint", "output_format_version": "8.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.ms-powerpoint_8.0"}, - {"id": "file_format_version_application_vnd.ms-powerpoint_9.0", "input_file_format": "application/vnd.ms-powerpoint", "output_format_version": "9.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.ms-powerpoint_9.0"}, - {"id": "file_format_version_application_vnd.ms-powerpoint_10.0", "input_file_format": "application/vnd.ms-powerpoint", "output_format_version": "10.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.ms-powerpoint_10.0"}, - {"id": "file_format_version_application_vnd.ms-powerpoint_11.0", "input_file_format": "application/vnd.ms-powerpoint", "output_format_version": "11.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.ms-powerpoint_11.0"}, - {"id": "file_format_version_application_vnd.openxmlformats-officedocument.presentationml.presentation_12.0", "input_file_format": "application/vnd.openxmlformats-officedocument.presentationml.presentation", "output_format_version": "12.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.openxmlformats-officedocument.presentationml.presentation_12.0"}, - {"id": "file_format_version_application_vnd.openxmlformats-officedocument.presentationml.presentation_14.0", "input_file_format": "application/vnd.openxmlformats-officedocument.presentationml.presentation", "output_format_version": "14.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.openxmlformats-officedocument.presentationml.presentation_14.0"}, - {"id": "file_format_version_application_vnd.openxmlformats-officedocument.presentationml.presentation_15.0", "input_file_format": "application/vnd.openxmlformats-officedocument.presentationml.presentation", "output_format_version": "15.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_vnd.openxmlformats-officedocument.presentationml.presentation_15.0"}, {"id": "file_format_version_application_pdf_1.2", "input_file_format": "application/pdf", "output_format_version": "1.2", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_pdf_1.2"}, {"id": "file_format_version_application_pdf_1.3", "input_file_format": "application/pdf", "output_format_version": "1.3", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_pdf_1.3"}, {"id": "file_format_version_application_pdf_1.4", "input_file_format": "application/pdf", "output_format_version": "1.4", "uri": 
"http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_pdf_1.4"}, @@ -88,7 +62,6 @@ {"id": "file_format_version_application_pdf_1.6", "input_file_format": "application/pdf", "output_format_version": "1.6", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_pdf_1.6"}, {"id": "file_format_version_application_pdf_1.7", "input_file_format": "application/pdf", "output_format_version": "1.7", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_pdf_1.7"}, {"id": "file_format_version_audio_x-aiff", "input_file_format": "audio/x-aiff", "output_format_version": "", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/audio_x-aiff"}, - {"id": "file_format_version_audio_mpeg", "input_file_format": "audio/mpeg", "output_format_version": "", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/audio_mpeg"}, {"id": "file_format_version_audio_x-ms-wma_9", "input_file_format": "audio/x-ms-wma", "output_format_version": "9", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/audio_x-ms-wma_9"}, {"id": "file_format_version_video_dv", "input_file_format": "video/dv", "output_format_version": "", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/video_dv"}, {"id": "file_format_version_video_mpeg_1", "input_file_format": "video/mpeg", "output_format_version": "1", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/video_mpeg_1"}, @@ -96,5 +69,47 @@ {"id": "file_format_version_video_x-ms-wmv_9", "input_file_format": "video/x-ms-wmv", "output_format_version": "9", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/video_x-ms-wmv_9"}, {"id": "file_format_version_application_postscript_3.0", "input_file_format": "application/postscript", "output_format_version": "3.0", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/application_postscript_3.0"}, {"id": "file_format_version_image_gif_1987a", "input_file_format": "image/gif", "output_format_version": "1987a", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/image_gif_1987a"}, - {"id": "file_format_version_image_gif_1989a", "input_file_format": "image/gif", "output_format_version": "1989a", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/image_gif_1989a"} + {"id": "file_format_version_image_gif_1989a", "input_file_format": "image/gif", "output_format_version": "1989a", "uri": "http://uri.suomi.fi/codelist/fairdata/file_format_version/code/image_gif_1989a"}, + {"mimetype": "application/epub+zip", "version": "3.2"}, + {"mimetype": "application/matlab", "version": "7"}, + {"mimetype": "application/matlab", "version": "7.3"}, + {"mimetype": "application/msword", "version": "97-2003"}, + {"mimetype": "application/mxf", "version": ""}, + {"mimetype": "application/vnd.ms-excel", "version": "8"}, + {"mimetype": "application/vnd.ms-excel", "version": "8X"}, + {"mimetype": "application/vnd.ms-powerpoint", "version": "97-2003"}, + {"mimetype": "application/vnd.oasis.opendocument.formula", "version": "1.3"}, + {"mimetype": "application/vnd.oasis.opendocument.graphics", "version": "1.3"}, + {"mimetype": "application/vnd.oasis.opendocument.presentation", "version": "1.3"}, + {"mimetype": "application/vnd.oasis.opendocument.spreadsheet", "version": "1.3"}, + {"mimetype": "application/vnd.oasis.opendocument.text", "version": "1.3"}, + {"mimetype": 
"application/vnd.openxmlformats-officedocument.presentationml.presentation", "version": "2007 onwards"}, + {"mimetype": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", "version": "2007 onwards"}, + {"mimetype": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", "version": "2007 onwards"}, + {"mimetype": "application/x-hdf5", "version": "1.10"}, + {"mimetype": "application/x-siard", "version": "2.0"}, + {"mimetype": "application/x-siard", "version": "2.1"}, + {"mimetype": "application/xhtml+xml", "version": "5.0"}, + {"mimetype": "audio/L16", "version": ""}, + {"mimetype": "audio/L20", "version": ""}, + {"mimetype": "audio/L24", "version": ""}, + {"mimetype": "audio/L8", "version": ""}, + {"mimetype": "audio/mpeg", "version": "1"}, + {"mimetype": "audio/mpeg", "version": "2"}, + {"mimetype": "image/svg+xml", "version": "1.1"}, + {"mimetype": "image/tiff", "version": "1.5"}, + {"mimetype": "text/html", "version": "5.0"}, + {"mimetype": "text/html", "version": "5.1"}, + {"mimetype": "text/html", "version": "5.2"}, + {"mimetype": "text/xml", "version": "1.1"}, + {"mimetype": "video/avi", "version": ""}, + {"mimetype": "video/mj2", "version": ""}, + {"mimetype": "video/MP1S", "version": ""}, + {"mimetype": "video/MP2P", "version": ""}, + {"mimetype": "video/MP2T", "version": ""}, + {"mimetype": "video/MP2T", "version": ""}, + {"mimetype": "video/quicktime", "version": ""}, + {"mimetype": "video/x-ffv", "version": "3"}, + {"mimetype": "video/x-matroska", "version": "4"}, + {"mimetype": "video/x-ms-asf", "version": ""} ] From 7de22495cc4456a0d772dd3c9d58f1ba9266755f Mon Sep 17 00:00:00 2001 From: Toni Date: Thu, 23 Sep 2021 12:25:56 +0300 Subject: [PATCH 080/160] Debug toolbar can have negative effect on performance, make it optional with env-var --- ENV_VARS.md | 1 + src/metax_api/settings/__init__.py | 1 + src/metax_api/settings/environments/local.py | 6 ++++-- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/ENV_VARS.md b/ENV_VARS.md index c3fa375c..7afadd56 100755 --- a/ENV_VARS.md +++ b/ENV_VARS.md @@ -14,6 +14,7 @@ copy .env.template to .env and fill the required values from below table. 
Requir | DATACITE_URL | yes | | | DATACITE_USERNAME | yes | | | DEBUG | no | False | +| DEBUG_TOOLBAR_ENABLED | no | True | Enable debug toolbar on local environment | | DJANGO_ENV | no | local | Specifies the environment, corresponds with the environments found in src/metax_api/settings/environments/ | | DJANGO_SECRET_KEY | yes | | | ELASTIC_SEARCH_HOSTS | no | localhost | Elastic Search instance IPs | diff --git a/src/metax_api/settings/__init__.py b/src/metax_api/settings/__init__.py index cbf5c837..b091c712 100755 --- a/src/metax_api/settings/__init__.py +++ b/src/metax_api/settings/__init__.py @@ -23,6 +23,7 @@ ALWAYS_RELOAD_REFERENCE_DATA_ON_RESTART=(bool, True), API_USERS_PATH=(str, "/etc/fairdata-metax/api_users"), DEBUG=(bool, False), + DEBUG_TOOLBAR_ENABLED=(bool, True), DJANGO_ENV=(str, "local"), ELASTIC_SEARCH_HOSTS=(list, ["localhost"]), ELASTIC_SEARCH_PORT=(int, 9200), diff --git a/src/metax_api/settings/environments/local.py b/src/metax_api/settings/environments/local.py index f0feecba..bed7702b 100755 --- a/src/metax_api/settings/environments/local.py +++ b/src/metax_api/settings/environments/local.py @@ -1,11 +1,13 @@ +from metax_api.settings import env from metax_api.settings.components.access_control import Role, api_permissions, prepare_perm_values from metax_api.settings.components.common import ALLOWED_HOSTS, DEBUG, INSTALLED_APPS, MIDDLEWARE + ALLOWED_HOSTS += ["*"] -if "debug_toolbar" not in INSTALLED_APPS: +if "debug_toolbar" not in INSTALLED_APPS and env("DEBUG_TOOLBAR_ENABLED"): INSTALLED_APPS += ["debug_toolbar"] -if "debug_toolbar.middleware.DebugToolbarMiddleware" not in MIDDLEWARE: +if "debug_toolbar.middleware.DebugToolbarMiddleware" not in MIDDLEWARE and env("DEBUG_TOOLBAR_ENABLED"): MIDDLEWARE = ["debug_toolbar.middleware.DebugToolbarMiddleware"] + MIDDLEWARE INTERNAL_IPS = ["127.0.0.1", "0.0.0.0"] From 2a5c2bf4f266aa9b60ab3443d8da11063ff512b6 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Mon, 23 Aug 2021 16:29:07 +0300 Subject: [PATCH 081/160] added functionality to filter actors by organisational/person id and added tests + modified swagger documentation for this accordingly --- .../services/catalog_record_service.py | 91 ++++++++++++------- src/metax_api/swagger/v1/swagger.yaml | 7 +- src/metax_api/swagger/v2/swagger.yaml | 7 +- .../api/rest/base/views/datasets/read.py | 28 ++++++ 4 files changed, 98 insertions(+), 35 deletions(-) diff --git a/src/metax_api/services/catalog_record_service.py b/src/metax_api/services/catalog_record_service.py index 2251cf53..15cd1ae7 100755 --- a/src/metax_api/services/catalog_record_service.py +++ b/src/metax_api/services/catalog_record_service.py @@ -5,6 +5,7 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT import logging +import re import urllib.parse from collections import defaultdict from os.path import dirname, join @@ -167,16 +168,22 @@ def set_actor_filters(queryset_search_params, request): """ def _get_person_filter(agent, person): + param = "name" + # check if query parameter is person's ID + if re.search(r"((\d*-\d*)+)", person): + person = "http://orcid.org/" + person + param = "identifier" + name_filter = Q() # only one publisher possible if agent == "publisher": - name_filter |= Q(**{f"research_dataset__{agent}__name__iregex": person}) + name_filter |= Q(**{f"research_dataset__{agent}__{param}__iregex": person}) else: # having same problem as in set_pas_filter below..
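# ---- editor's sketch (hedged annotation, not part of the original patch) ----
# Context for the loop below: Django's JSONField lookups cannot apply iregex
# across every element of a JSON array, so the service ORs together lookups
# for array indices 0..2 and then adds an exact-match __contains fallback for
# any later elements. A standalone illustration of the same Q-object
# construction (model and field names assumed from this patch):
from django.db.models import Q

def person_q(agent: str, person: str, param: str = "name", max_indexed: int = 3) -> Q:
    q = Q()
    for i in range(max_indexed):
        # case-insensitive regex match against the i:th actor in the JSON array
        q |= Q(**{f"research_dataset__{agent}__{i}__{param}__iregex": person})
    # jsonb containment fallback: exact match only, but covers actors at any index
    q |= Q(**{f"research_dataset__{agent}__contains": [{param: person}]})
    return q

# e.g. CatalogRecord.objects.filter(person_q("creator", "Tarmo Termiitti"))
# ---- end of editor's sketch ----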
for i in range(3): - name_filter |= Q(**{f"research_dataset__{agent}__{i}__name__iregex": person}) + name_filter |= Q(**{f"research_dataset__{agent}__{i}__{param}__iregex": person}) - name_filter |= Q(**{f"research_dataset__{agent}__contains": [{"name": person}]}) + name_filter |= Q(**{f"research_dataset__{agent}__contains": [{param: person}]}) # regex will find matches from organization name fields so have to disable it person_filter = Q(**{f"research_dataset__{agent}__contains": [{"@type": "Person"}]}) @@ -185,44 +192,66 @@ def _get_person_filter(agent, person): return name_filter def _get_org_filter(agent, org): + name = "name" + name_en = "name__en" + name_fi = "name__fi" + # check if query parameter is organizational ID + if re.search(r"\b\d{5}\b", org[:5]): + org = "http://uri.suomi.fi/codelist/fairdata/organization/code/" + org + name = "identifier" + name_en = "identifier" + name_fi = "identifier" + name_filter = Q() # only one publisher possible if agent == "publisher": - name_filter |= Q(**{f"research_dataset__{agent}__name__en__iregex": org}) - name_filter |= Q(**{f"research_dataset__{agent}__name__fi__iregex": org}) - name_filter |= Q(**{f"research_dataset__{agent}__member_of__name__en__iregex": org}) - name_filter |= Q(**{f"research_dataset__{agent}__member_of__name__fi__iregex": org}) + name_filter |= Q(**{f"research_dataset__{agent}__{name_en}__iregex": org}) + name_filter |= Q(**{f"research_dataset__{agent}__{name_fi}__iregex": org}) + name_filter |= Q(**{f"research_dataset__{agent}__member_of__{name_en}__iregex": org}) + name_filter |= Q(**{f"research_dataset__{agent}__member_of__{name_fi}__iregex": org}) else: for i in range(3): - name_filter |= Q(**{f"research_dataset__{agent}__{i}__name__en__iregex": org}) - name_filter |= Q(**{f"research_dataset__{agent}__{i}__name__fi__iregex": org}) + name_filter |= Q(**{f"research_dataset__{agent}__{i}__{name_en}__iregex": org}) + name_filter |= Q(**{f"research_dataset__{agent}__{i}__{name_fi}__iregex": org}) name_filter |= Q( - **{f"research_dataset__{agent}__{i}__member_of__name__en__iregex": org} + **{f"research_dataset__{agent}__{i}__member_of__{name_en}__iregex": org} ) name_filter |= Q( - **{f"research_dataset__{agent}__{i}__member_of__name__fi__iregex": org} + **{f"research_dataset__{agent}__{i}__member_of__{name_fi}__iregex": org} ) - name_filter |= Q( - **{f"research_dataset__{agent}__contains": [{"name": {"en": org}}]} - ) - name_filter |= Q( - **{f"research_dataset__{agent}__contains": [{"name": {"fi": org}}]} - ) - name_filter |= Q( - **{ - f"research_dataset__{agent}__contains": [ - {"member_of": {"name": {"en": org}}} - ] - } - ) - name_filter |= Q( - **{ - f"research_dataset__{agent}__contains": [ - {"member_of": {"name": {"fi": org}}} - ] - } - ) + if name == "name": + name_filter |= Q( + **{f"research_dataset__{agent}__contains": [{name: {"en": org}}]} + ) + name_filter |= Q( + **{f"research_dataset__{agent}__contains": [{name: {"fi": org}}]} + ) + name_filter |= Q( + **{ + f"research_dataset__{agent}__contains": [ + {"member_of": {name: {"en": org}}} + ] + } + ) + name_filter |= Q( + **{ + f"research_dataset__{agent}__contains": [ + {"member_of": {name: {"fi": org}}} + ] + } + ) + else: + name_filter |= Q( + **{f"research_dataset__{agent}__contains": [{name: org}]} + ) + name_filter |= Q( + **{ + f"research_dataset__{agent}__contains": [ + {"member_of": {name: org}} + ] + } + ) return name_filter diff --git a/src/metax_api/swagger/v1/swagger.yaml b/src/metax_api/swagger/v1/swagger.yaml index ff34d797..9c76ecc8 
100755 --- a/src/metax_api/swagger/v1/swagger.yaml +++ b/src/metax_api/swagger/v1/swagger.yaml @@ -976,15 +976,18 @@ paths: - name: actor_filter in: query description: | - Actor_filters are a collection of filter parameters for filtering according to the name + Actor_filters are a collection of filter parameters for filtering according to the name or organizational/person ID of creator, curator, publisher or rights_holder actors. Actor type must be defined as a suffix in the filter name ('_person' or '_organization'). Actor type '_organization' finds matches from the "is_member_of" field if the actor is a person. Multiple actor_filters can be applied simultaneously (AND) - or separately (OR) by using 'condition_separator'. Default separator is AND. + or separately (OR) by using 'condition_separator'. Default separator is AND. If filtering by organizational/person ID, + the search must use complete IDs. Complete person IDs consist of 19 characters matching the following pattern: 0000-0002-1825-0097. + Complete organizational IDs consist of at least 5 characters, the first 5 characters always being numbers (e.g. 00170 or 01901-H960). Examples: '?creator_person=person_name' + '?creator_organization=organizational_id' '?publisher_organization=some organisation&creator_person=person_name&condition_separator=or' required: false type: string diff --git a/src/metax_api/swagger/v2/swagger.yaml b/src/metax_api/swagger/v2/swagger.yaml index 58c8b7c5..682ffdf8 100755 --- a/src/metax_api/swagger/v2/swagger.yaml +++ b/src/metax_api/swagger/v2/swagger.yaml @@ -981,15 +981,18 @@ paths: - name: actor_filter in: query description: | - Actor_filters are a collection of filter parameters for filtering according to the name + Actor_filters are a collection of filter parameters for filtering according to the name or organizational/person ID of creator, curator, publisher or rights_holder actors. Actor type must be defined as a suffix in the filter name ('_person' or '_organization'). Actor type '_organization' finds matches from the "is_member_of" field if the actor is a person. Multiple actor_filters can be applied simultaneously (AND) - or separately (OR) by using 'condition_separator'. Default separator is AND. + or separately (OR) by using 'condition_separator'. Default separator is AND. If filtering by organizational/person ID, + the search must use complete IDs. Complete person IDs consist of 19 characters matching the following pattern: 0000-0002-1825-0097. + Complete organizational IDs consist of at least 5 characters, the first 5 characters always being numbers (e.g. 00170 or 01901-H960).
Examples: '?creator_person=person_name' + '?creator_organization=organizational_id' '?publisher_organization=some organisation&creator_person=person_name&condition_separator=or' required: false type: string diff --git a/src/metax_api/tests/api/rest/base/views/datasets/read.py b/src/metax_api/tests/api/rest/base/views/datasets/read.py index e8fe9e2e..98b255bc 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/read.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/read.py @@ -594,6 +594,34 @@ def test_agents_and_actors(self): ) self.assertEqual(len(response.data["results"]), 0) + def test_agents_and_actors_with_ids(self): + # set test conditions + cr = CatalogRecord.objects.get(pk=11) + cr.research_dataset["curator"] = [] + cr.research_dataset["curator"].append( + { + "@type": "Person", + "name": "Tarmo Termiitti", + "member_of": { + "identifier": "org_identifier", + "name": {"en": "Unique Organization"}, + }, + "identifier": "http://orcid.org/1234-1234-1234-1234", + } + ) + cr.research_dataset["creator"] = [] + cr.research_dataset["creator"].append( + {"@type": "Organization", "name": {"en": "Unique Organization"}, "identifier": "http://uri.suomi.fi/codelist/fairdata/organization/code/1234567"} + ) + cr.force_save() + + # test that queries can also be made with organizational and person IDs + response = self.client.get("/rest/datasets?creator_organization=1234567") + self.assertEqual(len(response.data["results"]), 1, response.data) + + response = self.client.get("/rest/datasets?curator_person=1234-1234-1234-1234") + self.assertEqual(len(response.data["results"]), 1, response.data) + class CatalogRecordApiReadPASFilter(CatalogRecordApiReadCommon): def test_pas_filter(self): From 2a5bb4f5c60dd522d9b7a6590f30f4209d9d35a5 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Wed, 25 Aug 2021 11:46:23 +0300 Subject: [PATCH 082/160] unauthenticated users now have an empty string as username instead of None --- src/metax_api/models/catalog_record.py | 2 +- src/metax_api/services/catalog_record_service.py | 2 +- src/metax_api/tests/api/rest/base/views/common/auth.py | 9 +++++++++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/metax_api/models/catalog_record.py b/src/metax_api/models/catalog_record.py index a8b76cfb..9a499bde 100755 --- a/src/metax_api/models/catalog_record.py +++ b/src/metax_api/models/catalog_record.py @@ -531,7 +531,7 @@ def user_has_access(self, request): return True elif request.method in READ_METHODS: - if request.user.username is None: # unauthenticated user + if request.user.username == "": # unauthenticated user if self.state == self.STATE_PUBLISHED: return True else: diff --git a/src/metax_api/services/catalog_record_service.py b/src/metax_api/services/catalog_record_service.py index 15cd1ae7..3f144272 100755 --- a/src/metax_api/services/catalog_record_service.py +++ b/src/metax_api/services/catalog_record_service.py @@ -141,7 +141,7 @@ def filter_by_state(request, queryset_search_params): """ state_filter = None - if request.user.username is None: # unauthenticated user + if request.user.username == "": # unauthenticated user state_filter = Q(state="published") elif request.user.is_service: # service account pass diff --git a/src/metax_api/tests/api/rest/base/views/common/auth.py b/src/metax_api/tests/api/rest/base/views/common/auth.py index 2d477554..500e8d8a 100755 --- a/src/metax_api/tests/api/rest/base/views/common/auth.py +++ b/src/metax_api/tests/api/rest/base/views/common/auth.py @@ -75,6 +75,15 @@ def
test_delete_access_error(self): response = self.client.delete("/rest/files/1") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) + def test_published(self): + """ + Unauthenticated users should only be able to see published datasets. + """ + self.client._credentials = {} + response = self.client.get("/rest/datasets") + for cr in response.data['results']: + self.assertEqual(cr['state'], 'published') + def test_read_for_datasets_world_ok(self): """ Reading datasets api should be permitted even without any authorization. From 984725e69e25af7d76ec94c0a7e3d6c944424fce Mon Sep 17 00:00:00 2001 From: Toni Date: Mon, 27 Sep 2021 16:59:34 +0300 Subject: [PATCH 083/160] make datamigration more readable --- .../migrations/0039_auto_20210908_1151.py | 77 +++++++++---------- 1 file changed, 36 insertions(+), 41 deletions(-) diff --git a/src/metax_api/migrations/0039_auto_20210908_1151.py b/src/metax_api/migrations/0039_auto_20210908_1151.py index fd09268d..dea97485 100644 --- a/src/metax_api/migrations/0039_auto_20210908_1151.py +++ b/src/metax_api/migrations/0039_auto_20210908_1151.py @@ -7,6 +7,29 @@ logger = logging.getLogger(__name__) +def build_email_string(f_name, l_name, provider, domain_type): + return f"{f_name}.{l_name}@{provider}.{domain_type}" + +def replace_access_granter(cr, old_name, old_email, old_user_id, new_name, new_email, new_user_id): + logger.info(f"replacing access granter: {old_name}, {old_email}, {old_user_id} with {new_name}, {new_email}, {new_user_id}") + if cr.access_granter: + if cr.access_granter["name"] == old_name: + cr.access_granter["name"] = new_name + logger.info("access granter name changed") + if cr.access_granter["email"] == old_email: + cr.access_granter["email"] = new_email + logger.info("access granter email changed") + if cr.access_granter["userid"] == old_user_id: + cr.access_granter["userid"] = new_user_id + logger.info("access granter user_id changed") + +def replace_metadata_provider_user(cr, old_user, new_user): + logger.info(f"replacing metadata_provider_user: {old_user} with new_user: {new_user}") + if cr.metadata_provider_user: + if cr.metadata_provider_user == old_user: + cr.metadata_provider_user = new_user + logger.info("metadata_provider_user changed") + datasets = [ "1acea3cc-c245-4f6d-80ee-f9c6a6c907be", "72dc2050-70bc-4f46-9fc7-b3ac2bd58bdc", @@ -15,43 +38,20 @@ "51c7c6c4-0129-4856-be71-072582882cb5", "56ab6296-9bc9-41b3-95ea-b4655aa7978b", "66cc0770-08f9-4df3-ac6f-b65a15f5e73e", + "cr955e904-e3dd-4d7e-99f1-3fed446f9630" # test ] -""" -Changes: - -metadata_provider_user: tuokalma -to -> -metadata_provider_user: ksuomine - -access_granter: -name: Alma Tuokko -email: alma.tuokko@helsinki.fi -userid: tuokalma -to -> -access_granter: -name: Karina Lukin -email: karina.lukin@helsinki.fi -userid: ksuomine -""" - def change_metadata_provider_user(apps, schema_editor): CatalogRecord = apps.get_model('metax_api', 'CatalogRecord') for cr in CatalogRecord.objects.filter(identifier__in=datasets): try: - if cr.metadata_provider_user: - provider_usr = json.loads(json.dumps(cr.metadata_provider_user)) - changed_usr = provider_usr.replace("tuokalma", "ksuomine") - cr.metadata_provider_user = changed_usr - - if cr.access_granter: - access_granter = json.loads(json.dumps(cr.access_granter)) - changed_granter = access_granter.replace("Alma Tuokko", "Karina Lukin") - changed_granter = changed_granter.replace("alma.tuokko@helsinki.fi", "karina.lukin@helsinki.fi") - changed_granter = changed_granter.replace("tuokalma", "ksuomine") - cr.access_granter 
= changed_granter - + logger.info(f"changing access granter for cr {cr.identifier}") + old_mail = build_email_string("alma", "tuokko", "helsinki", "fi") + new_email = build_email_string("karina", "lukin", "helsinki", "fi") + replace_metadata_provider_user(cr, "tuokalma", "ksuomine") + replace_access_granter(cr, "Alma Tuokko", old_mail, "tuokalma", "Karina Lukin", new_email, "ksuomine") cr.save() + logger.info("cr save successful") except Exception as e: logger.error(e) @@ -60,19 +60,14 @@ def revert(apps, schema_editor): CatalogRecord = apps.get_model('metax_api', 'CatalogRecord') for cr in CatalogRecord.objects.filter(identifier__in=datasets): try: - if cr.metadata_provider_user: - provider_usr = json.loads(json.dumps(cr.metadata_provider_user)) - changed_usr = provider_usr.replace("ksuomine", "tuokalma") - cr.metadata_provider_user = changed_usr - - if cr.access_granter: - access_granter = json.loads(json.dumps(cr.access_granter)) - changed_granter = access_granter.replace("Karina Lukin", "Alma Tuokko") - changed_granter = changed_granter.replace("karina.lukin@helsinki.fi", "alma.tuokko@helsinki.fi") - changed_granter = changed_granter.replace("ksuomine", "tuokalma") - cr.access_granter = changed_granter + logger.info(f"changing access granter for cr {cr.identifier}") + old_mail = build_email_string("alma", "tuokko", "helsinki", "fi") + new_email = build_email_string("karina", "lukin", "helsinki", "fi") + replace_metadata_provider_user(cr, "ksuomine", "tuokalma") + replace_access_granter(cr, "Karina Lukin", new_email, "ksuomine", "Alma Tuokko", old_mail, "tuokalma", ) cr.save() + logger.info("cr save successful") except Exception as e: logger.error(e) From 73cf50d87594ad1cda3aa49f9917e3a0471b62f4 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Tue, 28 Sep 2021 10:46:06 +0300 Subject: [PATCH 084/160] datacatalog changed from ida to pas for pas templates --- src/metax_api/api/rpc/base/views/dataset_rpc.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/metax_api/api/rpc/base/views/dataset_rpc.py b/src/metax_api/api/rpc/base/views/dataset_rpc.py index c66fb3f1..9a8f2336 100755 --- a/src/metax_api/api/rpc/base/views/dataset_rpc.py +++ b/src/metax_api/api/rpc/base/views/dataset_rpc.py @@ -49,9 +49,11 @@ def get_minimal_dataset_template(self, request): with open("metax_api/exampledata/dataset_minimal.json", "rb") as f: example_ds = load(f) - example_ds["data_catalog"] = django_settings.END_USER_ALLOWED_DATA_CATALOGS[0] + example_ds["data_catalog"] = django_settings.IDA_DATA_CATALOG_IDENTIFIER if request.query_params["type"].endswith("_pas"): + example_ds["data_catalog"] = django_settings.PAS_DATA_CATALOG_IDENTIFIER + if 'issued' not in example_ds['research_dataset']: example_ds['research_dataset']['issued'] = '2019-01-01' if 'publisher' not in example_ds['research_dataset']: From e4d11e6204e41518fb8c064c17d8dcd5dc50536b Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Tue, 5 Oct 2021 13:35:21 +0300 Subject: [PATCH 085/160] changed relation type labels in reference data --- .../refdata_indexer/resources/local-refdata/relation_type.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/metax_api/tasks/refdata/refdata_indexer/resources/local-refdata/relation_type.json b/src/metax_api/tasks/refdata/refdata_indexer/resources/local-refdata/relation_type.json index 66401c9e..a57a2f22 100755 --- a/src/metax_api/tasks/refdata/refdata_indexer/resources/local-refdata/relation_type.json +++ 
b/src/metax_api/tasks/refdata/refdata_indexer/resources/local-refdata/relation_type.json @@ -1,4 +1,4 @@ -[{"id":"relation", "uri":"http://purl.org/dc/terms/relation", "label":{"fi":"Liittyvä aineisto", "en":"Related dataset"}}, +[{"id":"relation", "uri":"http://purl.org/dc/terms/relation", "label":{"fi":"Liittyy", "en":"Relation"}}, {"id":"cites", "uri":"http://purl.org/spar/cito/cites", "label":{"fi":"Viittaa", "en":"Cites"}}, {"id":"isCitedBy", "uri":"http://purl.org/spar/cito/isCitedBy", "label":{"fi":"Viitattu", "en":"Is cited by"}}, {"id":"isSupplementTo", "uri":"http://purl.org/vocab/frbr/core#isSupplementTo", "label":{"fi":"Viittaus tausta-aineistoon", "en":"Is supplement to"}}, From f6886c8faf1e225fd8d02aa0ee261a00e7923a7e Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Wed, 6 Oct 2021 17:04:03 +0300 Subject: [PATCH 086/160] added a migration file to change stored labels in database --- .../migrations/0040_auto_20211006_1116.py | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 src/metax_api/migrations/0040_auto_20211006_1116.py diff --git a/src/metax_api/migrations/0040_auto_20211006_1116.py b/src/metax_api/migrations/0040_auto_20211006_1116.py new file mode 100644 index 00000000..1256fb18 --- /dev/null +++ b/src/metax_api/migrations/0040_auto_20211006_1116.py @@ -0,0 +1,72 @@ +# Generated by Django 3.1.12 on 2021-10-06 08:16 + +from django.db import migrations + +import logging + +logger = logging.getLogger(__name__) + +def replace_relation_type_labels(cr, language, old_label, new_label): + logger.info(f"replacing relation type label ({language}): {old_label} with {new_label}") + if cr.research_dataset.get("relation"): + for item in cr.research_dataset["relation"]: + if item.get("relation_type"): + if item.get("relation_type").get("pref_label"): + if item.get("relation_type").get("pref_label").get(language): + if item["relation_type"]["pref_label"][language] == old_label: + item["relation_type"]["pref_label"][language] = new_label + logger.info("relation type label changed") + +datasets = [ + "c1974353-a4f6-40c3-9c1c-a7ec99e28ac8", + "7c12b39b-0bd4-4611-a764-85f328450e2b", + "ddd9854f-eb6a-4a8b-8f0d-2d2b9c405436", + "ef224b4b-6f0b-4776-ba33-6f8bd31855cc", + "0cfa3182-48a8-4483-8ebd-2636509b8363", + "0b6cfb0e-ea71-414f-abd3-88bb415b85fe", + "fe5e67ba-fc36-449d-b83e-e1e286cc9dda", + "254acb9a-32c7-45cd-9ac1-e52ef2d6174e", + "69b0360e-2ac8-4008-ac8d-c47b8c76d47e", + "087f9d18-5196-4e6e-83a0-dfe2d94c5aa6", + "a3aa061f-1263-4fb5-887b-a465f4c3a912", + "acf70d04-31f3-4e13-a422-666e39524b71", + "5cf11735-8422-4113-beb0-997c11b7c797", + "a5a04b82-6270-4e44-b357-a1344180bc1f" +] + +def change_relation_type_labels(apps, schema_editor): + CatalogRecord = apps.get_model('metax_api', 'CatalogRecord') + for cr in CatalogRecord.objects.filter(identifier__in=datasets): + try: + logger.info(f"changing relation type labels for cr {cr.identifier}") + replace_relation_type_labels(cr, "fi", "Liittyvä aineisto", "Liittyy") + replace_relation_type_labels(cr, "und", "Liittyvä aineisto", "Liittyy") + replace_relation_type_labels(cr, "en", "Related dataset", "Relation") + cr.save() + logger.info("cr save successful") + except Exception as e: + logger.error(e) + +def revert(apps, schema_editor): + CatalogRecord = apps.get_model('metax_api', 'CatalogRecord') + for cr in CatalogRecord.objects.filter(identifier__in=datasets): + try: + logger.info(f"changing relation type labels for cr {cr.identifier}") + replace_relation_type_labels(cr, "fi", "Liittyy", "Liittyvä aineisto") +
replace_relation_type_labels(cr, "und", "Liittyy", "LiittyvƤ aineisto") + replace_relation_type_labels(cr, "en", "Relation", "Related dataset") + cr.save() + logger.info("cr save successful") + except Exception as e: + logger.error(e) + + +class Migration(migrations.Migration): + + dependencies = [ + ('metax_api', '0039_auto_20210908_1151'), + ] + + operations = [ + migrations.RunPython(change_relation_type_labels, revert), + ] \ No newline at end of file From a9093e028c956fa6e351c6d3f1ce6a64b8e3b697 Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Mon, 3 May 2021 14:17:44 +0300 Subject: [PATCH 087/160] Add repotronic catalog, user and required api access --- src/metax_api/initialdata/datacatalogs.json | 47 +++++++++++++++++++ .../settings/components/access_control.py | 1 + src/metax_api/settings/environments/stable.py | 6 +-- 3 files changed, 51 insertions(+), 3 deletions(-) diff --git a/src/metax_api/initialdata/datacatalogs.json b/src/metax_api/initialdata/datacatalogs.json index 539a26d1..707ce27f 100755 --- a/src/metax_api/initialdata/datacatalogs.json +++ b/src/metax_api/initialdata/datacatalogs.json @@ -418,4 +418,51 @@ "catalog_record_services_edit": "metax,jyu", "catalog_record_services_create": "metax,jyu", "catalog_record_services_read": "metax,jyu,download" +}, +{ + "catalog_json": { + "title": { + "en": "Repotronic catalog", + "fi": "Repotronic katalogi" + }, + "language": [ + { + "identifier": "http://lexvo.org/id/iso639-3/fin" + } + ], + "harvested": false, + "publisher": { + "name": { + "en": "Repotronic", + "fi": "Repotronic" + } + }, + "identifier": "urn:nbn:fi:att:data-catalog-repotronic", + "access_rights": { + "license": [ + { + "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-4.0" + } + ], + "access_type": [ + { + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } + } + ], + "description": { + "en": "Contains datasets from Repotronic service", + "fi": "SisƤltƤƤ aineistoja Repotronic-palvelusta" + } + }, + "dataset_versioning": false, + "research_dataset_schema": "att" + }, + "catalog_record_services_edit": "metax,repotronic", + "catalog_record_services_create": "metax,repotronic", + "catalog_record_services_read": "metax,repotronic" }] \ No newline at end of file diff --git a/src/metax_api/settings/components/access_control.py b/src/metax_api/settings/components/access_control.py index 387c0c90..756d1a57 100755 --- a/src/metax_api/settings/components/access_control.py +++ b/src/metax_api/settings/components/access_control.py @@ -36,6 +36,7 @@ class Role(Enum): API_AUTH_USER = "api_auth_user" EXTERNAL = "external" JYU = "jyu" + REPOTRONIC = "repotronic" def __ge__(self, other): if self.__class__ is other.__class__: diff --git a/src/metax_api/settings/environments/stable.py b/src/metax_api/settings/environments/stable.py index 2f59d398..95fd4961 100644 --- a/src/metax_api/settings/environments/stable.py +++ b/src/metax_api/settings/environments/stable.py @@ -1,9 +1,9 @@ from metax_api.settings.components.access_control import Role, api_permissions, prepare_perm_values from metax_api.settings.environments.staging import API_USERS # noqa: F401 -api_permissions.rest.datasets.create += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU] -api_permissions.rest.datasets["update"] += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU] -api_permissions.rest.datasets.delete += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU] +api_permissions.rest.datasets.create += [Role.IDA, Role.QVAIN_LIGHT, 
Role.JYU, Role.REPOTRONIC] +api_permissions.rest.datasets["update"] += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC] +api_permissions.rest.datasets.delete += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC] api_permissions.rest.directories.read += [Role.IDA, Role.QVAIN_LIGHT] From ac3ba86c0ba5871a1761d0d6dfaddf76673f8764 Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Mon, 10 May 2021 09:14:18 +0300 Subject: [PATCH 088/160] CSCFAIRMETA-1027: make repotronic legacy --- src/metax_api/settings/components/common.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/metax_api/settings/components/common.py b/src/metax_api/settings/components/common.py index 24529eb2..d2f9a447 100755 --- a/src/metax_api/settings/components/common.py +++ b/src/metax_api/settings/components/common.py @@ -6,11 +6,13 @@ DEBUG = env("DEBUG") SECRET_KEY = env("DJANGO_SECRET_KEY") ADDITIONAL_USER_PROJECTS_PATH = env("ADDITIONAL_USER_PROJECTS_PATH") + IDA_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-ida" ATT_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-att" PAS_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-pas" LEGACY_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-legacy" DFT_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-dft" +REPOTRONIC_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-repotronic" END_USER_ALLOWED_DATA_CATALOGS = [ IDA_DATA_CATALOG_IDENTIFIER, @@ -23,7 +25,9 @@ # catalogs where uniqueness of dataset pids is not enforced. LEGACY_CATALOGS = [ LEGACY_DATA_CATALOG_IDENTIFIER, + REPOTRONIC_DATA_CATALOG_IDENTIFIER, ] + VALIDATE_TOKEN_URL = env("VALIDATE_TOKEN_URL") CHECKSUM_ALGORITHMS = ["SHA-256", "MD5", "SHA-512"] ERROR_FILES_PATH = env("ERROR_FILES_PATH") From eab6273d3d6aabd62d479a376a435d8024cc1a48 Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Mon, 10 May 2021 09:58:30 +0300 Subject: [PATCH 089/160] CSCFAIRMETA-1026: Legacy datasets are not published to Etsin --- src/metax_api/models/catalog_record.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/metax_api/models/catalog_record.py b/src/metax_api/models/catalog_record.py index 9a499bde..1e1c0944 100755 --- a/src/metax_api/models/catalog_record.py +++ b/src/metax_api/models/catalog_record.py @@ -3071,8 +3071,10 @@ def __call__(self): try: for exchange in settings.RABBITMQ["EXCHANGES"]: - if exchange["EXC_TYPE"] == "dataset": - rabbitmq.publish(cr_json, routing_key=self.routing_key, exchange=exchange["NAME"]) + if self.cr.catalog_is_legacy() and exchange["NAME"] != "TTV-datasets": + continue + + rabbitmq.publish(cr_json, routing_key=self.routing_key, exchange=exchange["NAME"]) except: # note: if we'd like to let the request be a success even if this operation fails, # we could simply not raise an exception here. 
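A short editorial sketch of the publishing rule introduced in the hunk above (hedged; the helper name below is hypothetical, the real logic lives inline in RabbitMQPublishRecord): a legacy-catalog dataset is skipped for every exchange except "TTV-datasets", so legacy records still reach TTV but are not published to Etsin.

    def should_publish(catalog_is_legacy: bool, exchange_name: str) -> bool:
        # legacy-catalog datasets are routed only to the TTV exchange
        return not catalog_is_legacy or exchange_name == "TTV-datasets"

    assert should_publish(False, "datasets")        # normal datasets go to every exchange
    assert not should_publish(True, "datasets")     # legacy: skipped for Etsin-facing exchanges
    assert should_publish(True, "TTV-datasets")     # legacy: still published to TTV

Patch 091 further below rewrites this same block so that, as written there, legacy records are not published to any exchange.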
From 825fb23efa8f86ff0a22e691fd2e445d67f9f66c Mon Sep 17 00:00:00 2001 From: Tommi Pulli Date: Mon, 17 May 2021 10:27:30 +0300 Subject: [PATCH 090/160] CSCFAIRMETA-1014: fix failing unrelated tests --- src/metax_api/services/statistic_service.py | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/src/metax_api/services/statistic_service.py b/src/metax_api/services/statistic_service.py index 21d547e7..fec1bda3 100755 --- a/src/metax_api/services/statistic_service.py +++ b/src/metax_api/services/statistic_service.py @@ -131,13 +131,7 @@ def count_datasets( if legacy is not None: where_args.append( - "".join( - [ - "and dc.catalog_json->>'identifier'", - " = " if legacy else " != ", - "any(%s)", - ] - ) + ''.join(["and", " " if legacy else " NOT ", "dc.catalog_json->>'identifier'", " = ", "any(%s)"]) ) sql_args.append(settings.LEGACY_CATALOGS) @@ -210,13 +204,7 @@ def total_datasets(cls, from_date, to_date, latest=True, legacy=None, removed=No if legacy is not None: filter_sql.append( - "".join( - [ - "and dc.catalog_json->>'identifier'", - " = " if legacy else " != ", - "any(%s)", - ] - ) + ''.join(["and", " " if legacy else " NOT ", "dc.catalog_json->>'identifier'", " = ", "any(%s)"]) ) filter_args.append(settings.LEGACY_CATALOGS) From f39de164020f4b236cd65564130e451eb33a1005 Mon Sep 17 00:00:00 2001 From: Toni Date: Wed, 3 Nov 2021 16:49:16 +0200 Subject: [PATCH 091/160] support for handling possible repotronic dataset without preferred identifier field, test for creating dataset in repotronic catalog --- src/metax_api/models/catalog_record.py | 34 +++++++++++-------- src/metax_api/services/rabbitmq_service.py | 5 +-- .../api/rest/base/views/datasets/write.py | 23 +++++++++++++ 3 files changed, 46 insertions(+), 16 deletions(-) diff --git a/src/metax_api/models/catalog_record.py b/src/metax_api/models/catalog_record.py index 1e1c0944..d052ee22 100755 --- a/src/metax_api/models/catalog_record.py +++ b/src/metax_api/models/catalog_record.py @@ -1392,16 +1392,20 @@ def _pre_create_operations(self, pid_type=None): elif self.catalog_is_legacy(): if "preferred_identifier" not in self.research_dataset: - raise ValidationError( - { - "detail": [ - "Selected catalog %s is a legacy catalog. Preferred identifiers are not " - "automatically generated for datasets stored in legacy catalogs, nor is " - "their uniqueness enforced. Please provide a value for dataset field " - "preferred_identifier." % self.data_catalog.catalog_json["identifier"] - ] - } - ) + + # Repotronic catalog does not need to validate unique identifiers + # Raise validation error when not repotronic catalog + if self.data_catalog.catalog_json["identifier"] != settings.REPOTRONIC_DATA_CATALOG_IDENTIFIER: + raise ValidationError( + { + "detail": [ + "Selected catalog %s is a legacy catalog. Preferred identifiers are not " + "automatically generated for datasets stored in legacy catalogs, nor is " + "their uniqueness enforced. Please provide a value for dataset field " + "preferred_identifier." 
% self.data_catalog.catalog_json["identifier"] + ] + } + ) _logger.info( "Catalog %s is a legacy catalog - not generating pid" % self.data_catalog.catalog_json["identifier"] @@ -1491,7 +1495,7 @@ def _post_create_operations(self): or self.use_doi_for_published is True ): self._validate_cr_against_datacite_schema() - if is_metax_generated_doi_identifier(self.research_dataset["preferred_identifier"]): + if is_metax_generated_doi_identifier(self.research_dataset.get("preferred_identifier")): self.add_post_request_callable( DataciteDOIUpdate(self, self.research_dataset["preferred_identifier"], "create") ) @@ -3071,10 +3075,12 @@ def __call__(self): try: for exchange in settings.RABBITMQ["EXCHANGES"]: - if self.cr.catalog_is_legacy() and exchange["NAME"] != "TTV-datasets": - continue + do_publish = True + if self.cr.catalog_is_legacy(): + do_publish = False - rabbitmq.publish(cr_json, routing_key=self.routing_key, exchange=exchange["NAME"]) + if do_publish: + rabbitmq.publish(cr_json, routing_key=self.routing_key, exchange=exchange["NAME"]) except: # note: if we'd like to let the request be a success even if this operation fails, # we could simply not raise an exception here. diff --git a/src/metax_api/services/rabbitmq_service.py b/src/metax_api/services/rabbitmq_service.py index 0e541693..ef0a710d 100755 --- a/src/metax_api/services/rabbitmq_service.py +++ b/src/metax_api/services/rabbitmq_service.py @@ -195,10 +195,11 @@ class _RabbitMQServiceDummy: """ def __init__(self, settings=settings): - pass + self.messages = [] def publish(self, body, routing_key="", exchange="datasets", persistent=True): - pass + msg = {"body":body, "routing_key":routing_key, "exchange":exchange, "persistent":persistent} + self.messages.append(msg) def init_exchanges(self, *args, **kwargs): pass diff --git a/src/metax_api/tests/api/rest/base/views/datasets/write.py b/src/metax_api/tests/api/rest/base/views/datasets/write.py index 7f7405d5..ce3ec89f 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/write.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/write.py @@ -1057,6 +1057,29 @@ def test_catalog_record_deprecation_updates_date_modified(self): self.assertTrue(cr_depr.deprecated) # self.assertEqual(cr_depr.date_modified, cr_depr.date_deprecated, 'date_modified should be updated') + def test_catalog_record_create_repotronic_dataset(self): + + # Create the repotronic catalog + dc_id = django_settings.REPOTRONIC_DATA_CATALOG_IDENTIFIER + blueprint_dc = DataCatalog.objects.get(pk=1) + catalog_json = blueprint_dc.catalog_json + catalog_json["identifier"] = dc_id + catalog_json["dataset_versioning"] = False + catalog_json["research_dataset_schema"] = "att" + dc = DataCatalog.objects.create( + catalog_json=catalog_json, + date_created=get_tz_aware_now_without_micros(), + catalog_record_services_create="testuser,api_auth_user,metax", + catalog_record_services_edit="testuser,api_auth_user,metax", + catalog_record_services_read="testuser,api_auth_user,metax", + ) + cr = self._get_new_full_test_att_cr_data() + dc_json = self.client.get(f"/rest/datacatalogs/{dc_id}").data + cr["data_catalog"] = dc_json + cr_posted = self.client.post("/rest/datasets", cr, format="json") + # ic(RabbitMQService.messages.pop()) + self.assertEqual(cr_posted.status_code, 201, cr_posted.data) + def test_change_datacatalog_ATT_to_IDA(self): cr = self._get_new_full_test_att_cr_data() From 6741796c1f3924f3b498633f560321024430bc25 Mon Sep 17 00:00:00 2001 From: Jori Niemi <3295718+tahme@users.noreply.github.com> Date: Mon, 30 Aug 
2021 08:41:02 +0300 Subject: [PATCH 092/160] CSCFAIRMETA-1090: Editing rights for external editor * Add EditorPermissions to all datasets * Share EditorPermissions between dataset in same version set and their drafts * Add "creator" EditorUserPermission for metadata_provider_user when new dataset is created * Add editor_permissions_user query parameter to /rest/datasets --- .../migrations/0038_add_editorpermissions.py | 128 +++++ .../0039_editorpermissions_not_null.py | 19 + src/metax_api/models/catalog_record.py | 105 ++++ src/metax_api/models/catalog_record_v2.py | 6 + .../services/catalog_record_service.py | 20 + .../tests/testdata/generate_test_data.py | 38 +- src/metax_api/tests/testdata/test_data.json | 514 ++++++++++++++++++ 7 files changed, 826 insertions(+), 4 deletions(-) create mode 100644 src/metax_api/migrations/0038_add_editorpermissions.py create mode 100644 src/metax_api/migrations/0039_editorpermissions_not_null.py diff --git a/src/metax_api/migrations/0038_add_editorpermissions.py b/src/metax_api/migrations/0038_add_editorpermissions.py new file mode 100644 index 00000000..ee715318 --- /dev/null +++ b/src/metax_api/migrations/0038_add_editorpermissions.py @@ -0,0 +1,128 @@ +# Generated by Django 3.1.13 on 2021-08-26 11:14 + +from django.db import migrations, models +import django.db.models.deletion +from django.utils import timezone +import logging +from metax_api.models import catalog_record + +logger = logging.getLogger(__name__) + + +def add_permissions(apps, schema_editor): + """ + Add EditorPermissions for each version set and related next_draft CatalogRecords. + + Here CatalogRecords not belonging to a DatasetVersionSet are considered a version set with size 1. + """ + CatalogRecord = apps.get_model("metax_api", "CatalogRecordV2") + EditorUserPermission = apps.get_model("metax_api", "EditorUserPermission") + EditorPermissions = apps.get_model("metax_api", "EditorPermissions") + + num_perms = 0 + num_datasets = 0 + prev_version_set_id = None + version_set_users = [] + version_set_crs = [] + + def flush_version_set(): + """Create single EditorPermissions object for each version set, add creator user""" + nonlocal num_perms, num_datasets, version_set_crs, version_set_users + if len(version_set_crs) > 0: + permissions = EditorPermissions.objects.create() + permissions.catalog_records.add(*version_set_crs) + num_perms += 1 + num_datasets += len(version_set_crs) + + for user in version_set_users: + now = timezone.now().replace(microsecond=0) + EditorUserPermission.objects.create( + editor_permissions=permissions, + user_id=user, + verified=True, + role="creator", + date_created=now, + ) + version_set_users = [] + version_set_crs = [] + + # group datasets by version_sets and include their next_draft datasets + for cr in CatalogRecord.objects.filter(draft_of__isnull=True).order_by("dataset_version_set_id", "id"): + if cr.dataset_version_set_id is None or cr.dataset_version_set_id != prev_version_set_id: + flush_version_set() + + version_set_crs.append(cr) + if cr.next_draft: + version_set_crs.append(cr.next_draft) + + # DatasetVersionSet shouldn't have multiple metadata_provider_users but this supports them just in case + if cr.metadata_provider_user and cr.metadata_provider_user not in version_set_users: + version_set_users.append(cr.metadata_provider_user) + + prev_version_set_id = cr.dataset_version_set_id + flush_version_set() + + logger.info(f"Added {num_perms} EditorPermissions to {num_datasets} datasets") + +def revert(apps, schema_editor): + pass + + +class 
Migration(migrations.Migration): + + dependencies = [ + ('metax_api', '0037_auto_20210811_1037'), + ] + + operations = [ + migrations.CreateModel( + name='EditorPermissions', + fields=[ + ('id', models.BigAutoField(editable=False, primary_key=True, serialize=False)), + ], + ), + migrations.CreateModel( + name='EditorUserPermission', + fields=[ + ('id', models.BigAutoField(editable=False, primary_key=True, serialize=False)), + ('active', models.BooleanField(default=True)), + ('removed', models.BooleanField(default=False)), + ('date_modified', models.DateTimeField(null=True)), + ('user_modified', models.CharField(max_length=200, null=True)), + ('date_created', models.DateTimeField()), + ('user_created', models.CharField(max_length=200, null=True)), + ('service_modified', models.CharField(help_text='Name of the service who last modified the record', max_length=200, null=True)), + ('service_created', models.CharField(help_text='Name of the service who created the record', max_length=200, null=True)), + ('date_removed', models.DateTimeField(null=True)), + ('user_id', models.CharField(max_length=200)), + ('role', models.CharField(choices=[('creator', 'Creator'), ('editor', 'Editor')], max_length=16)), + ('verified', models.BooleanField(default=False)), + ('verification_token', models.CharField(max_length=32, null=True)), + ('verification_token_expires', models.DateTimeField(null=True)), + ('editor_permissions', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='users', to='metax_api.editorpermissions')), + + ], + ), + migrations.AddField( + model_name='catalogrecord', + name='editor_permissions', + field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='catalog_records', to='metax_api.editorpermissions'), + ), + migrations.AddIndex( + model_name='editoruserpermission', + index=models.Index(fields=['user_id'], name='metax_api_e_user_id_0b47cc_idx'), + ), + migrations.AddConstraint( + model_name='editoruserpermission', + constraint=models.UniqueConstraint(fields=('editor_permissions', 'user_id'), name='unique_dataset_user_permission'), + ), + migrations.AddConstraint( + model_name='editoruserpermission', + constraint=models.CheckConstraint(check=models.Q(_negated=True, user_id=''), name='require_user_id'), + ), + migrations.AddConstraint( + model_name='editoruserpermission', + constraint=models.CheckConstraint(check=models.Q(role__in=['creator', 'editor']), name='require_role'), + ), + migrations.RunPython(add_permissions, revert), + ] diff --git a/src/metax_api/migrations/0039_editorpermissions_not_null.py b/src/metax_api/migrations/0039_editorpermissions_not_null.py new file mode 100644 index 00000000..0d7ef953 --- /dev/null +++ b/src/metax_api/migrations/0039_editorpermissions_not_null.py @@ -0,0 +1,19 @@ +# Generated by Django 3.1.13 on 2021-08-26 12:44 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('metax_api', '0038_add_editorpermissions'), + ] + + operations = [ + migrations.AlterField( + model_name='catalogrecord', + name='editor_permissions', + field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='catalog_records', to='metax_api.editorpermissions'), + ), + ] diff --git a/src/metax_api/models/catalog_record.py b/src/metax_api/models/catalog_record.py index d052ee22..9f447e55 100755 --- a/src/metax_api/models/catalog_record.py +++ b/src/metax_api/models/catalog_record.py @@ -8,15 +8,18 @@ import 
logging from collections import defaultdict from copy import deepcopy +from datetime import datetime, timedelta from django.conf import settings from django.contrib.postgres.fields import ArrayField from django.db import connection, models, transaction from django.db.models import JSONField, Q, Sum from django.http import Http404 +from django.utils.crypto import get_random_string from rest_framework.serializers import ValidationError from metax_api.exceptions import Http400, Http403, Http503 +from metax_api.tasks.refdata.refdata_indexer import service from metax_api.utils import ( DelayedLog, IdentifierType, @@ -58,6 +61,76 @@ DFT_CATALOG = settings.DFT_DATA_CATALOG_IDENTIFIER +class EditorPermissions(models.Model): + """ + Shared permissions between linked copies of same dataset. + + Attaches a set of EditorUserPermission objects to a set of CatalogRecords. + """ + id = models.BigAutoField(primary_key=True, editable=False) + +class PermissionRole(models.TextChoices): + CREATOR = "creator" + EDITOR = "editor" + +class EditorUserPermission(Common): + """ + Table for attaching user roles to an EditorPermissions object. + """ + + # MODEL FIELD DEFINITIONS # + editor_permissions = models.ForeignKey( + EditorPermissions, related_name="users", on_delete=models.CASCADE + ) + user_id = models.CharField(max_length=200) + role = models.CharField(max_length=16, choices=PermissionRole.choices) + verified = models.BooleanField(default=False) + verification_token = models.CharField(max_length=32, null=True) + verification_token_expires = models.DateTimeField(null=True) + + class Meta: + indexes = [ + models.Index( + fields=[ + "user_id", + ] + ), + ] + constraints = [ + models.UniqueConstraint( + fields=["editor_permissions", "user_id"], name="unique_dataset_user_permission" + ), + models.CheckConstraint(check=~models.Q(user_id=""), name="require_user_id"), + models.CheckConstraint(check=models.Q(role__in=PermissionRole.values), name="require_role"), + ] + + def __repr__(self): + return f"<EditorUserPermission {self.user_id} ({self.role})>" + + def clear_verification_token(self): + self.verification_token = None + self.verification_token_expires = None + + def generate_verification_token(self): + self.verification_token = get_random_string(length=32) + self.verification_token_expires = datetime.now() + timedelta(days=14) + + def verify(self, token): + if not token or token != self.verification_token or self.removed: + _logger.error("Invalid token or already used") + return False + if datetime.now() >= self.verification_token_expires: + _logger.error("Token expired") + return False + + self.verified = True + self.clear_verification_token() + + def delete(self, *args, **kwargs): + self.clear_verification_token() + super().remove(*args, **kwargs) + + class DiscardRecord(Exception): pass @@ -445,6 +518,10 @@ class CatalogRecord(Common): help_text="Saves api related info about the dataset. E.g.
api version", ) + editor_permissions = models.ForeignKey( + EditorPermissions, related_name="catalog_records", null=False, on_delete=models.PROTECT + ) + # END OF MODEL FIELD DEFINITIONS # """ @@ -1462,6 +1539,12 @@ def _pre_create_operations(self, pid_type=None): self._set_api_version() + # only new datasets need new EditorPermissions, copies already have one + if not self.editor_permissions_id: + self._add_editor_permissions() + if self.metadata_provider_user: + self._add_creator_editor_user_permission() + def _post_create_operations(self): if "files" in self.research_dataset or "directories" in self.research_dataset: # files must be added after the record itself has been created, to be able @@ -3037,6 +3120,28 @@ def _copy_undeleted_files_from_old_version(self): if DEBUG: _logger.debug("Added %d files to dataset %s" % (n_files_copied, self._new_version.id)) + def _add_editor_permissions(self): + permissions = EditorPermissions.objects.create() + self.editor_permissions = permissions + + def _add_creator_editor_user_permission(self): + """ + Add creator permission to a newly created CatalogRecord. + """ + perm = EditorUserPermission( + editor_permissions=self.editor_permissions, + user_id=self.metadata_provider_user, + verified=True, + role=PermissionRole.CREATOR, + date_created=self.date_created, + date_modified=self.date_modified, + user_created=self.user_created, + user_modified=self.user_modified, + service_created=self.service_created, + service_modified=self.service_modified, + ) + perm.save() + class RabbitMQPublishRecord: diff --git a/src/metax_api/models/catalog_record_v2.py b/src/metax_api/models/catalog_record_v2.py index b9ee5895..583a75c2 100755 --- a/src/metax_api/models/catalog_record_v2.py +++ b/src/metax_api/models/catalog_record_v2.py @@ -117,6 +117,12 @@ def _pre_create_operations(self): self._set_api_version() + # only new datasets need new EditorPermissions, copies already have one + if not self.editor_permissions_id: + self._add_editor_permissions() + if self.metadata_provider_user: + self._add_creator_editor_user_permission() + def _post_create_operations(self, pid_type=None): if "files" in self.research_dataset or "directories" in self.research_dataset: diff --git a/src/metax_api/services/catalog_record_service.py b/src/metax_api/services/catalog_record_service.py index 3f144272..1f35d33c 100755 --- a/src/metax_api/services/catalog_record_service.py +++ b/src/metax_api/services/catalog_record_service.py @@ -130,8 +130,28 @@ def get_queryset_search_params(cls, request): queryset_search_params["api_meta__contains"] = {"version": value} + if request.query_params.get("editor_permissions_user"): + cls.filter_by_editor_permissions_user(request, queryset_search_params) + return queryset_search_params + @staticmethod + def filter_by_editor_permissions_user(request, queryset_search_params): + """ + Add filter for querying datasets where user has verified editor user permissions. 
+ """ + user_id = request.query_params["editor_permissions_user"] + + # non-service users can only query their own datasets + if not request.user.is_service: + if request.user.username == '': + raise Http403({"detail": ["Query by editor_permissions_user is only supported for authenticated users"]}) + if request.user.username != user_id: + raise Http403({"detail": ["Provided editor_permissions_user does not match current user"]}) + + queryset_search_params["editor_permissions__users__user_id"] = user_id + queryset_search_params["editor_permissions__users__verified"] = True + @staticmethod def filter_by_state(request, queryset_search_params): """ diff --git a/src/metax_api/tests/testdata/generate_test_data.py b/src/metax_api/tests/testdata/generate_test_data.py index 6729396a..b6ffcabc 100755 --- a/src/metax_api/tests/testdata/generate_test_data.py +++ b/src/metax_api/tests/testdata/generate_test_data.py @@ -268,6 +268,7 @@ def save_test_data( contract_list, catalog_record_list, dataset_version_sets, + editor_permissions ): with open("test_data.json", "w") as f: print("dumping test data as json to metax_api/tests/test_data.json...") @@ -278,13 +279,13 @@ def save_test_data( + data_catalogs_list + contract_list + dataset_version_sets + + editor_permissions + catalog_record_list, f, indent=4, sort_keys=True, ) - def generate_data_catalogs(start_idx, data_catalog_max_rows, validate_json, type): print("generating %s data catalogs..." % type) test_data_catalog_list = [] @@ -368,6 +369,28 @@ def generate_contracts(contract_max_rows, validate_json): return test_contract_list +def add_editor_permissions(editor_permissions, dataset): + # add EditorPermissions + pk = len(editor_permissions) + editor_perms = { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": pk, + } + editor_permissions.append(editor_perms) + editor_user_perms = { + "fields": { + "user_id": dataset["fields"]["metadata_provider_user"], + "date_created": dataset["fields"]["date_created"], + "editor_permissions_id": pk, + "role": "creator", + }, + "model": "metax_api.editoruserpermission", + "pk": pk + 1, + } + editor_permissions.append(editor_user_perms) + dataset["fields"]["editor_permissions_id"] = pk + def generate_catalog_records( basic_catalog_record_max_rows, data_catalogs_list, @@ -377,6 +400,7 @@ def generate_catalog_records( type, test_data_list=[], dataset_version_sets=[], + editor_permissions=[], ): print("generating %s catalog records..." 
% type) @@ -426,6 +450,8 @@ def generate_catalog_records( new["fields"]["date_created"] = "2017-05-23T10:07:22Z" new["fields"]["files"] = [] + add_editor_permissions(editor_permissions, new) + # add files if type == "ida": @@ -652,6 +678,8 @@ def generate_catalog_records( new["fields"]["date_modified"] = "2017-09-23T10:07:22Z" new["fields"]["date_created"] = "2017-05-23T10:07:22Z" + add_editor_permissions(editor_permissions, new) + new["fields"]["research_dataset"]["metadata_version_identifier"] = generate_test_identifier( cr_type, len(test_data_list) + 1, urn=False ) @@ -761,7 +789,7 @@ def generate_catalog_records( json_validate(new["fields"]["research_dataset"], json_schema) test_data_list.append(new) - return test_data_list, dataset_version_sets + return test_data_list, dataset_version_sets, editor_permissions def generate_alt_catalog_records(test_data_list): @@ -844,7 +872,7 @@ def set_qvain_info_to_records(catalog_record_list): ida_data_catalog_max_rows + 1, att_data_catalog_max_rows, validate_json, "att" ) - catalog_record_list, dataset_version_sets = generate_catalog_records( + catalog_record_list, dataset_version_sets, editor_permissions = generate_catalog_records( ida_catalog_record_max_rows, ida_data_catalogs_list, contract_list, @@ -853,7 +881,7 @@ def set_qvain_info_to_records(catalog_record_list): "ida", ) - catalog_record_list, dataset_version_sets = generate_catalog_records( + catalog_record_list, dataset_version_sets, editor_permissions = generate_catalog_records( att_catalog_record_max_rows, att_data_catalogs_list, contract_list, @@ -862,6 +890,7 @@ def set_qvain_info_to_records(catalog_record_list): "att", catalog_record_list, dataset_version_sets, + editor_permissions, ) catalog_record_list = generate_alt_catalog_records(catalog_record_list) @@ -876,6 +905,7 @@ def set_qvain_info_to_records(catalog_record_list): contract_list, catalog_record_list, dataset_version_sets, + editor_permissions, ) print("done") diff --git a/src/metax_api/tests/testdata/test_data.json b/src/metax_api/tests/testdata/test_data.json index c3802a10..ba51c435 100755 --- a/src/metax_api/tests/testdata/test_data.json +++ b/src/metax_api/tests/testdata/test_data.json @@ -5712,6 +5712,396 @@ "model": "metax_api.datasetversionset", "pk": 13 }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 0 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 0, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 1 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 2 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 2, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 3 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 4 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 4, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 5 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 6 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 6, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 7 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 8 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + 
"editor_permissions_id": 8, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 9 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 10 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 10, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 11 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 12 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 12, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 13 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 14 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 14, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 15 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 16 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 16, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 17 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 18 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 18, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 19 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 20 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 20, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 21 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 22 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 22, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 23 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 24 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 24, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 25 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 26 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 26, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 27 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 28 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 28, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 29 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 30 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 30, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 31 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 32 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 32, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 33 + }, + { 
+ "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 34 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 34, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 35 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 36 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 36, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 37 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 38 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 38, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 39 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 40 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 40, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 41 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 42 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 42, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 43 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 44 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 44, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 45 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 46 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 46, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 47 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 48 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 48, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 49 + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": 50 + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": 50, + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 51 + }, { "fields": {}, "model": "metax_api.alternaterecordset", @@ -5727,6 +6117,13 @@ "dataset_version_set": 1, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor": { + "creator_id": "053bffbcc41edad4853bea91fc42ea18", + "identifier": "qvain", + "owner_id": "053bffbcc41edad4853bea91fc42ea18", + "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9001" + }, + "editor_permissions_id": 0, "files": [ 1, 2 @@ -5882,6 +6279,13 @@ "dataset_version_set": 2, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor": { + "creator_id": "053d18ecb29e752cb7a35cd77b34f5fd", + "identifier": "qvain", + "owner_id": "053d18ecb29e752cb7a35cd77b34f5fd", + "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9002" + }, + "editor_permissions_id": 2, "files": [ 3, 4 @@ -6037,6 +6441,13 @@ "dataset_version_set": 3, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor": { + "creator_id": "05593961536b76fa825281ccaedd4d4f", 
+ "identifier": "qvain", + "owner_id": "05593961536b76fa825281ccaedd4d4f", + "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9003" + }, + "editor_permissions_id": 4, "files": [ 5, 6 @@ -6192,6 +6603,13 @@ "dataset_version_set": 4, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor": { + "creator_id": "055ea4dade5ab2145954f56d4b51cef0", + "identifier": "qvain", + "owner_id": "055ea4dade5ab2145954f56d4b51cef0", + "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9004" + }, + "editor_permissions_id": 6, "files": [ 7, 8 @@ -6347,6 +6765,13 @@ "dataset_version_set": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor": { + "creator_id": "055ea531a6cac569425bed94459266ee", + "identifier": "qvain", + "owner_id": "055ea531a6cac569425bed94459266ee", + "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9005" + }, + "editor_permissions_id": 8, "files": [ 9, 10 @@ -6502,6 +6927,13 @@ "dataset_version_set": 6, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor": { + "creator_id": "053bffbcc41edad4853bea91fc42ea18", + "identifier": "qvain", + "owner_id": "053bffbcc41edad4853bea91fc42ea18", + "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9006" + }, + "editor_permissions_id": 10, "files": [ 11, 12 @@ -6657,6 +7089,13 @@ "dataset_version_set": 7, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor": { + "creator_id": "053d18ecb29e752cb7a35cd77b34f5fd", + "identifier": "qvain", + "owner_id": "053d18ecb29e752cb7a35cd77b34f5fd", + "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9007" + }, + "editor_permissions_id": 12, "files": [ 13, 14 @@ -6812,6 +7251,13 @@ "dataset_version_set": 8, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor": { + "creator_id": "05593961536b76fa825281ccaedd4d4f", + "identifier": "qvain", + "owner_id": "05593961536b76fa825281ccaedd4d4f", + "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9008" + }, + "editor_permissions_id": 14, "files": [ 15, 16 @@ -6952,6 +7398,13 @@ "dataset_version_set": 9, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor": { + "creator_id": "055ea4dade5ab2145954f56d4b51cef0", + "identifier": "qvain", + "owner_id": "055ea4dade5ab2145954f56d4b51cef0", + "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9009" + }, + "editor_permissions_id": 16, "files": [ 17, 18 @@ -7078,6 +7531,13 @@ "dataset_version_set": 10, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor": { + "creator_id": "055ea531a6cac569425bed94459266ee", + "identifier": "qvain", + "owner_id": "055ea531a6cac569425bed94459266ee", + "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9010" + }, + "editor_permissions_id": 18, "files": [ 19, 20 @@ -7201,6 +7661,13 @@ "dataset_version_set": 11, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", + "editor": { + "creator_id": "053bffbcc41edad4853bea91fc42ea18", + "identifier": "qvain", + "owner_id": "053bffbcc41edad4853bea91fc42ea18", + "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9011" + }, + "editor_permissions_id": 20, "files": [ 1, 2, @@ -8152,6 +8619,13 @@ "dataset_version_set": 12, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", + "editor": { + "creator_id": "053d18ecb29e752cb7a35cd77b34f5fd", + "identifier": "qvain", + "owner_id": "053d18ecb29e752cb7a35cd77b34f5fd", + "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9012" + }, + 
"editor_permissions_id": 22, "files": [ 1, 2, @@ -9103,6 +9577,13 @@ "dataset_version_set": 13, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", + "editor": { + "creator_id": "05593961536b76fa825281ccaedd4d4f", + "identifier": "qvain", + "owner_id": "05593961536b76fa825281ccaedd4d4f", + "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9013" + }, + "editor_permissions_id": 24, "files": [ 22, 23, @@ -10146,6 +10627,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": 26, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9614", "metadata_owner_org": "abc-org-123", @@ -10285,6 +10767,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": 28, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9615", "metadata_owner_org": "abc-org-123", @@ -10424,6 +10907,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": 30, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9616", "metadata_owner_org": "abc-org-123", @@ -10563,6 +11047,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": 32, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9617", "metadata_owner_org": "abc-org-123", @@ -10702,6 +11187,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": 34, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9618", "metadata_owner_org": "abc-org-123", @@ -10841,6 +11327,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": 36, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9619", "metadata_owner_org": "abc-org-123", @@ -10980,6 +11467,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": 38, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9620", "metadata_owner_org": "abc-org-123", @@ -11110,6 +11598,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": 40, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9621", "metadata_owner_org": "abc-org-123", @@ -11240,6 +11729,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": 42, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9622", "metadata_owner_org": "abc-org-123", @@ -11370,6 +11860,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": 44, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9623", "metadata_owner_org": "abc-org-123", @@ -11499,6 +11990,11 @@ "data_catalog": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", + "editor": { + "creator_id": "053bffbcc41edad4853bea91fc42ea18", + 
"owner_id": "053bffbcc41edad4853bea91fc42ea18" + }, + "editor_permissions_id": 46, "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9624", "metadata_owner_org": "abc-org-123", "metadata_provider_org": "abc-org-123", @@ -12433,6 +12929,11 @@ "data_catalog": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", + "editor": { + "creator_id": "053bffbcc41edad4853bea91fc42ea18", + "owner_id": "053d18ecb29e752cb7a35cd77b34f5fd" + }, + "editor_permissions_id": 48, "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9625", "metadata_owner_org": "abc-org-123", "metadata_provider_org": "abc-org-123", @@ -13367,6 +13868,11 @@ "data_catalog": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", + "editor": { + "creator_id": "053bffbcc41edad4853bea91fc42ea18", + "owner_id": "05593961536b76fa825281ccaedd4d4f" + }, + "editor_permissions_id": 50, "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9626", "metadata_owner_org": "abc-org-123", "metadata_provider_org": "abc-org-123", @@ -14304,6 +14810,13 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor": { + "creator_id": "055ea4dade5ab2145954f56d4b51cef0", + "identifier": "qvain", + "owner_id": "055ea4dade5ab2145954f56d4b51cef0", + "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9027" + }, + "editor_permissions_id": 18, "files": [ 19, 20 @@ -14429,6 +14942,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": 18, "files": [ 19, 20 From d10c3f32ebbb16318ed79fe9e0aca7b3710b26cb Mon Sep 17 00:00:00 2001 From: Jori Niemi <3295718+tahme@users.noreply.github.com> Date: Tue, 31 Aug 2021 09:40:46 +0300 Subject: [PATCH 093/160] Add tests, fix test data --- .../api/rest/base/views/datasets/write.py | 20 +++++ .../api/rest/v2/views/datasets/drafts.py | 16 ++++ .../tests/api/rpc/v2/views/dataset_rpc.py | 15 ++++ .../tests/testdata/generate_test_data.py | 1 + src/metax_api/tests/testdata/test_data.json | 78 ++++++++++++------- 5 files changed, 104 insertions(+), 26 deletions(-) diff --git a/src/metax_api/tests/api/rest/base/views/datasets/write.py b/src/metax_api/tests/api/rest/base/views/datasets/write.py index ce3ec89f..24edfe0b 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/write.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/write.py @@ -556,6 +556,26 @@ def test_create_catalog_record_using_pid_type(self): response.data["research_dataset"]["preferred_identifier"].startswith("urn:") ) + def test_create_catalog_record_adds_creator_permission(self): + response = self.client.post( + "/rest/datasets", + self.cr_test_data, + format="json", + ) + cr = CatalogRecord.objects.get(id=response.data["id"]) + import json + print(json.dumps(response.data, indent=2)) + self.assertEqual( + list(cr.editor_permissions.users.values("user_id", "role", "verified")), + [ + { + "user_id": self.cr_test_data["metadata_provider_user"], + "role": "creator", + "verified": True, + } + ], + ) + class CatalogRecordApiWriteIdentifierUniqueness(CatalogRecordApiWriteCommon): """ diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py b/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py index 710ecfef..398e661c 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py @@ -612,6 +612,22 @@ def test_create_and_merge_draft(self): ) 
self.assertEqual("next_draft" in response.data, False, "next_draft link should be gone") + def test_create_and_merge_draft_keeps_permissions(self): + """ + Ensure creating and merging drafts keeps the same EditorPermission object. + """ + cr = self._create_dataset() + original_editor_permissions_id = CatalogRecordV2.objects.get(id=cr['id']).editor_permissions_id + + draft_cr = self._create_draft(cr["id"]) + draft_editor_permissions_id = CatalogRecordV2.objects.get(id=draft_cr['id']).editor_permissions_id + self.assertEqual(draft_editor_permissions_id, original_editor_permissions_id) + + self._merge_draft_changes(draft_cr["id"]) + merged_editor_permissions_id = CatalogRecordV2.objects.get(id=cr['id']).editor_permissions_id + self.assertEqual(merged_editor_permissions_id, original_editor_permissions_id) + + def test_missing_issued_date_is_generated_when_draft_is_merged(self): """ Testing a case where user removes 'issued_date' from draft before merging diff --git a/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py b/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py index d89f0c29..d61fce67 100755 --- a/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py +++ b/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py @@ -121,6 +121,21 @@ def test_create_new_version(self): response.data["identifier"], ) + def test_create_new_version_shares_permissions(self): + """ + Ensure new version shares EditorPermissions with the original. + """ + response = self.client.post( + "/rpc/v2/datasets/create_new_version?identifier=5", format="json" + ) + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) + + next_version_identifier = response.data.get("identifier") + cr = CR.objects.get(id=5) + next_version_cr = CR.objects.get(identifier=next_version_identifier) + self.assertEqual(cr.editor_permissions_id, next_version_cr.editor_permissions_id) + + def test_delete_new_version_draft(self): """ Ensure a new version that is created into draft state can be deleted, and is permanently deleted. 
diff --git a/src/metax_api/tests/testdata/generate_test_data.py b/src/metax_api/tests/testdata/generate_test_data.py index b6ffcabc..04da0d5c 100755 --- a/src/metax_api/tests/testdata/generate_test_data.py +++ b/src/metax_api/tests/testdata/generate_test_data.py @@ -384,6 +384,7 @@ def add_editor_permissions(editor_permissions, dataset): "date_created": dataset["fields"]["date_created"], "editor_permissions_id": pk, "role": "creator", + "verified": True, }, "model": "metax_api.editoruserpermission", "pk": pk + 1, diff --git a/src/metax_api/tests/testdata/test_data.json b/src/metax_api/tests/testdata/test_data.json index ba51c435..aec0e37a 100755 --- a/src/metax_api/tests/testdata/test_data.json +++ b/src/metax_api/tests/testdata/test_data.json @@ -5722,7 +5722,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 0, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 1 @@ -5737,7 +5738,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 2, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 3 @@ -5752,7 +5754,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 4, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 5 @@ -5767,7 +5770,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 6, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 7 @@ -5782,7 +5786,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 8, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 9 @@ -5797,7 +5802,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 10, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 11 @@ -5812,7 +5818,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 12, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 13 @@ -5827,7 +5834,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 14, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 15 @@ -5842,7 +5850,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 16, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 17 @@ -5857,7 +5866,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 18, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 19 @@ -5872,7 +5882,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 20, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 21 @@ -5887,7 +5898,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 22, "role": "creator", - "user_id": "abc-user-123" + "user_id": 
"abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 23 @@ -5902,7 +5914,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 24, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 25 @@ -5917,7 +5930,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 26, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 27 @@ -5932,7 +5946,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 28, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 29 @@ -5947,7 +5962,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 30, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 31 @@ -5962,7 +5978,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 32, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 33 @@ -5977,7 +5994,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 34, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 35 @@ -5992,7 +6010,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 36, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 37 @@ -6007,7 +6026,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 38, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 39 @@ -6022,7 +6042,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 40, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 41 @@ -6037,7 +6058,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 42, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 43 @@ -6052,7 +6074,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 44, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 45 @@ -6067,7 +6090,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 46, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 47 @@ -6082,7 +6106,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 48, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 49 @@ -6097,7 +6122,8 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 50, "role": "creator", - "user_id": "abc-user-123" + "user_id": "abc-user-123", + "verified": true }, "model": "metax_api.editoruserpermission", "pk": 51 From 804ab7458a944c2f1a18c7341cd2f38f7787d49a Mon Sep 17 00:00:00 2001 From: aptiaine 
Date: Thu, 16 Sep 2021 10:00:54 +0300 Subject: [PATCH 094/160] Adding first endpoint --- src/metax_api/api/rest/base/router.py | 9 +++++++++ src/metax_api/api/rest/base/serializers/__init__.py | 1 + src/metax_api/api/rest/base/views/dataset_view.py | 8 ++++++++ src/metax_api/models/__init__.py | 2 +- 4 files changed, 19 insertions(+), 1 deletion(-) diff --git a/src/metax_api/api/rest/base/router.py b/src/metax_api/api/rest/base/router.py index 742103a3..786bd1b2 100755 --- a/src/metax_api/api/rest/base/router.py +++ b/src/metax_api/api/rest/base/router.py @@ -83,4 +83,13 @@ def get_default_basename(self, viewset): DatasetViewSet, ) +router.register( + r"datasets/(?P<cr_identifier>.+)/editor_permissions/users/(?P<user_id>.+)/?", + DatasetViewSet, +) +router.register( + r"datasets/(?P<cr_identifier>.+)/editor_permissions/users/?", + DatasetViewSet, +) + api_urlpatterns = router.urls diff --git a/src/metax_api/api/rest/base/serializers/__init__.py b/src/metax_api/api/rest/base/serializers/__init__.py index 1c243720..84d9c24b 100755 --- a/src/metax_api/api/rest/base/serializers/__init__.py +++ b/src/metax_api/api/rest/base/serializers/__init__.py @@ -13,3 +13,4 @@ from .file_storage_serializer import FileStorageSerializer from .serializer_utils import validate_json from .xml_metadata_serializer import XmlMetadataSerializer +from .editor_permissions_serializer import EditorPermissionsSerializer diff --git a/src/metax_api/api/rest/base/views/dataset_view.py b/src/metax_api/api/rest/base/views/dataset_view.py index bd416afa..fcef6d62 100755 --- a/src/metax_api/api/rest/base/views/dataset_view.py +++ b/src/metax_api/api/rest/base/views/dataset_view.py @@ -116,6 +116,14 @@ def list(self, request, *args, **kwargs): if "identifier" in kwargs and "metadata_version_identifier" in kwargs: return self._metadata_version_get(request, *args, **kwargs) + if "cr_identifier" in kwargs: + from ..serializers import EditorPermissionsSerializer + cr = CatalogRecord.objects.get(pk=kwargs['cr_identifier']) + + _all = cr.editor_permissions.users.all() + editorserializer = EditorPermissionsSerializer(_all, many=True) + return Response(editorserializer.data) + return super(DatasetViewSet, self).list(request, *args, **kwargs) def _metadata_version_get(self, request, *args, **kwargs): diff --git a/src/metax_api/models/__init__.py b/src/metax_api/models/__init__.py index ec08fb44..643cac39 100755 --- a/src/metax_api/models/__init__.py +++ b/src/metax_api/models/__init__.py @@ -6,7 +6,7 @@ # :license: MIT from .api_error import ApiError -from .catalog_record import AlternateRecordSet, CatalogRecord +from .catalog_record import AlternateRecordSet, CatalogRecord, EditorPermissions, EditorUserPermission from .catalog_record_v2 import CatalogRecordV2 from .common import Common from .contract import Contract From e532fa2dfd4fec15379195d6b247d4d8c2338e17 Mon Sep 17 00:00:00 2001 From: Jori Niemi <3295718+tahme@users.noreply.github.com> Date: Mon, 6 Sep 2021 12:36:57 +0300 Subject: [PATCH 095/160] * Add removed=False check to editor_permissions_user filter * Add tests to editor_permissions_user filter * Remove debug print from tests --- .../services/catalog_record_service.py | 1 + .../api/rest/base/views/datasets/read.py | 19 +++++++++++++++ .../api/rest/base/views/datasets/write.py | 2 -- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/src/metax_api/services/catalog_record_service.py b/src/metax_api/services/catalog_record_service.py index 1f35d33c..09901390 100755 --- a/src/metax_api/services/catalog_record_service.py +++ 
b/src/metax_api/services/catalog_record_service.py @@ -151,6 +151,7 @@ def filter_by_editor_permissions_user(request, queryset_search_params): queryset_search_params["editor_permissions__users__user_id"] = user_id queryset_search_params["editor_permissions__users__verified"] = True + queryset_search_params["editor_permissions__users__removed"] = False @staticmethod def filter_by_state(request, queryset_search_params): diff --git a/src/metax_api/tests/api/rest/base/views/datasets/read.py b/src/metax_api/tests/api/rest/base/views/datasets/read.py index 98b255bc..15e20e12 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/read.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/read.py @@ -965,6 +965,25 @@ def test_filter_by_legacy(self): self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual(count_all, response.data["count"], response.data) + def test_filter_by_editor_permissions_user_ok(self): + cr = CatalogRecord.objects.get(pk=1) + cr.editor_permissions.users.update(user_id='test_user_x') + response = self.client.get(f"/rest/datasets?editor_permissions_user=test_user_x") + self.assertEqual(response.data["count"], 1) + + def test_filter_by_editor_permissions_user_not_verified(self): + cr = CatalogRecord.objects.get(pk=1) + cr.editor_permissions.users.update(user_id='test_user_x', verified=False) + response = self.client.get(f"/rest/datasets?editor_permissions_user=test_user_x") + self.assertEqual(response.data["count"], 0) + + def test_filter_by_editor_permissions_user_removed(self): + cr = CatalogRecord.objects.get(pk=1) + cr.editor_permissions.users.update(user_id='test_user_x') + cr.editor_permissions.users.first().delete() + response = self.client.get(f"/rest/datasets?editor_permissions_user=test_user_x") + self.assertEqual(response.data["count"], 0) + class CatalogRecordApiReadXMLTransformationTests(CatalogRecordApiReadCommon): diff --git a/src/metax_api/tests/api/rest/base/views/datasets/write.py b/src/metax_api/tests/api/rest/base/views/datasets/write.py index 24edfe0b..d7613a92 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/write.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/write.py @@ -563,8 +563,6 @@ def test_create_catalog_record_adds_creator_permission(self): format="json", ) cr = CatalogRecord.objects.get(id=response.data["id"]) - import json - print(json.dumps(response.data, indent=2)) self.assertEqual( list(cr.editor_permissions.users.values("user_id", "role", "verified")), [ From 1dc1c4946eed9a11e215f608b68e45834cce378a Mon Sep 17 00:00:00 2001 From: aptiaine Date: Thu, 14 Oct 2021 10:09:47 +0300 Subject: [PATCH 096/160] Editor rights endpoints added --- src/metax_api/api/rest/base/router.py | 9 +- .../editor_permissions_serializer.py | 24 ++++ src/metax_api/api/rest/base/views/__init__.py | 1 + .../api/rest/base/views/dataset_view.py | 9 +- .../base/views/editor_permissions_view.py | 108 ++++++++++++++++++ ...sions.py => 0039_add_editorpermissions.py} | 8 +- .../0039_editorpermissions_not_null.py | 19 --- .../settings/components/access_control.py | 24 ++++ 8 files changed, 170 insertions(+), 32 deletions(-) create mode 100644 src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py create mode 100644 src/metax_api/api/rest/base/views/editor_permissions_view.py rename src/metax_api/migrations/{0038_add_editorpermissions.py => 0039_add_editorpermissions.py} (93%) delete mode 100644 src/metax_api/migrations/0039_editorpermissions_not_null.py diff --git 
a/src/metax_api/api/rest/base/router.py b/src/metax_api/api/rest/base/router.py index 786bd1b2..160e1bd0 100755 --- a/src/metax_api/api/rest/base/router.py +++ b/src/metax_api/api/rest/base/router.py @@ -27,6 +27,7 @@ DataCatalogViewSet, DatasetViewSet, DirectoryViewSet, + EditorPermissionViewSet, FileStorageViewSet, FileViewSet, SchemaViewSet, @@ -84,12 +85,12 @@ def get_default_basename(self, viewset): ) router.register( - r"datasets/(?P<cr_identifier>.+)/editor_permissions/users/(?P<user_id>.+)/?", - DatasetViewSet, + r"datasets/(?P<cr_identifier>.+)/editor_permissions/users/(?P<user_id>.+)$", + EditorPermissionViewSet, ) router.register( - r"datasets/(?P<cr_identifier>.+)/editor_permissions/users/?", - DatasetViewSet, + r"datasets/(?P<cr_identifier>.+)/editor_permissions/users$", + EditorPermissionViewSet, ) api_urlpatterns = router.urls diff --git a/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py b/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py new file mode 100644 index 00000000..b7d3e733 --- /dev/null +++ b/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py @@ -0,0 +1,24 @@ +from django.core.validators import EMPTY_VALUES + +from rest_framework.serializers import ValidationError, ModelSerializer + +from metax_api.models import EditorUserPermission + +from .common_serializer import CommonSerializer +from .serializer_utils import validate_json + + +class EditorPermissionsSerializer(ModelSerializer): + class Meta: + model = EditorUserPermission + fields = '__all__' + + extra_kwargs = CommonSerializer.Meta.extra_kwargs + + def validate(self, attrs): + data = ModelSerializer.validate(self, attrs) + + if data.get('verified') and data.get('verification_token') in EMPTY_VALUES: + raise ValidationError({'verification_token': 'Verification token missing'}) + + return data diff --git a/src/metax_api/api/rest/base/views/__init__.py b/src/metax_api/api/rest/base/views/__init__.py index d5c62dd9..027ae76f 100755 --- a/src/metax_api/api/rest/base/views/__init__.py +++ b/src/metax_api/api/rest/base/views/__init__.py @@ -10,6 +10,7 @@ from .data_catalog_view import DataCatalogViewSet from .dataset_view import DatasetViewSet from .directory_view import DirectoryViewSet +from .editor_permissions_view import EditorPermissionViewSet from .file_storage_view import FileStorageViewSet from .file_view import FileViewSet from .schema_view import SchemaViewSet diff --git a/src/metax_api/api/rest/base/views/dataset_view.py b/src/metax_api/api/rest/base/views/dataset_view.py index fcef6d62..1aeaab0b 100755 --- a/src/metax_api/api/rest/base/views/dataset_view.py +++ b/src/metax_api/api/rest/base/views/dataset_view.py @@ -10,6 +10,7 @@ from django.conf import settings from django.http import Http404 + from rest_framework import status from rest_framework.decorators import action from rest_framework.response import Response @@ -116,14 +117,6 @@ def list(self, request, *args, **kwargs): if "identifier" in kwargs and "metadata_version_identifier" in kwargs: return self._metadata_version_get(request, *args, **kwargs) - if "cr_identifier" in kwargs: - from ..serializers import EditorPermissionsSerializer - cr = CatalogRecord.objects.get(pk=kwargs['cr_identifier']) - - _all = cr.editor_permissions.users.all() - editorserializer = EditorPermissionsSerializer(_all, many=True) - return Response(editorserializer.data) - return super(DatasetViewSet, self).list(request, *args, **kwargs) def _metadata_version_get(self, request, *args, **kwargs): diff --git a/src/metax_api/api/rest/base/views/editor_permissions_view.py 
b/src/metax_api/api/rest/base/views/editor_permissions_view.py new file mode 100644 index 00000000..3f261da9 --- /dev/null +++ b/src/metax_api/api/rest/base/views/editor_permissions_view.py @@ -0,0 +1,108 @@ +# This file is part of the Metax API service +# +# Copyright 2017-2018 Ministry of Education and Culture, Finland +# +# :author: CSC - IT Center for Science Ltd., Espoo Finland +# :license: MIT +import datetime +import logging +from json import dump + +from django.conf import settings +from django.core.validators import EMPTY_VALUES + +from django.shortcuts import get_object_or_404 +from rest_framework import status + +from rest_framework.response import Response + +from metax_api.models import CatalogRecord, Common +from metax_api.models.catalog_record import PermissionRole + +from ..serializers import EditorPermissionsSerializer +from .common_view import CommonViewSet + +_logger = logging.getLogger(__name__) + +# GET /rest/datasets/<cr_identifier>/editor_permissions/users List CatalogRecord.editor_permissions.users.all() +# POST /rest/datasets/<cr_identifier>/editor_permissions/users Add new (unverified) user permission with { user_id: x, role: y}, return created EditorUserPermission +# GET /rest/datasets/<cr_identifier>/editor_permissions/users/<user_id> Return CatalogRecord.editor_permissions.users.get(user_id=<user_id>) +# DELETE /rest/datasets/<cr_identifier>/editor_permissions/users/<user_id> "Delete" EditorUserPermission (marks as removed) +# PATCH /rest/datasets/<cr_identifier>/editor_permissions/users/<user_id> Update EditorUserPermission.role or enable EditorUserPermission.verified +# +# GET /rest/datasets?editor_permissions_user=<user_id> List datasets for _verified_ EditorUserPermission objects with user_id <user_id> +# +# * Enabling verified with PATCH should require verification_token and verify it, e.g. { verified: True, verification_token: xxx } +# * POSTing removed user should "unremove" the user and reset verification token +# * DELETE should be prevented for last user with role="creator" + + +class EditorPermissionViewSet(CommonViewSet): + + def __init__(self, *args, **kwargs): + # As opposed to other views, do not set json schema here + # It is done in the serializer + super(EditorPermissionViewSet, self).__init__(*args, **kwargs) + + def list(self, request, *args, **kwargs): + cr = get_object_or_404(CatalogRecord, pk=kwargs['cr_identifier']) + if 'user_id' in kwargs: + user = cr.editor_permissions.users.get(user_id=kwargs['user_id']) + editorserializer = EditorPermissionsSerializer(user) + else: + editorserializer = EditorPermissionsSerializer(cr.editor_permissions.users.all(), many=True) + return Response(editorserializer.data) + + def create(self, request, *args, **kwargs): + data = request.data + cr = get_object_or_404(CatalogRecord, pk=kwargs['cr_identifier']) + if 'user_id' in data: + editorserializer = None + removed_user = cr.editor_permissions.users.filter(user_id=data.get('user_id')).first() + data['verified'] = False + if removed_user not in EMPTY_VALUES and removed_user.removed is True: + data['verification_token'] = None + data['date_modified'] = datetime.datetime.now() + data['date_removed'] = None + data['removed'] = False + editorserializer = EditorPermissionsSerializer(removed_user, data=data) + elif removed_user not in EMPTY_VALUES and removed_user.removed is False: + return Response({'user_id': "User_id already exists"}, status=status.HTTP_400_BAD_REQUEST) + else: + data['editor_permissions'] = cr.editor_permissions.pk + data['date_created'] = datetime.datetime.now() + editorserializer = EditorPermissionsSerializer(data=data) + if editorserializer.is_valid(): 
editorserializer.save() + return Response(editorserializer.data, status=status.HTTP_201_CREATED) + else: + return Response(editorserializer.errors, status=status.HTTP_400_BAD_REQUEST) + + def destroy_bulk(self, request, *args, **kwargs): + cr = get_object_or_404(CatalogRecord, pk=kwargs['cr_identifier']) + try: + user = cr.editor_permissions.users.get(user_id=kwargs['user_id']) + except Exception as exc: + return Response({'error': 'Unknown user'}, status=status.HTTP_400_BAD_REQUEST) + + creators = cr.editor_permissions.users.filter(role=PermissionRole.CREATOR, removed=False).count() + if user.role == PermissionRole.CREATOR and creators < 2: + return Response({"error": "Can't delete last creator"}) + else: + user.remove() + return Response(status=status.HTTP_200_OK) + + def partial_update_bulk(self, request, **kwargs): + data = request.data + cr = get_object_or_404(CatalogRecord, pk=kwargs['cr_identifier']) + try: + user = cr.editor_permissions.users.get(user_id=kwargs['user_id']) + except Exception as exc: + return Response({'error': 'Unknown user'}, status=status.HTTP_400_BAD_REQUEST) + data['user_modified'] = datetime.datetime.now() + serializer = EditorPermissionsSerializer(user, data=data, partial=True) + if serializer.is_valid(): + serializer.save() + return Response(serializer.data) + else: + return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) diff --git a/src/metax_api/migrations/0038_add_editorpermissions.py b/src/metax_api/migrations/0039_add_editorpermissions.py similarity index 93% rename from src/metax_api/migrations/0038_add_editorpermissions.py rename to src/metax_api/migrations/0039_add_editorpermissions.py index ee715318..f268e437 100644 --- a/src/metax_api/migrations/0038_add_editorpermissions.py +++ b/src/metax_api/migrations/0039_add_editorpermissions.py @@ -71,7 +71,7 @@ def revert(apps, schema_editor): class Migration(migrations.Migration): dependencies = [ - ('metax_api', '0037_auto_20210811_1037'), + ('metax_api', '0038_remove_catalogrecord_editor'), ] operations = [ @@ -125,4 +125,10 @@ class Migration(migrations.Migration): constraint=models.CheckConstraint(check=models.Q(role__in=['creator', 'editor']), name='require_role'), ), migrations.RunPython(add_permissions, revert), + migrations.AlterField( + model_name='catalogrecord', + name='editor_permissions', + field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='catalog_records', + to='metax_api.editorpermissions'), + ), ] diff --git a/src/metax_api/migrations/0039_editorpermissions_not_null.py b/src/metax_api/migrations/0039_editorpermissions_not_null.py deleted file mode 100644 index 0d7ef953..00000000 --- a/src/metax_api/migrations/0039_editorpermissions_not_null.py +++ /dev/null @@ -1,19 +0,0 @@ -# Generated by Django 3.1.13 on 2021-08-26 12:44 - -from django.db import migrations, models -import django.db.models.deletion - - -class Migration(migrations.Migration): - - dependencies = [ - ('metax_api', '0038_add_editorpermissions'), - ] - - operations = [ - migrations.AlterField( - model_name='catalogrecord', - name='editor_permissions', - field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='catalog_records', to='metax_api.editorpermissions'), - ), - ] diff --git a/src/metax_api/settings/components/access_control.py b/src/metax_api/settings/components/access_control.py index 756d1a57..f8a3d018 100755 --- a/src/metax_api/settings/components/access_control.py +++ b/src/metax_api/settings/components/access_control.py @@ -11,6 +11,7 @@ 
"datacatalogs": {}, "datasets": {}, "directories": {}, + "editorpermissions": {}, "files": {}, "filestorages": {}, "schemas": {}, @@ -92,6 +93,29 @@ def __lt__(self, other): Role.ETSIN, ] +api_permissions.rest.editorpermissions.create = [ + Role.METAX, + Role.END_USERS, + Role.TPAS, + Role.QVAIN, + Role.ETSIN, +] +api_permissions.rest.editorpermissions.read = [Role.ALL] +api_permissions.rest.editorpermissions["update"] = [ + Role.METAX, + Role.END_USERS, + Role.TPAS, + Role.QVAIN, + Role.ETSIN, +] +api_permissions.rest.editorpermissions.delete = [ + Role.METAX, + Role.END_USERS, + Role.TPAS, + Role.QVAIN, + Role.ETSIN, +] + api_permissions.rest.directories.read = [ Role.METAX, Role.QVAIN, From 12861bfc0f02d579ec37601a496f9fef53aed607 Mon Sep 17 00:00:00 2001 From: aptiaine Date: Fri, 29 Oct 2021 15:38:06 +0300 Subject: [PATCH 097/160] Editor rights api tests included --- .../editor_permissions_serializer.py | 1 - .../base/views/editor_permissions_view.py | 34 ++-- .../base/views/editorpremissions/__init__.py | 2 + .../rest/base/views/editorpremissions/read.py | 82 +++++++++ .../base/views/editorpremissions/write.py | 166 ++++++++++++++++++ 5 files changed, 263 insertions(+), 22 deletions(-) create mode 100644 src/metax_api/tests/api/rest/base/views/editorpremissions/__init__.py create mode 100644 src/metax_api/tests/api/rest/base/views/editorpremissions/read.py create mode 100644 src/metax_api/tests/api/rest/base/views/editorpremissions/write.py diff --git a/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py b/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py index b7d3e733..e38dc313 100644 --- a/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py +++ b/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py @@ -5,7 +5,6 @@ from metax_api.models import EditorUserPermission from .common_serializer import CommonSerializer -from .serializer_utils import validate_json class EditorPermissionsSerializer(ModelSerializer): diff --git a/src/metax_api/api/rest/base/views/editor_permissions_view.py b/src/metax_api/api/rest/base/views/editor_permissions_view.py index 3f261da9..a63d29f3 100644 --- a/src/metax_api/api/rest/base/views/editor_permissions_view.py +++ b/src/metax_api/api/rest/base/views/editor_permissions_view.py @@ -6,9 +6,7 @@ # :license: MIT import datetime import logging -from json import dump -from django.conf import settings from django.core.validators import EMPTY_VALUES from django.shortcuts import get_object_or_404 @@ -16,26 +14,14 @@ from rest_framework.response import Response -from metax_api.models import CatalogRecord, Common -from metax_api.models.catalog_record import PermissionRole +from metax_api.models import CatalogRecord +from metax_api.models.catalog_record import PermissionRole, EditorUserPermission from ..serializers import EditorPermissionsSerializer from .common_view import CommonViewSet _logger = logging.getLogger(__name__) -# GET /rest/datasets//editor_permissions/users List CatalogRecord.editor_permissions.users.all() -# POST /rest/datasets//editor_permissions/users Add new (unverified) user permission with { user_id: x, role: y}, return created EditorUserPermission -# GET /rest/datasets//editor_permissions/users/ Return CatalogRecord.editor_permissions.users.get(user_id=) -# DELETE /rest/datasets//editor_permissions/users/ "Delete" EditorUserPermission (marks as removed) -# PATCH /rest/datasets//editor_permissions/users/ Update EditorUserPermission.role or enable 
EditorUserPermission.verified -# -# GET /rest/datasets?editor_permissions_user= List datasets for _verified_ EditorUserPermission objects with user_id -# -# * Enabling verified with PATCH should require verification_token and verify it, e.g. { verified: True, verification_token: xxx } -# * POSTing removed user should "unremove" the user and reset verification token -# * DELETE should be prevented for last user with role="creator" - class EditorPermissionViewSet(CommonViewSet): @@ -47,7 +33,10 @@ def __init__(self, *args, **kwargs): def list(self, request, *args, **kwargs): cr = get_object_or_404(CatalogRecord, pk=kwargs['cr_identifier']) if 'user_id' in kwargs: - user = cr.editor_permissions.users.get(user_id=kwargs['user_id']) + try: + user = cr.editor_permissions.users.get(user_id=kwargs['user_id']) + except Exception as exc: + return Response({'error': 'Unknown user'}, status=status.HTTP_400_BAD_REQUEST) editorserializer = EditorPermissionsSerializer(user) else: editorserializer = EditorPermissionsSerializer(cr.editor_permissions.users.all(), many=True) @@ -58,14 +47,15 @@ def create(self, request, *args, **kwargs): cr = get_object_or_404(CatalogRecord, pk=kwargs['cr_identifier']) if 'user_id' in data: editorserializer = None - removed_user = cr.editor_permissions.users.filter(user_id=data.get('user_id')).first() + removed_user = EditorUserPermission.objects_unfiltered.filter(user_id=data.get('user_id'), + editor_permissions_id=cr.editor_permissions_id).first() data['verified'] = False if removed_user not in EMPTY_VALUES and removed_user.removed is True: data['verification_token'] = None data['date_modified'] = datetime.datetime.now() data['date_removed'] = None data['removed'] = False - editorserializer = EditorPermissionsSerializer(removed_user, data=data) + editorserializer = EditorPermissionsSerializer(removed_user, data=data, partial=True) elif removed_user not in EMPTY_VALUES and removed_user.removed is False: return Response({'user_id': "User_id already exists"}, status=status.HTTP_400_BAD_REQUEST) else: @@ -77,6 +67,8 @@ def create(self, request, *args, **kwargs): return Response(editorserializer.data, status=status.HTTP_201_CREATED) else: return Response(editorserializer.errors, status=status.HTTP_400_BAD_REQUEST) + else: + return Response({'user_id': 'Missing user_id'}, status=status.HTTP_400_BAD_REQUEST) def destroy_bulk(self, request, *args, **kwargs): cr = get_object_or_404(CatalogRecord, pk=kwargs['cr_identifier']) @@ -87,7 +79,7 @@ def destroy_bulk(self, request, *args, **kwargs): creators = cr.editor_permissions.users.filter(role=PermissionRole.CREATOR, removed=False).count() if user.role == PermissionRole.CREATOR and creators < 2: - return Response({"error": "Can't delete last creator"}) + return Response({"error": "Can't delete last creator"}, status=status.HTTP_400_BAD_REQUEST) else: user.remove() return Response(status=status.HTTP_200_OK) @@ -99,7 +91,7 @@ def partial_update_bulk(self, request, **kwargs): user = cr.editor_permissions.users.get(user_id=kwargs['user_id']) except Exception as exc: return Response({'error': 'Unknown user'}, status=status.HTTP_400_BAD_REQUEST) - data['user_modified'] = datetime.datetime.now() + data['date_modified'] = datetime.datetime.now() serializer = EditorPermissionsSerializer(user, data=data, partial=True) if serializer.is_valid(): serializer.save() diff --git a/src/metax_api/tests/api/rest/base/views/editorpremissions/__init__.py b/src/metax_api/tests/api/rest/base/views/editorpremissions/__init__.py new file mode 100644 index 
00000000..aef77389 --- /dev/null +++ b/src/metax_api/tests/api/rest/base/views/editorpremissions/__init__.py @@ -0,0 +1,2 @@ +from .read import * +from .write import * diff --git a/src/metax_api/tests/api/rest/base/views/editorpremissions/read.py b/src/metax_api/tests/api/rest/base/views/editorpremissions/read.py new file mode 100644 index 00000000..2ce25083 --- /dev/null +++ b/src/metax_api/tests/api/rest/base/views/editorpremissions/read.py @@ -0,0 +1,82 @@ +# This file is part of the Metax API service +# +# Copyright 2017-2018 Ministry of Education and Culture, Finland +# +# :author: CSC - IT Center for Science Ltd., Espoo Finland +# :license: MIT + +from json import load as json_load + +from django.core.management import call_command + +from rest_framework import status +from rest_framework.test import APITestCase + +from metax_api.tests.utils import TestClassUtils, test_data_file_path + + +class EditorUserPermissionApiReadCommon(APITestCase, TestClassUtils): + @classmethod + def setUpClass(cls): + """ + Loaded only once for test cases inside this class. + """ + call_command("loaddata", test_data_file_path, verbosity=0) + super(EditorUserPermissionApiReadCommon, cls).setUpClass() + + def setUp(self): + self.cr_from_test_data = self._get_whole_object_from_test_data("catalogrecord", requested_pk=1) + self.crid = self.cr_from_test_data['pk'] + self.permissionid = self.cr_from_test_data["fields"]["editor_permissions_id"] + self.editor_user_permission = self._get_whole_object_from_test_data("editoruserpermission", requested_pk=1) + self.userid = self.editor_user_permission["fields"]["user_id"] + self._use_http_authorization() + + def _get_whole_object_from_test_data(self, model_name, requested_pk=0): + + with open(test_data_file_path) as test_data_file: + test_data = json_load(test_data_file) + + model = "metax_api.%s" % model_name + i = 0 + + for row in test_data: + if row["model"] == model: + if row["pk"] == requested_pk: + obj = { + "id": row["pk"], + } + obj.update(row) + return obj + + raise Exception( + "Could not find model %s from test data with pk == %d. " + "Are you certain you generated rows for model %s in generate_test_data.py?" 
+ % (model_name, requested_pk, model_name) + ) + + +class EditorUserPermissionApiReadBasicTests(EditorUserPermissionApiReadCommon): + + """ + Basic read operations + """ + + def test_read_editor_permission_list(self): + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + self.assertEqual(response.status_code, status.HTTP_200_OK) + + def test_read_editor_permission_list_invalid(self): + response = self.client.get("/rest/datasets/99999/editor_permissions/users") + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) + + def test_read_editor_permission_details_by_pk(self): + response = self.client.get("/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, self.userid)) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["editor_permissions"], self.permissionid) + self.assertEqual(response.data["user_id"], self.userid) + + def test_read_editor_permission_details_by_pk_invalid(self): + response = self.client.get("/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, "invalid")) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + diff --git a/src/metax_api/tests/api/rest/base/views/editorpremissions/write.py b/src/metax_api/tests/api/rest/base/views/editorpremissions/write.py new file mode 100644 index 00000000..34a2be06 --- /dev/null +++ b/src/metax_api/tests/api/rest/base/views/editorpremissions/write.py @@ -0,0 +1,166 @@ +# This file is part of the Metax API service +# +# Copyright 2017-2018 Ministry of Education and Culture, Finland +# +# :author: CSC - IT Center for Science Ltd., Espoo Finland +# :license: MIT + + +from json import load as json_load + +from django.core.management import call_command + +from rest_framework import status +from rest_framework.test import APITestCase + +from metax_api.models import EditorUserPermission +from metax_api.tests.utils import TestClassUtils, test_data_file_path + + +class EditorUserPermissionApiWriteCommon(APITestCase, TestClassUtils): + @classmethod + def setUpClass(cls): + """ + Loaded only once for test cases inside this class. + """ + call_command("loaddata", test_data_file_path, verbosity=0) + super(EditorUserPermissionApiWriteCommon, cls).setUpClass() + + def setUp(self): + self.cr_from_test_data = self._get_whole_object_from_test_data("catalogrecord", requested_pk=1) + self.crid = self.cr_from_test_data['pk'] + self.permissionid = self.cr_from_test_data["fields"]["editor_permissions_id"] + self.editor_user_permission = self._get_whole_object_from_test_data("editoruserpermission", requested_pk=1) + self.userid = self.editor_user_permission["fields"]["user_id"] + self._use_http_authorization() + + def _get_whole_object_from_test_data(self, model_name, requested_pk=0): + + with open(test_data_file_path) as test_data_file: + test_data = json_load(test_data_file) + + model = "metax_api.%s" % model_name + i = 0 + + for row in test_data: + if row["model"] == model: + if row["pk"] == requested_pk: + obj = { + "id": row["pk"], + } + obj.update(row) + return obj + + raise Exception( + "Could not find model %s from test data with pk == %d. " + "Are you certain you generated rows for model %s in generate_test_data.py?" 
+ % (model_name, requested_pk, model_name) + ) + + +class EditorUserPermissionApiWriteBasicTests(EditorUserPermissionApiWriteCommon): + + """ + Basic write operations + """ + + def test_write_editor_permission(self): + self._set_http_authorization("service") + data = {"role": "editor", "user_id": "test_editor"} + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + user_count = len(response.data) + response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) + self.assertEqual(response.data["editor_permissions"], self.permissionid) + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data), user_count + 1) + + def test_write_editor_permission_invalid_data(self): + self._set_http_authorization("service") + data = {"role": "editor"} + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + user_count = len(response.data) + response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data), user_count) + + def test_write_editor_permission_existing_userid(self): + self._set_http_authorization("service") + data = {"role": "editor", "user_id": "double_editor"} + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + user_count = len(response.data) + response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) + response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data), user_count + 1) + + def test_write_editor_permission_change_values(self): + self._set_http_authorization("service") + data = {"role": "editor", "user_id": "change_editor"} + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + user_count = len(response.data) + response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) + new_permission = EditorUserPermission.objects.get(user_id="change_editor", editor_permissions_id=self.permissionid) + new_permission.generate_verification_token() + new_data = {"verified": True, "verification_token": new_permission.verification_token} + response = self.client.patch("/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, + response.data.get('user_id')), + new_data, format="json") + self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) + self.assertEqual(response.data.get('verified'), True) + self.assertEqual(response.data.get('verification_token'), new_permission.verification_token) + + def 
test_write_editor_permission_remove_users(self): + self._set_http_authorization("service") + data = {"role": "creator", "user_id": "new_creator"} + response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) + response = self.client.delete("/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, data.get('user_id'))) + self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) + + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + for user in response.data: + if user.get('role') == 'creator': + response = self.client.delete( + "/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, user.get('user_id'))) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) + else: + response = self.client.delete( + "/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, user.get('user_id'))) + self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) + + def test_write_editor_permission_add_removed_user(self): + self._set_http_authorization("service") + data = {"role": "editor", "user_id": "new_editor"} + # add + response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) + new_permission = EditorUserPermission.objects.get(user_id="new_editor", editor_permissions_id=self.permissionid) + new_permission.generate_verification_token() + new_data = {"verified": True, "verification_token": new_permission.verification_token} + # change + response = self.client.patch("/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, + response.data.get('user_id')), + new_data, format="json") + self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) + self.assertEqual(response.data.get('verified'), True) + self.assertEqual(response.data.get('verification_token'), new_permission.verification_token) + # remove + response = self.client.delete( + "/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, data.get('user_id'))) + self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) + removed_user = EditorUserPermission.objects_unfiltered.get(user_id="new_editor", + editor_permissions_id=self.permissionid) + self.assertEqual(removed_user.removed, True) + response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) + self.assertEqual(response.data.get('verification_token'), None) + self.assertEqual(response.data.get('removed'), False) + From 44e8208f755a012895dd7ca622443a44480058df Mon Sep 17 00:00:00 2001 From: aptiaine Date: Mon, 1 Nov 2021 15:38:25 +0200 Subject: [PATCH 098/160] Editor rights swagger documentation --- src/metax_api/swagger/v1/swagger.yaml | 175 +++++++++++++++++++++++++- 1 file changed, 170 insertions(+), 5 deletions(-) diff --git a/src/metax_api/swagger/v1/swagger.yaml b/src/metax_api/swagger/v1/swagger.yaml index 9c76ecc8..51a84d74 100755 --- a/src/metax_api/swagger/v1/swagger.yaml +++ b/src/metax_api/swagger/v1/swagger.yaml @@ -640,7 +640,7 @@ paths: in: query description: Sets paging on with default limit of 10 required: false - type: bolean + type: boolean - name: offset in: query description: Offset for paging @@ -1414,8 +1414,133 @@ paths: description: Resource not found. 
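
The paths added below document the editor-permissions endpoints that the tests above exercise. As a rough client-side sketch (not part of the patch; the base URL and credentials are placeholders), the read flow looks like this:

    import requests

    BASE = "https://metax.example.org"  # hypothetical host
    AUTH = ("metax_service", "secret")  # hypothetical service credentials
    CRID = 1

    # List every editor permission attached to a catalog record
    resp = requests.get("%s/rest/datasets/%d/editor_permissions/users" % (BASE, CRID), auth=AUTH)
    resp.raise_for_status()
    for perm in resp.json():
        print(perm["user_id"], perm["role"])

    # Fetch a single user's permission entry; an unknown record yields 404
    resp = requests.get(
        "%s/rest/datasets/%d/editor_permissions/users/abc-user-123" % (BASE, CRID), auth=AUTH
    )
    print(resp.status_code, resp.json())
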
tags: - Dataset API - - + /rest/datasets/{CRID}/editor_permissions/users: + get: + summary: List all editor permissions of a record + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + responses: + "200": + description: Successful operation, return a list of editor rights. May return an empty list. + schema: + type: array + items: + $ref: '#/definitions/EditorUserPermission' + "400": + description: Bad request. + "404": + description: Resource not found. + tags: + - Dataset API + post: + summary: Create a new editor permission of a record + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + - name: body + in: body + schema: + type: object + properties: + user_id: + type: string + role: + type: string + responses: + "201": + description: Successful operation, return created editor rights. + schema: + $ref: '#/definitions/EditorUserPermission' + "400": + description: Bad request. + "404": + description: Resource not found. + tags: + - Dataset API + /rest/datasets/{CRID}/editor_permissions/users/{USER_ID}: + get: + summary: Get a single editor permission of a record + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + - name: USER_ID + in: path + description: User ID + required: true + type: string + responses: + "200": + description: Successful operation, return a single editor permission. + schema: + $ref: '#/definitions/EditorUserPermission' + "400": + description: Bad request. + "404": + description: Resource not found. + tags: + - Dataset API + patch: + summary: Update role or enable verified + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + - name: USER_ID + in: path + description: User ID + required: true + type: string + - name: body + in: body + schema: + $ref: '#/definitions/EditorUserPermission' + responses: + "200": + description: Successful operation, return changed editor rights. + schema: + $ref: '#/definitions/EditorUserPermission' + "400": + description: Bad request. + "404": + description: Resource not found. 
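
The POST and PATCH operations documented above combine in practice roughly as follows, again a minimal sketch with placeholder host and credentials rather than part of the patch:

    import requests

    BASE = "https://metax.example.org"  # hypothetical host
    AUTH = ("metax_service", "secret")  # hypothetical service credentials
    CRID = 1

    # Grant a new user editor rights on the record
    resp = requests.post(
        "%s/rest/datasets/%d/editor_permissions/users" % (BASE, CRID),
        json={"user_id": "new_editor", "role": "editor"},
        auth=AUTH,
    )
    assert resp.status_code == 201, resp.text

    # Partially update the same entry; only the changed fields are sent
    resp = requests.patch(
        "%s/rest/datasets/%d/editor_permissions/users/new_editor" % (BASE, CRID),
        json={"role": "creator"},
        auth=AUTH,
    )
    assert resp.status_code == 200, resp.text
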
+ tags: + - Dataset API + delete: + summary: Editorpermission marked as removed + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + - name: USER_ID + in: path + description: User ID + required: true + type: string + responses: + '200': + description: Successful operation + '400': + description: Bad request + '403': + description: Forbidden + '404': + description: Not found + tags: + - Dataset API # Contract API /rest/contracts: get: @@ -2043,8 +2168,48 @@ definitions: count_cumulative: type: number format: float - - + EditorUserPermission: + type: object + properties: + id: + type: integer + readOnly: true + active: + type: boolean + removed: + type: boolean + date_modified: + type: string + format: date-time + readOnly: true + user_modified: + type: string + date_created: + type: string + format: date-time + readOnly: true + user_created: + type: string + service_modified: + type: string + service_created: + type: string + date_removed: + type: string + format: date-time + user_id: + type: string + role: + type: string + verified: + type: boolean + verification_token: + type: string + verification_token_expires: + type: string + format: date-time + editor_permission_id: + type: integer examples: count_datasets: type: object From 296685f9bfe31f12166282b633e1d915f6eab0a4 Mon Sep 17 00:00:00 2001 From: aptiaine Date: Fri, 5 Nov 2021 14:03:59 +0200 Subject: [PATCH 099/160] refactoring all endpoints --- src/metax_api/api/rest/base/router.py | 6 +- .../editor_permissions_serializer.py | 6 +- .../api/rest/base/views/common_view.py | 4 +- .../base/views/editor_permissions_view.py | 115 ++++++++++-------- src/metax_api/api/rest/v2/router.py | 6 + .../__init__.py | 0 .../read.py | 9 +- .../write.py | 0 8 files changed, 86 insertions(+), 60 deletions(-) rename src/metax_api/tests/api/rest/base/views/{editorpremissions => editorpermissions}/__init__.py (100%) rename src/metax_api/tests/api/rest/base/views/{editorpremissions => editorpermissions}/read.py (87%) rename src/metax_api/tests/api/rest/base/views/{editorpremissions => editorpermissions}/write.py (100%) diff --git a/src/metax_api/api/rest/base/router.py b/src/metax_api/api/rest/base/router.py index 160e1bd0..ac745be9 100755 --- a/src/metax_api/api/rest/base/router.py +++ b/src/metax_api/api/rest/base/router.py @@ -85,11 +85,7 @@ def get_default_basename(self, viewset): ) router.register( - r"datasets/(?P.+)/editor_permissions/users/(?P.+)$", - EditorPermissionViewSet, -) -router.register( - r"datasets/(?P.+)/editor_permissions/users$", + "datasets/(?P.+)/editor_permissions/users", EditorPermissionViewSet, ) diff --git a/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py b/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py index e38dc313..8da691f4 100644 --- a/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py +++ b/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py @@ -10,14 +10,14 @@ class EditorPermissionsSerializer(ModelSerializer): class Meta: model = EditorUserPermission - fields = '__all__' + fields = "__all__" extra_kwargs = CommonSerializer.Meta.extra_kwargs def validate(self, attrs): data = ModelSerializer.validate(self, attrs) - if data.get('verified') and data.get('verification_token') in EMPTY_VALUES: - raise ValidationError({'verification_token': 'Verification token missing'}) + if data.get("verified") and data.get("verification_token") in EMPTY_VALUES: + raise 
ValidationError({"verification_token": "Verification token missing"}) return data diff --git a/src/metax_api/api/rest/base/views/common_view.py b/src/metax_api/api/rest/base/views/common_view.py index 14de18ca..3e50d00c 100755 --- a/src/metax_api/api/rest/base/views/common_view.py +++ b/src/metax_api/api/rest/base/views/common_view.py @@ -373,7 +373,9 @@ def _check_and_store_bulk_error(self, request, response): """ if "failed" in response.data and len(response.data["failed"]): try: - error_json = ApiErrorSerializerV2.request_to_json(self.request, response, other={"bulk_request": True}) + error_json = ApiErrorSerializerV2.request_to_json( + self.request, response, other={"bulk_request": True} + ) response.data["error_identifier"] = error_json["identifier"] rabbitmq.publish(error_json, exchange="apierrors") except Exception as e: diff --git a/src/metax_api/api/rest/base/views/editor_permissions_view.py b/src/metax_api/api/rest/base/views/editor_permissions_view.py index a63d29f3..dbc4fd8c 100644 --- a/src/metax_api/api/rest/base/views/editor_permissions_view.py +++ b/src/metax_api/api/rest/base/views/editor_permissions_view.py @@ -16,6 +16,7 @@ from metax_api.models import CatalogRecord from metax_api.models.catalog_record import PermissionRole, EditorUserPermission +from metax_api.services import CommonService from ..serializers import EditorPermissionsSerializer from .common_view import CommonViewSet @@ -24,73 +25,79 @@ class EditorPermissionViewSet(CommonViewSet): + lookup_field = "user_id" + + serializer_class = EditorPermissionsSerializer def __init__(self, *args, **kwargs): - # As opposed to other views, do not set json schema here - # It is done in the serializer super(EditorPermissionViewSet, self).__init__(*args, **kwargs) - def list(self, request, *args, **kwargs): - cr = get_object_or_404(CatalogRecord, pk=kwargs['cr_identifier']) - if 'user_id' in kwargs: - try: - user = cr.editor_permissions.users.get(user_id=kwargs['user_id']) - except Exception as exc: - return Response({'error': 'Unknown user'}, status=status.HTTP_400_BAD_REQUEST) - editorserializer = EditorPermissionsSerializer(user) + def get_queryset(self): + if CommonService.is_primary_key(self.kwargs['cr_identifier']): + cr = get_object_or_404(CatalogRecord, pk=int(self.kwargs['cr_identifier'])) else: - editorserializer = EditorPermissionsSerializer(cr.editor_permissions.users.all(), many=True) + cr = get_object_or_404(CatalogRecord, identifier=self.kwargs['cr_identifier']) + return cr.editor_permissions.users + + def list(self, request, *args, **kwargs): + users = self.get_queryset() + editorserializer = EditorPermissionsSerializer(users.all(), many=True) return Response(editorserializer.data) def create(self, request, *args, **kwargs): data = request.data - cr = get_object_or_404(CatalogRecord, pk=kwargs['cr_identifier']) - if 'user_id' in data: - editorserializer = None - removed_user = EditorUserPermission.objects_unfiltered.filter(user_id=data.get('user_id'), - editor_permissions_id=cr.editor_permissions_id).first() - data['verified'] = False - if removed_user not in EMPTY_VALUES and removed_user.removed is True: - data['verification_token'] = None - data['date_modified'] = datetime.datetime.now() - data['date_removed'] = None - data['removed'] = False - editorserializer = EditorPermissionsSerializer(removed_user, data=data, partial=True) - elif removed_user not in EMPTY_VALUES and removed_user.removed is False: - return Response({'user_id': "User_id already exists"}, status=status.HTTP_400_BAD_REQUEST) - else: - 
data['editor_permissions'] = cr.editor_permissions.pk - data['date_created'] = datetime.datetime.now() - editorserializer = EditorPermissionsSerializer(data=data) - if editorserializer.is_valid(): - editorserializer.save() - return Response(editorserializer.data, status=status.HTTP_201_CREATED) - else: - return Response(editorserializer.errors, status=status.HTTP_400_BAD_REQUEST) + + perms_id = self.get_queryset().instance.id + if "user_id" not in data: + return Response({"user_id": "Missing user_id"}, status=status.HTTP_400_BAD_REQUEST) + + editorserializer = None + removed_user = EditorUserPermission.objects_unfiltered.filter( + user_id=data.get('user_id'), editor_permissions_id=perms_id).first() + data['verified'] = False + if removed_user not in EMPTY_VALUES and removed_user.removed is True: + data['verification_token'] = None + data['date_modified'] = datetime.datetime.now() + data['date_removed'] = None + data['removed'] = False + editorserializer = EditorPermissionsSerializer(removed_user, data=data, partial=True) + elif removed_user not in EMPTY_VALUES and removed_user.removed is False: + return Response( + {'user_id': "User_id already exists"}, status=status.HTTP_400_BAD_REQUEST + ) else: - return Response({'user_id': 'Missing user_id'}, status=status.HTTP_400_BAD_REQUEST) + data['editor_permissions'] = perms_id + data['date_created'] = datetime.datetime.now() + editorserializer = EditorPermissionsSerializer(data=data) + if editorserializer.is_valid(): + editorserializer.save() + return Response(editorserializer.data, status=status.HTTP_201_CREATED) + else: + return Response(editorserializer.errors, status=status.HTTP_400_BAD_REQUEST) - def destroy_bulk(self, request, *args, **kwargs): - cr = get_object_or_404(CatalogRecord, pk=kwargs['cr_identifier']) - try: - user = cr.editor_permissions.users.get(user_id=kwargs['user_id']) - except Exception as exc: - return Response({'error': 'Unknown user'}, status=status.HTTP_400_BAD_REQUEST) + def destroy(self, request, *args, **kwargs): + user = self.get_object() + users = self.get_queryset() - creators = cr.editor_permissions.users.filter(role=PermissionRole.CREATOR, removed=False).count() + creators = users.filter(role=PermissionRole.CREATOR, removed=False).count() if user.role == PermissionRole.CREATOR and creators < 2: - return Response({"error": "Can't delete last creator"}, status=status.HTTP_400_BAD_REQUEST) + return Response( + {"error": "Can't delete last creator"}, status=status.HTTP_400_BAD_REQUEST + ) else: user.remove() return Response(status=status.HTTP_200_OK) - def partial_update_bulk(self, request, **kwargs): + def partial_update(self, request, **kwargs): data = request.data - cr = get_object_or_404(CatalogRecord, pk=kwargs['cr_identifier']) - try: - user = cr.editor_permissions.users.get(user_id=kwargs['user_id']) - except Exception as exc: - return Response({'error': 'Unknown user'}, status=status.HTTP_400_BAD_REQUEST) + user = self.get_object() + users = self.get_queryset() + + if 'role' in data and user.role == PermissionRole.CREATOR: + creators = users.filter(role=PermissionRole.CREATOR, removed=False).count() + if creators < 2 and data.get('role') != PermissionRole.CREATOR: + return Response({"error": "Can't change last creator"}, status=status.HTTP_400_BAD_REQUEST) + data['date_modified'] = datetime.datetime.now() serializer = EditorPermissionsSerializer(user, data=data, partial=True) if serializer.is_valid(): @@ -98,3 +105,13 @@ def partial_update_bulk(self, request, **kwargs): return Response(serializer.data) 
else: return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + + def update_bulk(self, request, *args, **kwargs): + return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED) + + def partial_update_bulk(self, request, *args, **kwargs): + return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED) + + def destroy_bulk(self, request, *args, **kwargs): + return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED) + diff --git a/src/metax_api/api/rest/v2/router.py b/src/metax_api/api/rest/v2/router.py index 46e703d5..10bcd0c9 100755 --- a/src/metax_api/api/rest/v2/router.py +++ b/src/metax_api/api/rest/v2/router.py @@ -29,6 +29,7 @@ FileStorageViewSet, FileViewSet, SchemaViewSet, + EditorPermissionViewSet, ) from metax_api.api.rest.v2.views import ApiErrorViewSetV2 @@ -110,4 +111,9 @@ def __init__(self, *args, **kwargs): DatasetViewSet, ) +router_v2.register( + "datasets/(?P.+)/editor_permissions/users", + EditorPermissionViewSet, +) + api_urlpatterns = router_v1.urls + router_v2.urls diff --git a/src/metax_api/tests/api/rest/base/views/editorpremissions/__init__.py b/src/metax_api/tests/api/rest/base/views/editorpermissions/__init__.py similarity index 100% rename from src/metax_api/tests/api/rest/base/views/editorpremissions/__init__.py rename to src/metax_api/tests/api/rest/base/views/editorpermissions/__init__.py diff --git a/src/metax_api/tests/api/rest/base/views/editorpremissions/read.py b/src/metax_api/tests/api/rest/base/views/editorpermissions/read.py similarity index 87% rename from src/metax_api/tests/api/rest/base/views/editorpremissions/read.py rename to src/metax_api/tests/api/rest/base/views/editorpermissions/read.py index 2ce25083..af7a67e9 100644 --- a/src/metax_api/tests/api/rest/base/views/editorpremissions/read.py +++ b/src/metax_api/tests/api/rest/base/views/editorpermissions/read.py @@ -27,6 +27,7 @@ def setUpClass(cls): def setUp(self): self.cr_from_test_data = self._get_whole_object_from_test_data("catalogrecord", requested_pk=1) self.crid = self.cr_from_test_data['pk'] + self.identifier = "cr955e904-e3dd-4d7e-99f1-3fed446f96d1" self.permissionid = self.cr_from_test_data["fields"]["editor_permissions_id"] self.editor_user_permission = self._get_whole_object_from_test_data("editoruserpermission", requested_pk=1) self.userid = self.editor_user_permission["fields"]["user_id"] @@ -62,10 +63,14 @@ class EditorUserPermissionApiReadBasicTests(EditorUserPermissionApiReadCommon): Basic read operations """ - def test_read_editor_permission_list(self): + def test_read_editor_permission_list_with_pk(self): response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) self.assertEqual(response.status_code, status.HTTP_200_OK) + def test_read_editor_permission_list_with_uuid(self): + response = self.client.get("/rest/datasets/%s/editor_permissions/users" % self.identifier) + self.assertEqual(response.status_code, status.HTTP_200_OK) + def test_read_editor_permission_list_invalid(self): response = self.client.get("/rest/datasets/99999/editor_permissions/users") self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) @@ -78,5 +83,5 @@ def test_read_editor_permission_details_by_pk(self): def test_read_editor_permission_details_by_pk_invalid(self): response = self.client.get("/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, "invalid")) - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) diff --git 
a/src/metax_api/tests/api/rest/base/views/editorpremissions/write.py b/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py similarity index 100% rename from src/metax_api/tests/api/rest/base/views/editorpremissions/write.py rename to src/metax_api/tests/api/rest/base/views/editorpermissions/write.py From c300d2d220199bfe9ffa67d1e05c0a753b9922b3 Mon Sep 17 00:00:00 2001 From: aptiaine Date: Mon, 8 Nov 2021 13:30:20 +0200 Subject: [PATCH 100/160] Editing rights: removing verification_token and adding access limits --- .../editor_permissions_serializer.py | 3 --- .../base/views/editor_permissions_view.py | 5 ++-- .../migrations/0040_auto_20211108_1256.py | 21 ++++++++++++++++ src/metax_api/models/catalog_record.py | 24 ++----------------- .../base/views/editorpermissions/write.py | 16 +++++-------- 5 files changed, 32 insertions(+), 37 deletions(-) create mode 100644 src/metax_api/migrations/0040_auto_20211108_1256.py diff --git a/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py b/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py index 8da691f4..b306b416 100644 --- a/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py +++ b/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py @@ -17,7 +17,4 @@ class Meta: def validate(self, attrs): data = ModelSerializer.validate(self, attrs) - if data.get("verified") and data.get("verification_token") in EMPTY_VALUES: - raise ValidationError({"verification_token": "Verification token missing"}) - return data diff --git a/src/metax_api/api/rest/base/views/editor_permissions_view.py b/src/metax_api/api/rest/base/views/editor_permissions_view.py index dbc4fd8c..78e6de9b 100644 --- a/src/metax_api/api/rest/base/views/editor_permissions_view.py +++ b/src/metax_api/api/rest/base/views/editor_permissions_view.py @@ -16,6 +16,7 @@ from metax_api.models import CatalogRecord from metax_api.models.catalog_record import PermissionRole, EditorUserPermission +from metax_api.permissions import ServicePermissions from metax_api.services import CommonService from ..serializers import EditorPermissionsSerializer @@ -26,7 +27,7 @@ class EditorPermissionViewSet(CommonViewSet): lookup_field = "user_id" - + permission_classes = [ServicePermissions,] serializer_class = EditorPermissionsSerializer def __init__(self, *args, **kwargs): @@ -56,7 +57,7 @@ def create(self, request, *args, **kwargs): user_id=data.get('user_id'), editor_permissions_id=perms_id).first() data['verified'] = False if removed_user not in EMPTY_VALUES and removed_user.removed is True: - data['verification_token'] = None + #data['verification_token'] = None data['date_modified'] = datetime.datetime.now() data['date_removed'] = None data['removed'] = False diff --git a/src/metax_api/migrations/0040_auto_20211108_1256.py b/src/metax_api/migrations/0040_auto_20211108_1256.py new file mode 100644 index 00000000..d5c9dd97 --- /dev/null +++ b/src/metax_api/migrations/0040_auto_20211108_1256.py @@ -0,0 +1,21 @@ +# Generated by Django 3.1.13 on 2021-11-08 10:56 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('metax_api', '0039_add_editorpermissions'), + ] + + operations = [ + migrations.RemoveField( + model_name='editoruserpermission', + name='verification_token', + ), + migrations.RemoveField( + model_name='editoruserpermission', + name='verification_token_expires', + ), + ] diff --git a/src/metax_api/models/catalog_record.py 
b/src/metax_api/models/catalog_record.py index 9f447e55..6702a845 100755 --- a/src/metax_api/models/catalog_record.py +++ b/src/metax_api/models/catalog_record.py @@ -69,10 +69,12 @@ class EditorPermissions(models.Model): """ id = models.BigAutoField(primary_key=True, editable=False) + class PermissionRole(models.TextChoices): CREATOR = "creator" EDITOR = "editor" + class EditorUserPermission(Common): """ Table for attaching user roles to an EditorPermissions object. @@ -85,8 +87,6 @@ class EditorUserPermission(Common): user_id = models.CharField(max_length=200) role = models.CharField(max_length=16, choices=PermissionRole.choices) verified = models.BooleanField(default=False) - verification_token = models.CharField(max_length=32, null=True) - verification_token_expires = models.DateTimeField(null=True) class Meta: indexes = [ @@ -107,27 +107,7 @@ class Meta: def __repr__(self): return f"" - def clear_verification_token(self): - self.verification_token = None - self.verification_token_expires = None - - def generate_verification_token(self): - self.verification_token = get_random_string(length=32) - self.verification_token_expires = datetime.now() + timedelta(days=14) - - def verify(self, token): - if not token or token != self.verification_token or self.removed: - _logger.error("Invalid token or already used") - return False - if datetime.now() >= self.verification_token_expires: - _logger.error("Token expired") - return False - - self.verified = True - self.clear_verification_token() - def delete(self, *args, **kwargs): - self.clear_verification_token() super().remove(*args, **kwargs) diff --git a/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py b/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py index 34a2be06..a29589df 100644 --- a/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py +++ b/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py @@ -102,20 +102,17 @@ def test_write_editor_permission_existing_userid(self): def test_write_editor_permission_change_values(self): self._set_http_authorization("service") - data = {"role": "editor", "user_id": "change_editor"} + data = {"role": "creator", "user_id": "change_editor"} response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) user_count = len(response.data) response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - new_permission = EditorUserPermission.objects.get(user_id="change_editor", editor_permissions_id=self.permissionid) - new_permission.generate_verification_token() - new_data = {"verified": True, "verification_token": new_permission.verification_token} + new_data = {"role": "editor", "verified": True} response = self.client.patch("/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, response.data.get('user_id')), new_data, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('verified'), True) - self.assertEqual(response.data.get('verification_token'), new_permission.verification_token) + self.assertEqual(response.data.get("role"), "editor") def test_write_editor_permission_remove_users(self): self._set_http_authorization("service") @@ -143,15 +140,15 @@ def test_write_editor_permission_add_removed_user(self): response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") 
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) new_permission = EditorUserPermission.objects.get(user_id="new_editor", editor_permissions_id=self.permissionid) - new_permission.generate_verification_token() - new_data = {"verified": True, "verification_token": new_permission.verification_token} + + new_data = {"verified": True} # change response = self.client.patch("/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, response.data.get('user_id')), new_data, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual(response.data.get('verified'), True) - self.assertEqual(response.data.get('verification_token'), new_permission.verification_token) + # remove response = self.client.delete( "/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, data.get('user_id'))) @@ -161,6 +158,5 @@ def test_write_editor_permission_add_removed_user(self): self.assertEqual(removed_user.removed, True) response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data.get('verification_token'), None) self.assertEqual(response.data.get('removed'), False) From 180f004faaa5e55972e988840414d5214cfaa7b8 Mon Sep 17 00:00:00 2001 From: aptiaine Date: Tue, 9 Nov 2021 10:16:56 +0200 Subject: [PATCH 101/160] updating swagger documentation --- src/metax_api/swagger/v1/swagger.yaml | 5 - src/metax_api/swagger/v2/swagger.yaml | 166 +++++++++++++++++++++++++- 2 files changed, 164 insertions(+), 7 deletions(-) diff --git a/src/metax_api/swagger/v1/swagger.yaml b/src/metax_api/swagger/v1/swagger.yaml index 51a84d74..fdd6bdfc 100755 --- a/src/metax_api/swagger/v1/swagger.yaml +++ b/src/metax_api/swagger/v1/swagger.yaml @@ -2203,11 +2203,6 @@ definitions: type: string verified: type: boolean - verification_token: - type: string - verification_token_expires: - type: string - format: date-time editor_permission_id: type: integer examples: diff --git a/src/metax_api/swagger/v2/swagger.yaml b/src/metax_api/swagger/v2/swagger.yaml index 682ffdf8..2da32eaa 100755 --- a/src/metax_api/swagger/v2/swagger.yaml +++ b/src/metax_api/swagger/v2/swagger.yaml @@ -1598,7 +1598,133 @@ paths: description: Resource not found. tags: - Dataset API - + /rest/datasets/{CRID}/editor_permissions/users: + get: + summary: List all editor permissions of a record + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + responses: + "200": + description: Successful operation, return a list of editor rights. May return an empty list. + schema: + type: array + items: + $ref: '#/definitions/EditorUserPermission' + "400": + description: Bad request. + "404": + description: Resource not found. + tags: + - Dataset API + post: + summary: Create a new editor permission of a record + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + - name: body + in: body + schema: + type: object + properties: + user_id: + type: string + role: + type: string + responses: + "201": + description: Successful operation, return created editor rights. + schema: + $ref: '#/definitions/EditorUserPermission' + "400": + description: Bad request. + "404": + description: Resource not found. 
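
One behavioural detail worth keeping in mind for the PATCH and DELETE operations documented below: the view code earlier in this series refuses to delete the only remaining creator of a record, and likewise refuses to move the last creator off the creator role. Distilled into a standalone predicate (the helper name is illustrative, not actual project code):

    from metax_api.models.catalog_record import EditorUserPermission, PermissionRole

    def would_drop_last_creator(perm: EditorUserPermission, new_role=None) -> bool:
        """Return True if deleting `perm`, or changing its role to `new_role`,
        would leave the permission set without an active creator."""
        if perm.role != PermissionRole.CREATOR:
            return False
        creators = perm.editor_permissions.users.filter(
            role=PermissionRole.CREATOR, removed=False
        ).count()
        if creators >= 2:
            return False
        # Deleting the entry and demoting it both strand the record
        return new_role is None or new_role != PermissionRole.CREATOR

The endpoints answer HTTP 400 whenever this predicate holds.
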
+ tags: + - Dataset API + /rest/datasets/{CRID}/editor_permissions/users/{USER_ID}: + get: + summary: Get a single editor permission of a record + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + - name: USER_ID + in: path + description: User ID + required: true + type: string + responses: + "200": + description: Successful operation, return a single editor permission. + schema: + $ref: '#/definitions/EditorUserPermission' + "400": + description: Bad request. + "404": + description: Resource not found. + tags: + - Dataset API + patch: + summary: Update role or enable verified + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + - name: USER_ID + in: path + description: User ID + required: true + type: string + - name: body + in: body + schema: + $ref: '#/definitions/EditorUserPermission' + responses: + "200": + description: Successful operation, return changed editor rights. + schema: + $ref: '#/definitions/EditorUserPermission' + "400": + description: Bad request. + "404": + description: Resource not found. + tags: + - Dataset API + delete: + summary: Editorpermission marked as removed + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + - name: USER_ID + in: path + description: User ID + required: true + type: string + responses: + '200': + description: Successful operation + '400': + description: Bad request + '403': + description: Forbidden + '404': + description: Not found + tags: + - Dataset API # Contract API /rest/v2/contracts: @@ -2389,7 +2515,43 @@ definitions: count_cumulative: type: number format: float - + EditorUserPermission: + type: object + properties: + id: + type: integer + readOnly: true + active: + type: boolean + removed: + type: boolean + date_modified: + type: string + format: date-time + readOnly: true + user_modified: + type: string + date_created: + type: string + format: date-time + readOnly: true + user_created: + type: string + service_modified: + type: string + service_created: + type: string + date_removed: + type: string + format: date-time + user_id: + type: string + role: + type: string + verified: + type: boolean + editor_permission_id: + type: integer examples: count_datasets: From f340d25526f3e69b9b9ead1876e337bbccfcfb93 Mon Sep 17 00:00:00 2001 From: Jori Niemi <3295718+tahme@users.noreply.github.com> Date: Wed, 10 Nov 2021 12:56:55 +0200 Subject: [PATCH 102/160] Remove verified from EditorUserPermission --- .../base/views/editor_permissions_view.py | 2 - ...41_remove_editoruserpermission_verified.py | 17 ++++ src/metax_api/models/catalog_record.py | 4 +- .../services/catalog_record_service.py | 3 +- src/metax_api/swagger/v2/swagger.yaml | 4 +- .../api/rest/base/views/datasets/read.py | 6 -- .../api/rest/base/views/datasets/write.py | 3 +- .../base/views/editorpermissions/write.py | 11 +-- .../tests/testdata/generate_test_data.py | 1 - src/metax_api/tests/testdata/test_data.json | 78 +++++++------------ 10 files changed, 48 insertions(+), 81 deletions(-) create mode 100644 src/metax_api/migrations/0041_remove_editoruserpermission_verified.py diff --git a/src/metax_api/api/rest/base/views/editor_permissions_view.py b/src/metax_api/api/rest/base/views/editor_permissions_view.py index 78e6de9b..6a94d0fa 100644 --- a/src/metax_api/api/rest/base/views/editor_permissions_view.py +++ b/src/metax_api/api/rest/base/views/editor_permissions_view.py @@ -55,9 +55,7 @@ def 
create(self, request, *args, **kwargs): editorserializer = None removed_user = EditorUserPermission.objects_unfiltered.filter( user_id=data.get('user_id'), editor_permissions_id=perms_id).first() - data['verified'] = False if removed_user not in EMPTY_VALUES and removed_user.removed is True: - #data['verification_token'] = None data['date_modified'] = datetime.datetime.now() data['date_removed'] = None data['removed'] = False diff --git a/src/metax_api/migrations/0041_remove_editoruserpermission_verified.py b/src/metax_api/migrations/0041_remove_editoruserpermission_verified.py new file mode 100644 index 00000000..b16d07f0 --- /dev/null +++ b/src/metax_api/migrations/0041_remove_editoruserpermission_verified.py @@ -0,0 +1,17 @@ +# Generated by Django 3.1.13 on 2021-11-10 09:30 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('metax_api', '0040_auto_20211108_1256'), + ] + + operations = [ + migrations.RemoveField( + model_name='editoruserpermission', + name='verified', + ), + ] diff --git a/src/metax_api/models/catalog_record.py b/src/metax_api/models/catalog_record.py index 6702a845..14736162 100755 --- a/src/metax_api/models/catalog_record.py +++ b/src/metax_api/models/catalog_record.py @@ -86,7 +86,6 @@ class EditorUserPermission(Common): ) user_id = models.CharField(max_length=200) role = models.CharField(max_length=16, choices=PermissionRole.choices) - verified = models.BooleanField(default=False) class Meta: indexes = [ @@ -105,7 +104,7 @@ class Meta: ] def __repr__(self): - return f"" + return f"" def delete(self, *args, **kwargs): super().remove(*args, **kwargs) @@ -3111,7 +3110,6 @@ def _add_creator_editor_user_permission(self): perm = EditorUserPermission( editor_permissions=self.editor_permissions, user_id=self.metadata_provider_user, - verified=True, role=PermissionRole.CREATOR, date_created=self.date_created, date_modified=self.date_modified, diff --git a/src/metax_api/services/catalog_record_service.py b/src/metax_api/services/catalog_record_service.py index 09901390..73277998 100755 --- a/src/metax_api/services/catalog_record_service.py +++ b/src/metax_api/services/catalog_record_service.py @@ -138,7 +138,7 @@ def get_queryset_search_params(cls, request): @staticmethod def filter_by_editor_permissions_user(request, queryset_search_params): """ - Add filter for querying datasets where user has verified editor user permissions. + Add filter for querying datasets where user has editor user permissions. 
""" user_id = request.query_params["editor_permissions_user"] @@ -150,7 +150,6 @@ def filter_by_editor_permissions_user(request, queryset_search_params): raise Http403({"detail": ["Provided editor_permissions_user does not match current user"]}) queryset_search_params["editor_permissions__users__user_id"] = user_id - queryset_search_params["editor_permissions__users__verified"] = True queryset_search_params["editor_permissions__users__removed"] = False @staticmethod diff --git a/src/metax_api/swagger/v2/swagger.yaml b/src/metax_api/swagger/v2/swagger.yaml index 2da32eaa..f7c29ee7 100755 --- a/src/metax_api/swagger/v2/swagger.yaml +++ b/src/metax_api/swagger/v2/swagger.yaml @@ -1674,7 +1674,7 @@ paths: tags: - Dataset API patch: - summary: Update role or enable verified + summary: Update role parameters: - name: CRID in: path @@ -2548,8 +2548,6 @@ definitions: type: string role: type: string - verified: - type: boolean editor_permission_id: type: integer diff --git a/src/metax_api/tests/api/rest/base/views/datasets/read.py b/src/metax_api/tests/api/rest/base/views/datasets/read.py index 15e20e12..f2a718f0 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/read.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/read.py @@ -971,12 +971,6 @@ def test_filter_by_editor_permissions_user_ok(self): response = self.client.get(f"/rest/datasets?editor_permissions_user=test_user_x") self.assertEqual(response.data["count"], 1) - def test_filter_by_editor_permissions_user_not_verified(self): - cr = CatalogRecord.objects.get(pk=1) - cr.editor_permissions.users.update(user_id='test_user_x', verified=False) - response = self.client.get(f"/rest/datasets?editor_permissions_user=test_user_x") - self.assertEqual(response.data["count"], 0) - def test_filter_by_editor_permissions_user_removed(self): cr = CatalogRecord.objects.get(pk=1) cr.editor_permissions.users.update(user_id='test_user_x') diff --git a/src/metax_api/tests/api/rest/base/views/datasets/write.py b/src/metax_api/tests/api/rest/base/views/datasets/write.py index d7613a92..d9a938f1 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/write.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/write.py @@ -564,12 +564,11 @@ def test_create_catalog_record_adds_creator_permission(self): ) cr = CatalogRecord.objects.get(id=response.data["id"]) self.assertEqual( - list(cr.editor_permissions.users.values("user_id", "role", "verified")), + list(cr.editor_permissions.users.values("user_id", "role")), [ { "user_id": self.cr_test_data["metadata_provider_user"], "role": "creator", - "verified": True, } ], ) diff --git a/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py b/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py index a29589df..5bc831f0 100644 --- a/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py +++ b/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py @@ -107,7 +107,7 @@ def test_write_editor_permission_change_values(self): user_count = len(response.data) response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - new_data = {"role": "editor", "verified": True} + new_data = {"role": "editor"} response = self.client.patch("/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, response.data.get('user_id')), new_data, format="json") @@ -139,15 +139,6 @@ def test_write_editor_permission_add_removed_user(self): # add response 
= self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - new_permission = EditorUserPermission.objects.get(user_id="new_editor", editor_permissions_id=self.permissionid) - - new_data = {"verified": True} - # change - response = self.client.patch("/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, - response.data.get('user_id')), - new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get('verified'), True) # remove response = self.client.delete( diff --git a/src/metax_api/tests/testdata/generate_test_data.py b/src/metax_api/tests/testdata/generate_test_data.py index 04da0d5c..b6ffcabc 100755 --- a/src/metax_api/tests/testdata/generate_test_data.py +++ b/src/metax_api/tests/testdata/generate_test_data.py @@ -384,7 +384,6 @@ def add_editor_permissions(editor_permissions, dataset): "date_created": dataset["fields"]["date_created"], "editor_permissions_id": pk, "role": "creator", - "verified": True, }, "model": "metax_api.editoruserpermission", "pk": pk + 1, diff --git a/src/metax_api/tests/testdata/test_data.json b/src/metax_api/tests/testdata/test_data.json index aec0e37a..ba51c435 100755 --- a/src/metax_api/tests/testdata/test_data.json +++ b/src/metax_api/tests/testdata/test_data.json @@ -5722,8 +5722,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 0, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 1 @@ -5738,8 +5737,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 2, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 3 @@ -5754,8 +5752,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 4, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 5 @@ -5770,8 +5767,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 6, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 7 @@ -5786,8 +5782,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 8, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 9 @@ -5802,8 +5797,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 10, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 11 @@ -5818,8 +5812,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 12, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 13 @@ -5834,8 +5827,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 14, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 15 @@ -5850,8 +5842,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 16, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": 
"metax_api.editoruserpermission", "pk": 17 @@ -5866,8 +5857,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 18, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 19 @@ -5882,8 +5872,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 20, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 21 @@ -5898,8 +5887,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 22, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 23 @@ -5914,8 +5902,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 24, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 25 @@ -5930,8 +5917,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 26, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 27 @@ -5946,8 +5932,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 28, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 29 @@ -5962,8 +5947,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 30, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 31 @@ -5978,8 +5962,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 32, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 33 @@ -5994,8 +5977,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 34, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 35 @@ -6010,8 +5992,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 36, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 37 @@ -6026,8 +6007,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 38, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 39 @@ -6042,8 +6022,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 40, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 41 @@ -6058,8 +6037,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 42, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 43 @@ -6074,8 +6052,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 44, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 45 @@ -6090,8 +6067,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 46, "role": "creator", - "user_id": 
"abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 47 @@ -6106,8 +6082,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 48, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 49 @@ -6122,8 +6097,7 @@ "date_created": "2017-05-23T10:07:22Z", "editor_permissions_id": 50, "role": "creator", - "user_id": "abc-user-123", - "verified": true + "user_id": "abc-user-123" }, "model": "metax_api.editoruserpermission", "pk": 51 From c8dce2bd2e4615235b96565725ef93b57804c203 Mon Sep 17 00:00:00 2001 From: aptiaine Date: Thu, 11 Nov 2021 13:35:15 +0200 Subject: [PATCH 103/160] Fixed migrations and test data --- ...sions.py => 0041_add_editorpermissions.py} | 2 +- ...108_1256.py => 0042_auto_20211108_1256.py} | 2 +- ...3_remove_editoruserpermission_verified.py} | 2 +- src/metax_api/tests/testdata/test_data.json | 96 ------------------- 4 files changed, 3 insertions(+), 99 deletions(-) rename src/metax_api/migrations/{0039_add_editorpermissions.py => 0041_add_editorpermissions.py} (99%) rename src/metax_api/migrations/{0040_auto_20211108_1256.py => 0042_auto_20211108_1256.py} (89%) rename src/metax_api/migrations/{0041_remove_editoruserpermission_verified.py => 0043_remove_editoruserpermission_verified.py} (85%) diff --git a/src/metax_api/migrations/0039_add_editorpermissions.py b/src/metax_api/migrations/0041_add_editorpermissions.py similarity index 99% rename from src/metax_api/migrations/0039_add_editorpermissions.py rename to src/metax_api/migrations/0041_add_editorpermissions.py index f268e437..b2ea2fc5 100644 --- a/src/metax_api/migrations/0039_add_editorpermissions.py +++ b/src/metax_api/migrations/0041_add_editorpermissions.py @@ -71,7 +71,7 @@ def revert(apps, schema_editor): class Migration(migrations.Migration): dependencies = [ - ('metax_api', '0038_remove_catalogrecord_editor'), + ('metax_api', '0040_auto_20211006_1116'), ] operations = [ diff --git a/src/metax_api/migrations/0040_auto_20211108_1256.py b/src/metax_api/migrations/0042_auto_20211108_1256.py similarity index 89% rename from src/metax_api/migrations/0040_auto_20211108_1256.py rename to src/metax_api/migrations/0042_auto_20211108_1256.py index d5c9dd97..51e1afcb 100644 --- a/src/metax_api/migrations/0040_auto_20211108_1256.py +++ b/src/metax_api/migrations/0042_auto_20211108_1256.py @@ -6,7 +6,7 @@ class Migration(migrations.Migration): dependencies = [ - ('metax_api', '0039_add_editorpermissions'), + ('metax_api', '0041_add_editorpermissions'), ] operations = [ diff --git a/src/metax_api/migrations/0041_remove_editoruserpermission_verified.py b/src/metax_api/migrations/0043_remove_editoruserpermission_verified.py similarity index 85% rename from src/metax_api/migrations/0041_remove_editoruserpermission_verified.py rename to src/metax_api/migrations/0043_remove_editoruserpermission_verified.py index b16d07f0..220acfab 100644 --- a/src/metax_api/migrations/0041_remove_editoruserpermission_verified.py +++ b/src/metax_api/migrations/0043_remove_editoruserpermission_verified.py @@ -6,7 +6,7 @@ class Migration(migrations.Migration): dependencies = [ - ('metax_api', '0040_auto_20211108_1256'), + ('metax_api', '0042_auto_20211108_1256'), ] operations = [ diff --git a/src/metax_api/tests/testdata/test_data.json b/src/metax_api/tests/testdata/test_data.json index ba51c435..96607ab1 100755 --- a/src/metax_api/tests/testdata/test_data.json +++ 
b/src/metax_api/tests/testdata/test_data.json @@ -6117,12 +6117,6 @@ "dataset_version_set": 1, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "053bffbcc41edad4853bea91fc42ea18", - "identifier": "qvain", - "owner_id": "053bffbcc41edad4853bea91fc42ea18", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9001" - }, "editor_permissions_id": 0, "files": [ 1, @@ -6279,12 +6273,6 @@ "dataset_version_set": 2, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "053d18ecb29e752cb7a35cd77b34f5fd", - "identifier": "qvain", - "owner_id": "053d18ecb29e752cb7a35cd77b34f5fd", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9002" - }, "editor_permissions_id": 2, "files": [ 3, @@ -6441,12 +6429,6 @@ "dataset_version_set": 3, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "05593961536b76fa825281ccaedd4d4f", - "identifier": "qvain", - "owner_id": "05593961536b76fa825281ccaedd4d4f", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9003" - }, "editor_permissions_id": 4, "files": [ 5, @@ -6603,12 +6585,6 @@ "dataset_version_set": 4, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "055ea4dade5ab2145954f56d4b51cef0", - "identifier": "qvain", - "owner_id": "055ea4dade5ab2145954f56d4b51cef0", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9004" - }, "editor_permissions_id": 6, "files": [ 7, @@ -6765,12 +6741,6 @@ "dataset_version_set": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "055ea531a6cac569425bed94459266ee", - "identifier": "qvain", - "owner_id": "055ea531a6cac569425bed94459266ee", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9005" - }, "editor_permissions_id": 8, "files": [ 9, @@ -6927,12 +6897,6 @@ "dataset_version_set": 6, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "053bffbcc41edad4853bea91fc42ea18", - "identifier": "qvain", - "owner_id": "053bffbcc41edad4853bea91fc42ea18", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9006" - }, "editor_permissions_id": 10, "files": [ 11, @@ -7089,12 +7053,6 @@ "dataset_version_set": 7, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "053d18ecb29e752cb7a35cd77b34f5fd", - "identifier": "qvain", - "owner_id": "053d18ecb29e752cb7a35cd77b34f5fd", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9007" - }, "editor_permissions_id": 12, "files": [ 13, @@ -7251,12 +7209,6 @@ "dataset_version_set": 8, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "05593961536b76fa825281ccaedd4d4f", - "identifier": "qvain", - "owner_id": "05593961536b76fa825281ccaedd4d4f", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9008" - }, "editor_permissions_id": 14, "files": [ 15, @@ -7398,12 +7350,6 @@ "dataset_version_set": 9, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "055ea4dade5ab2145954f56d4b51cef0", - "identifier": "qvain", - "owner_id": "055ea4dade5ab2145954f56d4b51cef0", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9009" - }, "editor_permissions_id": 16, "files": [ 17, @@ -7531,12 +7477,6 @@ "dataset_version_set": 10, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": 
"055ea531a6cac569425bed94459266ee", - "identifier": "qvain", - "owner_id": "055ea531a6cac569425bed94459266ee", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9010" - }, "editor_permissions_id": 18, "files": [ 19, @@ -7661,12 +7601,6 @@ "dataset_version_set": 11, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor": { - "creator_id": "053bffbcc41edad4853bea91fc42ea18", - "identifier": "qvain", - "owner_id": "053bffbcc41edad4853bea91fc42ea18", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9011" - }, "editor_permissions_id": 20, "files": [ 1, @@ -8619,12 +8553,6 @@ "dataset_version_set": 12, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor": { - "creator_id": "053d18ecb29e752cb7a35cd77b34f5fd", - "identifier": "qvain", - "owner_id": "053d18ecb29e752cb7a35cd77b34f5fd", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9012" - }, "editor_permissions_id": 22, "files": [ 1, @@ -9577,12 +9505,6 @@ "dataset_version_set": 13, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor": { - "creator_id": "05593961536b76fa825281ccaedd4d4f", - "identifier": "qvain", - "owner_id": "05593961536b76fa825281ccaedd4d4f", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9013" - }, "editor_permissions_id": 24, "files": [ 22, @@ -11990,10 +11912,6 @@ "data_catalog": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor": { - "creator_id": "053bffbcc41edad4853bea91fc42ea18", - "owner_id": "053bffbcc41edad4853bea91fc42ea18" - }, "editor_permissions_id": 46, "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9624", "metadata_owner_org": "abc-org-123", @@ -12929,10 +12847,6 @@ "data_catalog": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor": { - "creator_id": "053bffbcc41edad4853bea91fc42ea18", - "owner_id": "053d18ecb29e752cb7a35cd77b34f5fd" - }, "editor_permissions_id": 48, "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9625", "metadata_owner_org": "abc-org-123", @@ -13868,10 +13782,6 @@ "data_catalog": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor": { - "creator_id": "053bffbcc41edad4853bea91fc42ea18", - "owner_id": "05593961536b76fa825281ccaedd4d4f" - }, "editor_permissions_id": 50, "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9626", "metadata_owner_org": "abc-org-123", @@ -14810,12 +14720,6 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor": { - "creator_id": "055ea4dade5ab2145954f56d4b51cef0", - "identifier": "qvain", - "owner_id": "055ea4dade5ab2145954f56d4b51cef0", - "record_id": "955e904-e3dd-4d7e-99f1-3fed446f9027" - }, "editor_permissions_id": 18, "files": [ 19, From e5c8bbd2b5278984fcbdee95ea3169a7c50acef2 Mon Sep 17 00:00:00 2001 From: Toni Nurmi Date: Mon, 15 Nov 2021 08:37:56 +0000 Subject: [PATCH 104/160] CSCFAIRMETA-1191 Capture only catalogrecord delete signals for deletedobjects table --- src/metax_api/signals/post_delete.py | 33 ++++++++++++++------------- src/metax_api/tests/models/signals.py | 10 -------- 2 files changed, 17 insertions(+), 26 deletions(-) diff --git a/src/metax_api/signals/post_delete.py b/src/metax_api/signals/post_delete.py index 1ff39f75..3dd1a0f5 100644 --- a/src/metax_api/signals/post_delete.py +++ b/src/metax_api/signals/post_delete.py @@ -5,23 +5,24 @@ from django.dispatch import receiver from django.core.serializers.json import 
DjangoJSONEncoder from django.forms.models import model_to_dict -from ..models import DeletedObject +from ..models import DeletedObject, CatalogRecord, CatalogRecordV2 _logger = logging.getLogger(__name__) @receiver(post_delete) -def deleted_object_receiver(instance, *args, **kwargs): - try: - model_type = instance._meta.model.__name__ - if hasattr(instance, '_initial_data["date_created"]'): - instance._initial_data["date_created"] = instance._initial_data["date_created"].strftime("%m/%d/%Y, %H:%M:%S") - if hasattr(instance, 'date_created'): - instance.date_created = instance.date_created.strftime("%m/%d/%Y, %H:%M:%S") - if hasattr(instance, 'date_modified'): - instance.date_modified = instance.date_modified.strftime("%m/%d/%Y, %H:%M:%S") - instance = model_to_dict(instance) - deleted_object_json = json.dumps(instance, cls=DjangoJSONEncoder) - DeletedObject.objects.create(model_name=model_type, object_data=deleted_object_json) - except Exception as e: - _logger.error("cannot save Deleted Object. Discarding..") - _logger.debug(f"error: {e}") \ No newline at end of file +def deleted_object_receiver(instance, sender, *args, **kwargs): + if sender in [CatalogRecord, CatalogRecordV2]: + try: + model_type = instance._meta.model.__name__ + if hasattr(instance, '_initial_data["date_created"]'): + instance._initial_data["date_created"] = instance._initial_data["date_created"].strftime("%m/%d/%Y, %H:%M:%S") + if hasattr(instance, 'date_created'): + instance.date_created = instance.date_created.strftime("%m/%d/%Y, %H:%M:%S") + if hasattr(instance, 'date_modified'): + instance.date_modified = instance.date_modified.strftime("%m/%d/%Y, %H:%M:%S") + instance = model_to_dict(instance) + deleted_object_json = json.dumps(instance, cls=DjangoJSONEncoder) + DeletedObject.objects.create(model_name=model_type, object_data=deleted_object_json) + except Exception as e: + _logger.error("cannot save Deleted Object. 
Discarding..") + _logger.debug(f"error: {e}") \ No newline at end of file diff --git a/src/metax_api/tests/models/signals.py b/src/metax_api/tests/models/signals.py index 548474ce..b9c11aae 100644 --- a/src/metax_api/tests/models/signals.py +++ b/src/metax_api/tests/models/signals.py @@ -33,14 +33,4 @@ def test_deleting_catalog_record_creates_new_deleted_object(self): self.assertEqual(deleted_object_v2.model_name, "CatalogRecordV2") self.assertEqual(deleted_object_v2.date_deleted.strftime("%d/%m/%Y"), self.today) - def test_deleting_file_creates_new_deleted_object(self): - File.objects.get(pk=1).delete(hard=True) - deleted_object = DeletedObject.objects.last() - self.assertEqual(deleted_object.model_name, "File") - self.assertEqual(deleted_object.date_deleted.strftime("%d/%m/%Y"), self.today) - def test_deleting_directory_creates_new_deleted_object(self): - Directory.objects.get(pk=1).delete() - deleted_object = DeletedObject.objects.last() - self.assertEqual(deleted_object.model_name, "Directory") - self.assertEqual(deleted_object.date_deleted.strftime("%d/%m/%Y"), self.today) \ No newline at end of file From 0a22dc503dbc4b793e3630370671d10f62b820e8 Mon Sep 17 00:00:00 2001 From: Toni Date: Mon, 15 Nov 2021 16:29:04 +0200 Subject: [PATCH 105/160] Save apierrors in bulk, change debug logs to error or info type --- src/metax_api/services/rabbitmq_service.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/src/metax_api/services/rabbitmq_service.py b/src/metax_api/services/rabbitmq_service.py index ef0a710d..d84ecddb 100755 --- a/src/metax_api/services/rabbitmq_service.py +++ b/src/metax_api/services/rabbitmq_service.py @@ -118,22 +118,29 @@ def consume_api_errors(self): channel = connection.channel() try: + errors = [] for method, _, body in channel.consume("metax-apierrors", inactivity_timeout=1): if method is None and body is None: channel.cancel() break try: - error = loads(body) - ApiError.objects.create(identifier=error["identifier"], error=error) - except DatabaseError as e: - _logger.error("cannot create API Error. Discarding..") - _logger.debug(f"error: {e}") + error_payload = loads(body) + error = ApiError(identifier=error_payload["identifier"], error=error_payload) + errors.append(error) + except Exception as e: + _logger.error(e) finally: channel.basic_ack(method.delivery_tag) + try: + ApiError.objects.bulk_create(errors, batch_size=5000) + except DatabaseError as e: + _logger.error("cannot create API Error. 
Discarding..") + _logger.error(f"error: {e}") + except Exception as e: _logger.error(e) finally: - _logger.debug("All ApiErrors were handled") + _logger.info("All ApiErrors were handled") connection.close() def init_exchanges(self): From 54c936f38e690972b079706a9dd1007bcf1be682 Mon Sep 17 00:00:00 2001 From: Toni Date: Mon, 15 Nov 2021 17:21:59 +0200 Subject: [PATCH 106/160] hotfix for too many database writes --- src/metax_api/services/rabbitmq_service.py | 19 +++++++++---- src/metax_api/signals/post_delete.py | 33 +++++++++++----------- src/metax_api/tests/models/signals.py | 10 ------- 3 files changed, 30 insertions(+), 32 deletions(-) diff --git a/src/metax_api/services/rabbitmq_service.py b/src/metax_api/services/rabbitmq_service.py index 0e541693..bc2dbe4f 100755 --- a/src/metax_api/services/rabbitmq_service.py +++ b/src/metax_api/services/rabbitmq_service.py @@ -118,22 +118,29 @@ def consume_api_errors(self): channel = connection.channel() try: + errors = [] for method, _, body in channel.consume("metax-apierrors", inactivity_timeout=1): if method is None and body is None: channel.cancel() break try: - error = loads(body) - ApiError.objects.create(identifier=error["identifier"], error=error) - except DatabaseError as e: - _logger.error("cannot create API Error. Discarding..") - _logger.debug(f"error: {e}") + error_payload = loads(body) + error = ApiError(identifier=error_payload["identifier"], error=error_payload) + errors.append(error) + except Exception as e: + _logger.error(e) finally: channel.basic_ack(method.delivery_tag) + try: + ApiError.objects.bulk_create(errors, batch_size=5000) + except DatabaseError as e: + _logger.error("cannot create API Error. Discarding..") + _logger.error(f"error: {e}") + except Exception as e: _logger.error(e) finally: - _logger.debug("All ApiErrors were handled") + _logger.info("All ApiErrors were handled") connection.close() def init_exchanges(self): diff --git a/src/metax_api/signals/post_delete.py b/src/metax_api/signals/post_delete.py index 1ff39f75..3dd1a0f5 100644 --- a/src/metax_api/signals/post_delete.py +++ b/src/metax_api/signals/post_delete.py @@ -5,23 +5,24 @@ from django.dispatch import receiver from django.core.serializers.json import DjangoJSONEncoder from django.forms.models import model_to_dict -from ..models import DeletedObject +from ..models import DeletedObject, CatalogRecord, CatalogRecordV2 _logger = logging.getLogger(__name__) @receiver(post_delete) -def deleted_object_receiver(instance, *args, **kwargs): - try: - model_type = instance._meta.model.__name__ - if hasattr(instance, '_initial_data["date_created"]'): - instance._initial_data["date_created"] = instance._initial_data["date_created"].strftime("%m/%d/%Y, %H:%M:%S") - if hasattr(instance, 'date_created'): - instance.date_created = instance.date_created.strftime("%m/%d/%Y, %H:%M:%S") - if hasattr(instance, 'date_modified'): - instance.date_modified = instance.date_modified.strftime("%m/%d/%Y, %H:%M:%S") - instance = model_to_dict(instance) - deleted_object_json = json.dumps(instance, cls=DjangoJSONEncoder) - DeletedObject.objects.create(model_name=model_type, object_data=deleted_object_json) - except Exception as e: - _logger.error("cannot save Deleted Object. 
Discarding..") - _logger.debug(f"error: {e}") \ No newline at end of file +def deleted_object_receiver(instance, sender, *args, **kwargs): + if sender in [CatalogRecord, CatalogRecordV2]: + try: + model_type = instance._meta.model.__name__ + if hasattr(instance, '_initial_data["date_created"]'): + instance._initial_data["date_created"] = instance._initial_data["date_created"].strftime("%m/%d/%Y, %H:%M:%S") + if hasattr(instance, 'date_created'): + instance.date_created = instance.date_created.strftime("%m/%d/%Y, %H:%M:%S") + if hasattr(instance, 'date_modified'): + instance.date_modified = instance.date_modified.strftime("%m/%d/%Y, %H:%M:%S") + instance = model_to_dict(instance) + deleted_object_json = json.dumps(instance, cls=DjangoJSONEncoder) + DeletedObject.objects.create(model_name=model_type, object_data=deleted_object_json) + except Exception as e: + _logger.error("cannot save Deleted Object. Discarding..") + _logger.debug(f"error: {e}") \ No newline at end of file diff --git a/src/metax_api/tests/models/signals.py b/src/metax_api/tests/models/signals.py index 548474ce..b9c11aae 100644 --- a/src/metax_api/tests/models/signals.py +++ b/src/metax_api/tests/models/signals.py @@ -33,14 +33,4 @@ def test_deleting_catalog_record_creates_new_deleted_object(self): self.assertEqual(deleted_object_v2.model_name, "CatalogRecordV2") self.assertEqual(deleted_object_v2.date_deleted.strftime("%d/%m/%Y"), self.today) - def test_deleting_file_creates_new_deleted_object(self): - File.objects.get(pk=1).delete(hard=True) - deleted_object = DeletedObject.objects.last() - self.assertEqual(deleted_object.model_name, "File") - self.assertEqual(deleted_object.date_deleted.strftime("%d/%m/%Y"), self.today) - def test_deleting_directory_creates_new_deleted_object(self): - Directory.objects.get(pk=1).delete() - deleted_object = DeletedObject.objects.last() - self.assertEqual(deleted_object.model_name, "Directory") - self.assertEqual(deleted_object.date_deleted.strftime("%d/%m/%Y"), self.today) \ No newline at end of file From 3e9a28c72fd517c682e954ac2ddeabcdd4788f73 Mon Sep 17 00:00:00 2001 From: Toni Date: Thu, 18 Nov 2021 17:19:11 +0200 Subject: [PATCH 107/160] add datacatalog, permission role and related settings for Aalto --- src/metax_api/initialdata/datacatalogs.json | 54 +++++++++++++++++++ .../settings/components/access_control.py | 1 + src/metax_api/settings/components/common.py | 2 + src/metax_api/settings/environments/stable.py | 6 +-- 4 files changed, 60 insertions(+), 3 deletions(-) diff --git a/src/metax_api/initialdata/datacatalogs.json b/src/metax_api/initialdata/datacatalogs.json index 707ce27f..a0e1cdd0 100755 --- a/src/metax_api/initialdata/datacatalogs.json +++ b/src/metax_api/initialdata/datacatalogs.json @@ -465,4 +465,58 @@ "catalog_record_services_edit": "metax,repotronic", "catalog_record_services_create": "metax,repotronic", "catalog_record_services_read": "metax,repotronic" +}, +{ + "catalog_json": { + "title": { + "en": "ACRIS catalog", + "fi": "ACRIS katalogi" + }, + "language": [ + { + "identifier": "http://lexvo.org/id/iso639-3/fin" + } + ], + "harvested": false, + "publisher": { + "name": { + "en": "Aalto ACRIS", + "fi": "Aalto ACRIS" + }, + "homepage": [{ + "title": { + "en": "https://research.aalto.fi", + "fi": "https://research.aalto.fi" + }, + "identifier": "https://research.aalto.fi" + }] + }, + "identifier": "urn:nbn:fi:att:data-catalog-acris", + "access_rights": { + "license": [ + { + "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-4.0" + } + ], + 
"access_type": [ + { + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } + } + ], + "description": { + "en": "Contains datasets from Aalto University's ACRIS system", + "fi": "SisƤltƤƤ aineistoja Aalto yliopiston ACRIS-jƤrjestelmƤstƤ" + } + }, + "dataset_versioning": false, + "research_dataset_schema": "att" + }, + "catalog_record_services_edit": "metax,aalto", + "catalog_record_services_create": "metax,aalto", + "catalog_record_services_read": "metax,aalto" }] \ No newline at end of file diff --git a/src/metax_api/settings/components/access_control.py b/src/metax_api/settings/components/access_control.py index f8a3d018..c3c78c0f 100755 --- a/src/metax_api/settings/components/access_control.py +++ b/src/metax_api/settings/components/access_control.py @@ -38,6 +38,7 @@ class Role(Enum): EXTERNAL = "external" JYU = "jyu" REPOTRONIC = "repotronic" + AALTO = "aalto" def __ge__(self, other): if self.__class__ is other.__class__: diff --git a/src/metax_api/settings/components/common.py b/src/metax_api/settings/components/common.py index d2f9a447..6a0f702a 100755 --- a/src/metax_api/settings/components/common.py +++ b/src/metax_api/settings/components/common.py @@ -13,6 +13,7 @@ LEGACY_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-legacy" DFT_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-dft" REPOTRONIC_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-repotronic" +AALTO_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-acris" END_USER_ALLOWED_DATA_CATALOGS = [ IDA_DATA_CATALOG_IDENTIFIER, @@ -26,6 +27,7 @@ LEGACY_CATALOGS = [ LEGACY_DATA_CATALOG_IDENTIFIER, REPOTRONIC_DATA_CATALOG_IDENTIFIER, + AALTO_DATA_CATALOG_IDENTIFIER ] VALIDATE_TOKEN_URL = env("VALIDATE_TOKEN_URL") diff --git a/src/metax_api/settings/environments/stable.py b/src/metax_api/settings/environments/stable.py index 95fd4961..d08fed47 100644 --- a/src/metax_api/settings/environments/stable.py +++ b/src/metax_api/settings/environments/stable.py @@ -1,9 +1,9 @@ from metax_api.settings.components.access_control import Role, api_permissions, prepare_perm_values from metax_api.settings.environments.staging import API_USERS # noqa: F401 -api_permissions.rest.datasets.create += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC] -api_permissions.rest.datasets["update"] += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC] -api_permissions.rest.datasets.delete += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC] +api_permissions.rest.datasets.create += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO] +api_permissions.rest.datasets["update"] += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO] +api_permissions.rest.datasets.delete += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO] api_permissions.rest.directories.read += [Role.IDA, Role.QVAIN_LIGHT] From 983f26cabfa2328e2582d10ac8f3f066f3cc5696 Mon Sep 17 00:00:00 2001 From: Toni Nurmi Date: Fri, 19 Nov 2021 08:35:40 +0000 Subject: [PATCH 108/160] Revert "Merge branch 'editing-rights' into 'test'" This reverts merge request !71 --- src/metax_api/api/rest/base/router.py | 6 - .../api/rest/base/serializers/__init__.py | 1 - .../editor_permissions_serializer.py | 20 - src/metax_api/api/rest/base/views/__init__.py | 1 - .../api/rest/base/views/common_view.py | 4 +- .../api/rest/base/views/dataset_view.py | 1 - .../base/views/editor_permissions_view.py | 116 ----- 
src/metax_api/api/rest/v2/router.py | 6 - .../migrations/0041_add_editorpermissions.py | 134 ------ .../migrations/0042_auto_20211108_1256.py | 21 - ...43_remove_editoruserpermission_verified.py | 17 - src/metax_api/models/__init__.py | 2 +- src/metax_api/models/catalog_record.py | 83 ---- src/metax_api/models/catalog_record_v2.py | 6 - .../services/catalog_record_service.py | 20 - .../settings/components/access_control.py | 24 - src/metax_api/swagger/v1/swagger.yaml | 170 +------ src/metax_api/swagger/v2/swagger.yaml | 164 +------ .../api/rest/base/views/datasets/read.py | 13 - .../api/rest/base/views/datasets/write.py | 17 - .../base/views/editorpermissions/__init__.py | 2 - .../rest/base/views/editorpermissions/read.py | 87 ---- .../base/views/editorpermissions/write.py | 153 ------- .../api/rest/v2/views/datasets/drafts.py | 16 - .../tests/api/rpc/v2/views/dataset_rpc.py | 15 - .../tests/testdata/generate_test_data.py | 38 +- src/metax_api/tests/testdata/test_data.json | 418 ------------------ 27 files changed, 13 insertions(+), 1542 deletions(-) delete mode 100644 src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py delete mode 100644 src/metax_api/api/rest/base/views/editor_permissions_view.py delete mode 100644 src/metax_api/migrations/0041_add_editorpermissions.py delete mode 100644 src/metax_api/migrations/0042_auto_20211108_1256.py delete mode 100644 src/metax_api/migrations/0043_remove_editoruserpermission_verified.py delete mode 100644 src/metax_api/tests/api/rest/base/views/editorpermissions/__init__.py delete mode 100644 src/metax_api/tests/api/rest/base/views/editorpermissions/read.py delete mode 100644 src/metax_api/tests/api/rest/base/views/editorpermissions/write.py diff --git a/src/metax_api/api/rest/base/router.py b/src/metax_api/api/rest/base/router.py index ac745be9..742103a3 100755 --- a/src/metax_api/api/rest/base/router.py +++ b/src/metax_api/api/rest/base/router.py @@ -27,7 +27,6 @@ DataCatalogViewSet, DatasetViewSet, DirectoryViewSet, - EditorPermissionViewSet, FileStorageViewSet, FileViewSet, SchemaViewSet, @@ -84,9 +83,4 @@ def get_default_basename(self, viewset): DatasetViewSet, ) -router.register( - "datasets/(?P.+)/editor_permissions/users", - EditorPermissionViewSet, -) - api_urlpatterns = router.urls diff --git a/src/metax_api/api/rest/base/serializers/__init__.py b/src/metax_api/api/rest/base/serializers/__init__.py index 84d9c24b..1c243720 100755 --- a/src/metax_api/api/rest/base/serializers/__init__.py +++ b/src/metax_api/api/rest/base/serializers/__init__.py @@ -13,4 +13,3 @@ from .file_storage_serializer import FileStorageSerializer from .serializer_utils import validate_json from .xml_metadata_serializer import XmlMetadataSerializer -from .editor_permissions_serializer import EditorPermissionsSerializer diff --git a/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py b/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py deleted file mode 100644 index b306b416..00000000 --- a/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py +++ /dev/null @@ -1,20 +0,0 @@ -from django.core.validators import EMPTY_VALUES - -from rest_framework.serializers import ValidationError, ModelSerializer - -from metax_api.models import EditorUserPermission - -from .common_serializer import CommonSerializer - - -class EditorPermissionsSerializer(ModelSerializer): - class Meta: - model = EditorUserPermission - fields = "__all__" - - extra_kwargs = CommonSerializer.Meta.extra_kwargs - - def 
validate(self, attrs): - data = ModelSerializer.validate(self, attrs) - - return data diff --git a/src/metax_api/api/rest/base/views/__init__.py b/src/metax_api/api/rest/base/views/__init__.py index 027ae76f..d5c62dd9 100755 --- a/src/metax_api/api/rest/base/views/__init__.py +++ b/src/metax_api/api/rest/base/views/__init__.py @@ -10,7 +10,6 @@ from .data_catalog_view import DataCatalogViewSet from .dataset_view import DatasetViewSet from .directory_view import DirectoryViewSet -from .editor_permissions_view import EditorPermissionViewSet from .file_storage_view import FileStorageViewSet from .file_view import FileViewSet from .schema_view import SchemaViewSet diff --git a/src/metax_api/api/rest/base/views/common_view.py b/src/metax_api/api/rest/base/views/common_view.py index 3e50d00c..14de18ca 100755 --- a/src/metax_api/api/rest/base/views/common_view.py +++ b/src/metax_api/api/rest/base/views/common_view.py @@ -373,9 +373,7 @@ def _check_and_store_bulk_error(self, request, response): """ if "failed" in response.data and len(response.data["failed"]): try: - error_json = ApiErrorSerializerV2.request_to_json( - self.request, response, other={"bulk_request": True} - ) + error_json = ApiErrorSerializerV2.request_to_json(self.request, response, other={"bulk_request": True}) response.data["error_identifier"] = error_json["identifier"] rabbitmq.publish(error_json, exchange="apierrors") except Exception as e: diff --git a/src/metax_api/api/rest/base/views/dataset_view.py b/src/metax_api/api/rest/base/views/dataset_view.py index 1aeaab0b..bd416afa 100755 --- a/src/metax_api/api/rest/base/views/dataset_view.py +++ b/src/metax_api/api/rest/base/views/dataset_view.py @@ -10,7 +10,6 @@ from django.conf import settings from django.http import Http404 - from rest_framework import status from rest_framework.decorators import action from rest_framework.response import Response diff --git a/src/metax_api/api/rest/base/views/editor_permissions_view.py b/src/metax_api/api/rest/base/views/editor_permissions_view.py deleted file mode 100644 index 6a94d0fa..00000000 --- a/src/metax_api/api/rest/base/views/editor_permissions_view.py +++ /dev/null @@ -1,116 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT -import datetime -import logging - -from django.core.validators import EMPTY_VALUES - -from django.shortcuts import get_object_or_404 -from rest_framework import status - -from rest_framework.response import Response - -from metax_api.models import CatalogRecord -from metax_api.models.catalog_record import PermissionRole, EditorUserPermission -from metax_api.permissions import ServicePermissions -from metax_api.services import CommonService - -from ..serializers import EditorPermissionsSerializer -from .common_view import CommonViewSet - -_logger = logging.getLogger(__name__) - - -class EditorPermissionViewSet(CommonViewSet): - lookup_field = "user_id" - permission_classes = [ServicePermissions,] - serializer_class = EditorPermissionsSerializer - - def __init__(self, *args, **kwargs): - super(EditorPermissionViewSet, self).__init__(*args, **kwargs) - - def get_queryset(self): - if CommonService.is_primary_key(self.kwargs['cr_identifier']): - cr = get_object_or_404(CatalogRecord, pk=int(self.kwargs['cr_identifier'])) - else: - cr = get_object_or_404(CatalogRecord, identifier=self.kwargs['cr_identifier']) - return cr.editor_permissions.users - - def list(self, 
request, *args, **kwargs): - users = self.get_queryset() - editorserializer = EditorPermissionsSerializer(users.all(), many=True) - return Response(editorserializer.data) - - def create(self, request, *args, **kwargs): - data = request.data - - perms_id = self.get_queryset().instance.id - if "user_id" not in data: - return Response({"user_id": "Missing user_id"}, status=status.HTTP_400_BAD_REQUEST) - - editorserializer = None - removed_user = EditorUserPermission.objects_unfiltered.filter( - user_id=data.get('user_id'), editor_permissions_id=perms_id).first() - if removed_user not in EMPTY_VALUES and removed_user.removed is True: - data['date_modified'] = datetime.datetime.now() - data['date_removed'] = None - data['removed'] = False - editorserializer = EditorPermissionsSerializer(removed_user, data=data, partial=True) - elif removed_user not in EMPTY_VALUES and removed_user.removed is False: - return Response( - {'user_id': "User_id already exists"}, status=status.HTTP_400_BAD_REQUEST - ) - else: - data['editor_permissions'] = perms_id - data['date_created'] = datetime.datetime.now() - editorserializer = EditorPermissionsSerializer(data=data) - if editorserializer.is_valid(): - editorserializer.save() - return Response(editorserializer.data, status=status.HTTP_201_CREATED) - else: - return Response(editorserializer.errors, status=status.HTTP_400_BAD_REQUEST) - - def destroy(self, request, *args, **kwargs): - user = self.get_object() - users = self.get_queryset() - - creators = users.filter(role=PermissionRole.CREATOR, removed=False).count() - if user.role == PermissionRole.CREATOR and creators < 2: - return Response( - {"error": "Can't delete last creator"}, status=status.HTTP_400_BAD_REQUEST - ) - else: - user.remove() - return Response(status=status.HTTP_200_OK) - - def partial_update(self, request, **kwargs): - data = request.data - user = self.get_object() - users = self.get_queryset() - - if 'role' in data and user.role == PermissionRole.CREATOR: - creators = users.filter(role=PermissionRole.CREATOR, removed=False).count() - if creators < 2 and data.get('role') != PermissionRole.CREATOR: - return Response({"error": "Can't change last creator"}, status=status.HTTP_400_BAD_REQUEST) - - data['date_modified'] = datetime.datetime.now() - serializer = EditorPermissionsSerializer(user, data=data, partial=True) - if serializer.is_valid(): - serializer.save() - return Response(serializer.data) - else: - return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) - - def update_bulk(self, request, *args, **kwargs): - return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED) - - def partial_update_bulk(self, request, *args, **kwargs): - return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED) - - def destroy_bulk(self, request, *args, **kwargs): - return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED) - diff --git a/src/metax_api/api/rest/v2/router.py b/src/metax_api/api/rest/v2/router.py index 10bcd0c9..46e703d5 100755 --- a/src/metax_api/api/rest/v2/router.py +++ b/src/metax_api/api/rest/v2/router.py @@ -29,7 +29,6 @@ FileStorageViewSet, FileViewSet, SchemaViewSet, - EditorPermissionViewSet, ) from metax_api.api.rest.v2.views import ApiErrorViewSetV2 @@ -111,9 +110,4 @@ def __init__(self, *args, **kwargs): DatasetViewSet, ) -router_v2.register( - "datasets/(?P.+)/editor_permissions/users", - EditorPermissionViewSet, -) - api_urlpatterns = router_v1.urls + router_v2.urls diff --git a/src/metax_api/migrations/0041_add_editorpermissions.py 
b/src/metax_api/migrations/0041_add_editorpermissions.py deleted file mode 100644 index b2ea2fc5..00000000 --- a/src/metax_api/migrations/0041_add_editorpermissions.py +++ /dev/null @@ -1,134 +0,0 @@ -# Generated by Django 3.1.13 on 2021-08-26 11:14 - -from django.db import migrations, models -import django.db.models.deletion -from django.utils import timezone -import logging -from metax_api.models import catalog_record - -logger = logging.getLogger(__name__) - - -def add_permissions(apps, schema_editor): - """ - Add EditorPermissions for each version set and related next_draft CatalogRecords. - - Here CatalogRecords not belonging to a DatasetVersionSet are considered a version set with size 1. - """ - CatalogRecord = apps.get_model("metax_api", "CatalogRecordV2") - EditorUserPermission = apps.get_model("metax_api", "EditorUserPermission") - EditorPermissions = apps.get_model("metax_api", "EditorPermissions") - - num_perms = 0 - num_datasets = 0 - prev_version_set_id = None - version_set_users = [] - version_set_crs = [] - - def flush_version_set(): - """Create single EditorPermissions object for each version set, add creator user""" - nonlocal num_perms, num_datasets, version_set_crs, version_set_users - if len(version_set_crs) > 0: - permissions = EditorPermissions.objects.create() - permissions.catalog_records.add(*version_set_crs) - num_perms += 1 - num_datasets += len(version_set_crs) - - for user in version_set_users: - now = timezone.now().replace(microsecond=0) - EditorUserPermission.objects.create( - editor_permissions=permissions, - user_id=user, - verified=True, - role="creator", - date_created=now, - ) - version_set_users = [] - version_set_crs = [] - - # group datasets by version_sets and include their next_draft datasets - for cr in CatalogRecord.objects.filter(draft_of__isnull=True).order_by("dataset_version_set_id", "id"): - if cr.dataset_version_set_id is None or cr.dataset_version_set_id != prev_version_set_id: - flush_version_set() - - version_set_crs.append(cr) - if cr.next_draft: - version_set_crs.append(cr.next_draft) - - # DatasetVersionSet shouldn't have multiple metadata_provider_users but this supports them just in case - if cr.metadata_provider_user and cr.metadata_provider_user not in version_set_users: - version_set_users.append(cr.metadata_provider_user) - - prev_version_set_id = cr.dataset_version_set_id - flush_version_set() - - logger.info(f"Added {num_perms} EditorPermissions to {num_datasets} datasets") - -def revert(apps, schema_editor): - pass - - -class Migration(migrations.Migration): - - dependencies = [ - ('metax_api', '0040_auto_20211006_1116'), - ] - - operations = [ - migrations.CreateModel( - name='EditorPermissions', - fields=[ - ('id', models.BigAutoField(editable=False, primary_key=True, serialize=False)), - ], - ), - migrations.CreateModel( - name='EditorUserPermission', - fields=[ - ('id', models.BigAutoField(editable=False, primary_key=True, serialize=False)), - ('active', models.BooleanField(default=True)), - ('removed', models.BooleanField(default=False)), - ('date_modified', models.DateTimeField(null=True)), - ('user_modified', models.CharField(max_length=200, null=True)), - ('date_created', models.DateTimeField()), - ('user_created', models.CharField(max_length=200, null=True)), - ('service_modified', models.CharField(help_text='Name of the service who last modified the record', max_length=200, null=True)), - ('service_created', models.CharField(help_text='Name of the service who created the record', max_length=200, null=True)), - 
('date_removed', models.DateTimeField(null=True)), - ('user_id', models.CharField(max_length=200)), - ('role', models.CharField(choices=[('creator', 'Creator'), ('editor', 'Editor')], max_length=16)), - ('verified', models.BooleanField(default=False)), - ('verification_token', models.CharField(max_length=32, null=True)), - ('verification_token_expires', models.DateTimeField(null=True)), - ('editor_permissions', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='users', to='metax_api.editorpermissions')), - - ], - ), - migrations.AddField( - model_name='catalogrecord', - name='editor_permissions', - field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='catalog_records', to='metax_api.editorpermissions'), - ), - migrations.AddIndex( - model_name='editoruserpermission', - index=models.Index(fields=['user_id'], name='metax_api_e_user_id_0b47cc_idx'), - ), - migrations.AddConstraint( - model_name='editoruserpermission', - constraint=models.UniqueConstraint(fields=('editor_permissions', 'user_id'), name='unique_dataset_user_permission'), - ), - migrations.AddConstraint( - model_name='editoruserpermission', - constraint=models.CheckConstraint(check=models.Q(_negated=True, user_id=''), name='require_user_id'), - ), - migrations.AddConstraint( - model_name='editoruserpermission', - constraint=models.CheckConstraint(check=models.Q(role__in=['creator', 'editor']), name='require_role'), - ), - migrations.RunPython(add_permissions, revert), - migrations.AlterField( - model_name='catalogrecord', - name='editor_permissions', - field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='catalog_records', - to='metax_api.editorpermissions'), - ), - ] diff --git a/src/metax_api/migrations/0042_auto_20211108_1256.py b/src/metax_api/migrations/0042_auto_20211108_1256.py deleted file mode 100644 index 51e1afcb..00000000 --- a/src/metax_api/migrations/0042_auto_20211108_1256.py +++ /dev/null @@ -1,21 +0,0 @@ -# Generated by Django 3.1.13 on 2021-11-08 10:56 - -from django.db import migrations - - -class Migration(migrations.Migration): - - dependencies = [ - ('metax_api', '0041_add_editorpermissions'), - ] - - operations = [ - migrations.RemoveField( - model_name='editoruserpermission', - name='verification_token', - ), - migrations.RemoveField( - model_name='editoruserpermission', - name='verification_token_expires', - ), - ] diff --git a/src/metax_api/migrations/0043_remove_editoruserpermission_verified.py b/src/metax_api/migrations/0043_remove_editoruserpermission_verified.py deleted file mode 100644 index 220acfab..00000000 --- a/src/metax_api/migrations/0043_remove_editoruserpermission_verified.py +++ /dev/null @@ -1,17 +0,0 @@ -# Generated by Django 3.1.13 on 2021-11-10 09:30 - -from django.db import migrations - - -class Migration(migrations.Migration): - - dependencies = [ - ('metax_api', '0042_auto_20211108_1256'), - ] - - operations = [ - migrations.RemoveField( - model_name='editoruserpermission', - name='verified', - ), - ] diff --git a/src/metax_api/models/__init__.py b/src/metax_api/models/__init__.py index 643cac39..ec08fb44 100755 --- a/src/metax_api/models/__init__.py +++ b/src/metax_api/models/__init__.py @@ -6,7 +6,7 @@ # :license: MIT from .api_error import ApiError -from .catalog_record import AlternateRecordSet, CatalogRecord, EditorPermissions, EditorUserPermission +from .catalog_record import AlternateRecordSet, CatalogRecord from .catalog_record_v2 import CatalogRecordV2 from .common import Common 
from .contract import Contract diff --git a/src/metax_api/models/catalog_record.py b/src/metax_api/models/catalog_record.py index 14736162..d052ee22 100755 --- a/src/metax_api/models/catalog_record.py +++ b/src/metax_api/models/catalog_record.py @@ -8,18 +8,15 @@ import logging from collections import defaultdict from copy import deepcopy -from datetime import datetime, timedelta from django.conf import settings from django.contrib.postgres.fields import ArrayField from django.db import connection, models, transaction from django.db.models import JSONField, Q, Sum from django.http import Http404 -from django.utils.crypto import get_random_string from rest_framework.serializers import ValidationError from metax_api.exceptions import Http400, Http403, Http503 -from metax_api.tasks.refdata.refdata_indexer import service from metax_api.utils import ( DelayedLog, IdentifierType, @@ -61,55 +58,6 @@ DFT_CATALOG = settings.DFT_DATA_CATALOG_IDENTIFIER -class EditorPermissions(models.Model): - """ - Shared permissions between linked copies of same dataset. - - Attaches a set of EditorUserPermission objects to a set of CatalogRecords. - """ - id = models.BigAutoField(primary_key=True, editable=False) - - -class PermissionRole(models.TextChoices): - CREATOR = "creator" - EDITOR = "editor" - - -class EditorUserPermission(Common): - """ - Table for attaching user roles to an EditorPermissions object. - """ - - # MODEL FIELD DEFINITIONS # - editor_permissions = models.ForeignKey( - EditorPermissions, related_name="users", on_delete=models.CASCADE - ) - user_id = models.CharField(max_length=200) - role = models.CharField(max_length=16, choices=PermissionRole.choices) - - class Meta: - indexes = [ - models.Index( - fields=[ - "user_id", - ] - ), - ] - constraints = [ - models.UniqueConstraint( - fields=["editor_permissions", "user_id"], name="unique_dataset_user_permission" - ), - models.CheckConstraint(check=~models.Q(user_id=""), name="require_user_id"), - models.CheckConstraint(check=models.Q(role__in=PermissionRole.values), name="require_role"), - ] - - def __repr__(self): - return f"" - - def delete(self, *args, **kwargs): - super().remove(*args, **kwargs) - - class DiscardRecord(Exception): pass @@ -497,10 +445,6 @@ class CatalogRecord(Common): help_text="Saves api related info about the dataset. E.g. api version", ) - editor_permissions = models.ForeignKey( - EditorPermissions, related_name="catalog_records", null=False, on_delete=models.PROTECT - ) - # END OF MODEL FIELD DEFINITIONS # """ @@ -1518,12 +1462,6 @@ def _pre_create_operations(self, pid_type=None): self._set_api_version() - # only new datasets need new EditorPermissions, copies already have one - if not self.editor_permissions_id: - self._add_editor_permissions() - if self.metadata_provider_user: - self._add_creator_editor_user_permission() - def _post_create_operations(self): if "files" in self.research_dataset or "directories" in self.research_dataset: # files must be added after the record itself has been created, to be able @@ -3099,27 +3037,6 @@ def _copy_undeleted_files_from_old_version(self): if DEBUG: _logger.debug("Added %d files to dataset %s" % (n_files_copied, self._new_version.id)) - def _add_editor_permissions(self): - permissions = EditorPermissions.objects.create() - self.editor_permissions = permissions - - def _add_creator_editor_user_permission(self): - """ - Add creator permission to a newly created CatalogRecord. 
- """ - perm = EditorUserPermission( - editor_permissions=self.editor_permissions, - user_id=self.metadata_provider_user, - role=PermissionRole.CREATOR, - date_created=self.date_created, - date_modified=self.date_modified, - user_created=self.user_created, - user_modified=self.user_modified, - service_created=self.service_created, - service_modified=self.service_modified, - ) - perm.save() - class RabbitMQPublishRecord: diff --git a/src/metax_api/models/catalog_record_v2.py b/src/metax_api/models/catalog_record_v2.py index 583a75c2..b9ee5895 100755 --- a/src/metax_api/models/catalog_record_v2.py +++ b/src/metax_api/models/catalog_record_v2.py @@ -117,12 +117,6 @@ def _pre_create_operations(self): self._set_api_version() - # only new datasets need new EditorPermissions, copies already have one - if not self.editor_permissions_id: - self._add_editor_permissions() - if self.metadata_provider_user: - self._add_creator_editor_user_permission() - def _post_create_operations(self, pid_type=None): if "files" in self.research_dataset or "directories" in self.research_dataset: diff --git a/src/metax_api/services/catalog_record_service.py b/src/metax_api/services/catalog_record_service.py index 73277998..3f144272 100755 --- a/src/metax_api/services/catalog_record_service.py +++ b/src/metax_api/services/catalog_record_service.py @@ -130,28 +130,8 @@ def get_queryset_search_params(cls, request): queryset_search_params["api_meta__contains"] = {"version": value} - if request.query_params.get("editor_permissions_user"): - cls.filter_by_editor_permissions_user(request, queryset_search_params) - return queryset_search_params - @staticmethod - def filter_by_editor_permissions_user(request, queryset_search_params): - """ - Add filter for querying datasets where user has editor user permissions. 
- """ - user_id = request.query_params["editor_permissions_user"] - - # non-service users can only query their own datasets - if not request.user.is_service: - if request.user.username == '': - raise Http403({"detail": ["Query by editor_permissions_user is only supported for authenticated users"]}) - if request.user.username != user_id: - raise Http403({"detail": ["Provided editor_permissions_user does not match current user"]}) - - queryset_search_params["editor_permissions__users__user_id"] = user_id - queryset_search_params["editor_permissions__users__removed"] = False - @staticmethod def filter_by_state(request, queryset_search_params): """ diff --git a/src/metax_api/settings/components/access_control.py b/src/metax_api/settings/components/access_control.py index f8a3d018..756d1a57 100755 --- a/src/metax_api/settings/components/access_control.py +++ b/src/metax_api/settings/components/access_control.py @@ -11,7 +11,6 @@ "datacatalogs": {}, "datasets": {}, "directories": {}, - "editorpermissions": {}, "files": {}, "filestorages": {}, "schemas": {}, @@ -93,29 +92,6 @@ def __lt__(self, other): Role.ETSIN, ] -api_permissions.rest.editorpermissions.create = [ - Role.METAX, - Role.END_USERS, - Role.TPAS, - Role.QVAIN, - Role.ETSIN, -] -api_permissions.rest.editorpermissions.read = [Role.ALL] -api_permissions.rest.editorpermissions["update"] = [ - Role.METAX, - Role.END_USERS, - Role.TPAS, - Role.QVAIN, - Role.ETSIN, -] -api_permissions.rest.editorpermissions.delete = [ - Role.METAX, - Role.END_USERS, - Role.TPAS, - Role.QVAIN, - Role.ETSIN, -] - api_permissions.rest.directories.read = [ Role.METAX, Role.QVAIN, diff --git a/src/metax_api/swagger/v1/swagger.yaml b/src/metax_api/swagger/v1/swagger.yaml index fdd6bdfc..9c76ecc8 100755 --- a/src/metax_api/swagger/v1/swagger.yaml +++ b/src/metax_api/swagger/v1/swagger.yaml @@ -640,7 +640,7 @@ paths: in: query description: Sets paging on with default limit of 10 required: false - type: boolean + type: bolean - name: offset in: query description: Offset for paging @@ -1414,133 +1414,8 @@ paths: description: Resource not found. tags: - Dataset API - /rest/datasets/{CRID}/editor_permissions/users: - get: - summary: List all editor permissions of a record - parameters: - - name: CRID - in: path - description: Catalog record ID - required: true - type: string - responses: - "200": - description: Successful operation, return a list of editor rights. May return an empty list. - schema: - type: array - items: - $ref: '#/definitions/EditorUserPermission' - "400": - description: Bad request. - "404": - description: Resource not found. - tags: - - Dataset API - post: - summary: Create a new editor permission of a record - parameters: - - name: CRID - in: path - description: Catalog record ID - required: true - type: string - - name: body - in: body - schema: - type: object - properties: - user_id: - type: string - role: - type: string - responses: - "201": - description: Successful operation, return created editor rights. - schema: - $ref: '#/definitions/EditorUserPermission' - "400": - description: Bad request. - "404": - description: Resource not found. 
- tags: - - Dataset API - /rest/datasets/{CRID}/editor_permissions/users/{USER_ID}: - get: - summary: List all editor permissions of a record - parameters: - - name: CRID - in: path - description: Catalog record ID - required: true - type: string - - name: USER_ID - in: path - description: User ID - required: true - type: string - responses: - "200": - description: Successful operation, return a list of editor rights. May return an empty list. - schema: - $ref: '#/definitions/EditorUserPermission' - "400": - description: Bad request. - "404": - description: Resource not found. - tags: - - Dataset API - patch: - summary: Update role or enable verified - parameters: - - name: CRID - in: path - description: Catalog record ID - required: true - type: string - - name: USER_ID - in: path - description: User ID - required: true - type: string - - name: body - in: body - schema: - $ref: '#/definitions/EditorUserPermission' - responses: - "200": - description: Successful operation, return changed editor rights. - schema: - $ref: '#/definitions/EditorUserPermission' - "400": - description: Bad request. - "404": - description: Resource not found. - tags: - - Dataset API - delete: - summary: Editorpermission marked as removed - parameters: - - name: CRID - in: path - description: Catalog record ID - required: true - type: string - - name: USER_ID - in: path - description: User ID - required: true - type: string - responses: - '200': - description: Successful operation - '400': - description: Bad request - '403': - description: Forbidden - '404': - description: Not found - tags: - - Dataset API + + # Contract API /rest/contracts: get: @@ -2168,43 +2043,8 @@ definitions: count_cumulative: type: number format: float - EditorUserPermission: - type: object - properties: - id: - type: integer - readOnly: true - active: - type: boolean - removed: - type: boolean - date_modified: - type: string - format: date-time - readOnly: true - user_modified: - type: string - date_created: - type: string - format: date-time - readOnly: true - user_created: - type: string - service_modified: - type: string - service_created: - type: string - date_removed: - type: string - format: date-time - user_id: - type: string - role: - type: string - verified: - type: boolean - editor_permission_id: - type: integer + + examples: count_datasets: type: object diff --git a/src/metax_api/swagger/v2/swagger.yaml b/src/metax_api/swagger/v2/swagger.yaml index f7c29ee7..682ffdf8 100755 --- a/src/metax_api/swagger/v2/swagger.yaml +++ b/src/metax_api/swagger/v2/swagger.yaml @@ -1598,133 +1598,7 @@ paths: description: Resource not found. tags: - Dataset API - /rest/datasets/{CRID}/editor_permissions/users: - get: - summary: List all editor permissions of a record - parameters: - - name: CRID - in: path - description: Catalog record ID - required: true - type: string - responses: - "200": - description: Successful operation, return a list of editor rights. May return an empty list. - schema: - type: array - items: - $ref: '#/definitions/EditorUserPermission' - "400": - description: Bad request. - "404": - description: Resource not found. - tags: - - Dataset API - post: - summary: Create a new editor permission of a record - parameters: - - name: CRID - in: path - description: Catalog record ID - required: true - type: string - - name: body - in: body - schema: - type: object - properties: - user_id: - type: string - role: - type: string - responses: - "201": - description: Successful operation, return created editor rights. 
- schema: - $ref: '#/definitions/EditorUserPermission' - "400": - description: Bad request. - "404": - description: Resource not found. - tags: - - Dataset API - /rest/datasets/{CRID}/editor_permissions/users/{USER_ID}: - get: - summary: List all editor permissions of a record - parameters: - - name: CRID - in: path - description: Catalog record ID - required: true - type: string - - name: USER_ID - in: path - description: User ID - required: true - type: string - responses: - "200": - description: Successful operation, return a list of editor rights. May return an empty list. - schema: - $ref: '#/definitions/EditorUserPermission' - "400": - description: Bad request. - "404": - description: Resource not found. - tags: - - Dataset API - patch: - summary: Update role - parameters: - - name: CRID - in: path - description: Catalog record ID - required: true - type: string - - name: USER_ID - in: path - description: User ID - required: true - type: string - - name: body - in: body - schema: - $ref: '#/definitions/EditorUserPermission' - responses: - "200": - description: Successful operation, return changed editor rights. - schema: - $ref: '#/definitions/EditorUserPermission' - "400": - description: Bad request. - "404": - description: Resource not found. - tags: - - Dataset API - delete: - summary: Editorpermission marked as removed - parameters: - - name: CRID - in: path - description: Catalog record ID - required: true - type: string - - name: USER_ID - in: path - description: User ID - required: true - type: string - responses: - '200': - description: Successful operation - '400': - description: Bad request - '403': - description: Forbidden - '404': - description: Not found - tags: - - Dataset API + # Contract API /rest/v2/contracts: @@ -2515,41 +2389,7 @@ definitions: count_cumulative: type: number format: float - EditorUserPermission: - type: object - properties: - id: - type: integer - readOnly: true - active: - type: boolean - removed: - type: boolean - date_modified: - type: string - format: date-time - readOnly: true - user_modified: - type: string - date_created: - type: string - format: date-time - readOnly: true - user_created: - type: string - service_modified: - type: string - service_created: - type: string - date_removed: - type: string - format: date-time - user_id: - type: string - role: - type: string - editor_permission_id: - type: integer + examples: count_datasets: diff --git a/src/metax_api/tests/api/rest/base/views/datasets/read.py b/src/metax_api/tests/api/rest/base/views/datasets/read.py index f2a718f0..98b255bc 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/read.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/read.py @@ -965,19 +965,6 @@ def test_filter_by_legacy(self): self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual(count_all, response.data["count"], response.data) - def test_filter_by_editor_permissions_user_ok(self): - cr = CatalogRecord.objects.get(pk=1) - cr.editor_permissions.users.update(user_id='test_user_x') - response = self.client.get(f"/rest/datasets?editor_permissions_user=test_user_x") - self.assertEqual(response.data["count"], 1) - - def test_filter_by_editor_permissions_user_removed(self): - cr = CatalogRecord.objects.get(pk=1) - cr.editor_permissions.users.update(user_id='test_user_x') - cr.editor_permissions.users.first().delete() - response = self.client.get(f"/rest/datasets?editor_permissions_user=test_user_x") - self.assertEqual(response.data["count"], 0) - class 
CatalogRecordApiReadXMLTransformationTests(CatalogRecordApiReadCommon): diff --git a/src/metax_api/tests/api/rest/base/views/datasets/write.py b/src/metax_api/tests/api/rest/base/views/datasets/write.py index d9a938f1..ce3ec89f 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/write.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/write.py @@ -556,23 +556,6 @@ def test_create_catalog_record_using_pid_type(self): response.data["research_dataset"]["preferred_identifier"].startswith("urn:") ) - def test_create_catalog_record_adds_creator_permission(self): - response = self.client.post( - "/rest/datasets", - self.cr_test_data, - format="json", - ) - cr = CatalogRecord.objects.get(id=response.data["id"]) - self.assertEqual( - list(cr.editor_permissions.users.values("user_id", "role")), - [ - { - "user_id": self.cr_test_data["metadata_provider_user"], - "role": "creator", - } - ], - ) - class CatalogRecordApiWriteIdentifierUniqueness(CatalogRecordApiWriteCommon): """ diff --git a/src/metax_api/tests/api/rest/base/views/editorpermissions/__init__.py b/src/metax_api/tests/api/rest/base/views/editorpermissions/__init__.py deleted file mode 100644 index aef77389..00000000 --- a/src/metax_api/tests/api/rest/base/views/editorpermissions/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .read import * -from .write import * diff --git a/src/metax_api/tests/api/rest/base/views/editorpermissions/read.py b/src/metax_api/tests/api/rest/base/views/editorpermissions/read.py deleted file mode 100644 index af7a67e9..00000000 --- a/src/metax_api/tests/api/rest/base/views/editorpermissions/read.py +++ /dev/null @@ -1,87 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - -from json import load as json_load - -from django.core.management import call_command - -from rest_framework import status -from rest_framework.test import APITestCase - -from metax_api.tests.utils import TestClassUtils, test_data_file_path - - -class EditorUserPermissionApiReadCommon(APITestCase, TestClassUtils): - @classmethod - def setUpClass(cls): - """ - Loaded only once for test cases inside this class. - """ - call_command("loaddata", test_data_file_path, verbosity=0) - super(EditorUserPermissionApiReadCommon, cls).setUpClass() - - def setUp(self): - self.cr_from_test_data = self._get_whole_object_from_test_data("catalogrecord", requested_pk=1) - self.crid = self.cr_from_test_data['pk'] - self.identifier = "cr955e904-e3dd-4d7e-99f1-3fed446f96d1" - self.permissionid = self.cr_from_test_data["fields"]["editor_permissions_id"] - self.editor_user_permission = self._get_whole_object_from_test_data("editoruserpermission", requested_pk=1) - self.userid = self.editor_user_permission["fields"]["user_id"] - self._use_http_authorization() - - def _get_whole_object_from_test_data(self, model_name, requested_pk=0): - - with open(test_data_file_path) as test_data_file: - test_data = json_load(test_data_file) - - model = "metax_api.%s" % model_name - i = 0 - - for row in test_data: - if row["model"] == model: - if row["pk"] == requested_pk: - obj = { - "id": row["pk"], - } - obj.update(row) - return obj - - raise Exception( - "Could not find model %s from test data with pk == %d. " - "Are you certain you generated rows for model %s in generate_test_data.py?" 
- % (model_name, requested_pk, model_name) - ) - - -class EditorUserPermissionApiReadBasicTests(EditorUserPermissionApiReadCommon): - - """ - Basic read operations - """ - - def test_read_editor_permission_list_with_pk(self): - response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) - self.assertEqual(response.status_code, status.HTTP_200_OK) - - def test_read_editor_permission_list_with_uuid(self): - response = self.client.get("/rest/datasets/%s/editor_permissions/users" % self.identifier) - self.assertEqual(response.status_code, status.HTTP_200_OK) - - def test_read_editor_permission_list_invalid(self): - response = self.client.get("/rest/datasets/99999/editor_permissions/users") - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - - def test_read_editor_permission_details_by_pk(self): - response = self.client.get("/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, self.userid)) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data["editor_permissions"], self.permissionid) - self.assertEqual(response.data["user_id"], self.userid) - - def test_read_editor_permission_details_by_pk_invalid(self): - response = self.client.get("/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, "invalid")) - self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) - diff --git a/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py b/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py deleted file mode 100644 index 5bc831f0..00000000 --- a/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py +++ /dev/null @@ -1,153 +0,0 @@ -# This file is part of the Metax API service -# -# Copyright 2017-2018 Ministry of Education and Culture, Finland -# -# :author: CSC - IT Center for Science Ltd., Espoo Finland -# :license: MIT - - -from json import load as json_load - -from django.core.management import call_command - -from rest_framework import status -from rest_framework.test import APITestCase - -from metax_api.models import EditorUserPermission -from metax_api.tests.utils import TestClassUtils, test_data_file_path - - -class EditorUserPermissionApiWriteCommon(APITestCase, TestClassUtils): - @classmethod - def setUpClass(cls): - """ - Loaded only once for test cases inside this class. - """ - call_command("loaddata", test_data_file_path, verbosity=0) - super(EditorUserPermissionApiWriteCommon, cls).setUpClass() - - def setUp(self): - self.cr_from_test_data = self._get_whole_object_from_test_data("catalogrecord", requested_pk=1) - self.crid = self.cr_from_test_data['pk'] - self.permissionid = self.cr_from_test_data["fields"]["editor_permissions_id"] - self.editor_user_permission = self._get_whole_object_from_test_data("editoruserpermission", requested_pk=1) - self.userid = self.editor_user_permission["fields"]["user_id"] - self._use_http_authorization() - - def _get_whole_object_from_test_data(self, model_name, requested_pk=0): - - with open(test_data_file_path) as test_data_file: - test_data = json_load(test_data_file) - - model = "metax_api.%s" % model_name - i = 0 - - for row in test_data: - if row["model"] == model: - if row["pk"] == requested_pk: - obj = { - "id": row["pk"], - } - obj.update(row) - return obj - - raise Exception( - "Could not find model %s from test data with pk == %d. " - "Are you certain you generated rows for model %s in generate_test_data.py?" 
- % (model_name, requested_pk, model_name) - ) - - -class EditorUserPermissionApiWriteBasicTests(EditorUserPermissionApiWriteCommon): - - """ - Basic read operations - """ - - def test_write_editor_permission(self): - self._set_http_authorization("service") - data = {"role": "editor", "user_id": "test_editor"} - response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) - user_count = len(response.data) - response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data["editor_permissions"], self.permissionid) - response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data), user_count + 1) - - def test_write_editor_permission_invalid_data(self): - self._set_http_authorization("service") - data = {"role": "editor"} - response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) - user_count = len(response.data) - response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data), user_count) - - def test_write_editor_permission_existing_userid(self): - self._set_http_authorization("service") - data = {"role": "editor", "user_id": "double_editor"} - response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) - user_count = len(response.data) - response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) - self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(len(response.data), user_count + 1) - - def test_write_editor_permission_change_values(self): - self._set_http_authorization("service") - data = {"role": "creator", "user_id": "change_editor"} - response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) - user_count = len(response.data) - response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - new_data = {"role": "editor"} - response = self.client.patch("/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, - response.data.get('user_id')), - new_data, format="json") - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - self.assertEqual(response.data.get("role"), "editor") - - def test_write_editor_permission_remove_users(self): - self._set_http_authorization("service") - data = {"role": "creator", "user_id": "new_creator"} - response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - response = 
self.client.delete("/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, data.get('user_id'))) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) - for user in response.data: - if user.get('role') == 'creator': - response = self.client.delete( - "/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, user.get('user_id'))) - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) - else: - response = self.client.delete( - "/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, user.get('user_id'))) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - - def test_write_editor_permission_add_removed_user(self): - self._set_http_authorization("service") - data = {"role": "editor", "user_id": "new_editor"} - # add - response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - # remove - response = self.client.delete( - "/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, data.get('user_id'))) - self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) - removed_user = EditorUserPermission.objects_unfiltered.get(user_id="new_editor", - editor_permissions_id=self.permissionid) - self.assertEqual(removed_user.removed, True) - response = self.client.post("/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json") - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - self.assertEqual(response.data.get('removed'), False) - diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py b/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py index 398e661c..710ecfef 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py @@ -612,22 +612,6 @@ def test_create_and_merge_draft(self): ) self.assertEqual("next_draft" in response.data, False, "next_draft link should be gone") - def test_create_and_merge_draft_keeps_permissions(self): - """ - Ensure creating and merging drafts keeps the same EditorPermission object. - """ - cr = self._create_dataset() - original_editor_permissions_id = CatalogRecordV2.objects.get(id=cr['id']).editor_permissions_id - - draft_cr = self._create_draft(cr["id"]) - draft_editor_permissions_id = CatalogRecordV2.objects.get(id=draft_cr['id']).editor_permissions_id - self.assertEqual(draft_editor_permissions_id, original_editor_permissions_id) - - self._merge_draft_changes(draft_cr["id"]) - merged_editor_permissions_id = CatalogRecordV2.objects.get(id=cr['id']).editor_permissions_id - self.assertEqual(merged_editor_permissions_id, original_editor_permissions_id) - - def test_missing_issued_date_is_generated_when_draft_is_merged(self): """ Testing a case where user removes 'issued_date' from draft before merging diff --git a/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py b/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py index d61fce67..d89f0c29 100755 --- a/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py +++ b/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py @@ -121,21 +121,6 @@ def test_create_new_version(self): response.data["identifier"], ) - def test_create_new_version_shares_permissions(self): - """ - Ensure new version shares EditorPermissions with the original. 
- """ - response = self.client.post( - "/rpc/v2/datasets/create_new_version?identifier=5", format="json" - ) - self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) - - next_version_identifier = response.data.get("identifier") - cr = CR.objects.get(id=5) - next_version_cr = CR.objects.get(identifier=next_version_identifier) - self.assertEqual(cr.editor_permissions_id, next_version_cr.editor_permissions_id) - - def test_delete_new_version_draft(self): """ Ensure a new version that is created into draft state can be deleted, and is permanently deleted. diff --git a/src/metax_api/tests/testdata/generate_test_data.py b/src/metax_api/tests/testdata/generate_test_data.py index b6ffcabc..6729396a 100755 --- a/src/metax_api/tests/testdata/generate_test_data.py +++ b/src/metax_api/tests/testdata/generate_test_data.py @@ -268,7 +268,6 @@ def save_test_data( contract_list, catalog_record_list, dataset_version_sets, - editor_permissions ): with open("test_data.json", "w") as f: print("dumping test data as json to metax_api/tests/test_data.json...") @@ -279,13 +278,13 @@ def save_test_data( + data_catalogs_list + contract_list + dataset_version_sets - + editor_permissions + catalog_record_list, f, indent=4, sort_keys=True, ) + def generate_data_catalogs(start_idx, data_catalog_max_rows, validate_json, type): print("generating %s data catalogs..." % type) test_data_catalog_list = [] @@ -369,28 +368,6 @@ def generate_contracts(contract_max_rows, validate_json): return test_contract_list -def add_editor_permissions(editor_permissions, dataset): - # add EditorPermissions - pk = len(editor_permissions) - editor_perms = { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": pk, - } - editor_permissions.append(editor_perms) - editor_user_perms = { - "fields": { - "user_id": dataset["fields"]["metadata_provider_user"], - "date_created": dataset["fields"]["date_created"], - "editor_permissions_id": pk, - "role": "creator", - }, - "model": "metax_api.editoruserpermission", - "pk": pk + 1, - } - editor_permissions.append(editor_user_perms) - dataset["fields"]["editor_permissions_id"] = pk - def generate_catalog_records( basic_catalog_record_max_rows, data_catalogs_list, @@ -400,7 +377,6 @@ def generate_catalog_records( type, test_data_list=[], dataset_version_sets=[], - editor_permissions=[], ): print("generating %s catalog records..." 
% type) @@ -450,8 +426,6 @@ def generate_catalog_records( new["fields"]["date_created"] = "2017-05-23T10:07:22Z" new["fields"]["files"] = [] - add_editor_permissions(editor_permissions, new) - # add files if type == "ida": @@ -678,8 +652,6 @@ def generate_catalog_records( new["fields"]["date_modified"] = "2017-09-23T10:07:22Z" new["fields"]["date_created"] = "2017-05-23T10:07:22Z" - add_editor_permissions(editor_permissions, new) - new["fields"]["research_dataset"]["metadata_version_identifier"] = generate_test_identifier( cr_type, len(test_data_list) + 1, urn=False ) @@ -789,7 +761,7 @@ def generate_catalog_records( json_validate(new["fields"]["research_dataset"], json_schema) test_data_list.append(new) - return test_data_list, dataset_version_sets, editor_permissions + return test_data_list, dataset_version_sets def generate_alt_catalog_records(test_data_list): @@ -872,7 +844,7 @@ def set_qvain_info_to_records(catalog_record_list): ida_data_catalog_max_rows + 1, att_data_catalog_max_rows, validate_json, "att" ) - catalog_record_list, dataset_version_sets, editor_permissions = generate_catalog_records( + catalog_record_list, dataset_version_sets = generate_catalog_records( ida_catalog_record_max_rows, ida_data_catalogs_list, contract_list, @@ -881,7 +853,7 @@ def set_qvain_info_to_records(catalog_record_list): "ida", ) - catalog_record_list, dataset_version_sets, editor_permissions = generate_catalog_records( + catalog_record_list, dataset_version_sets = generate_catalog_records( att_catalog_record_max_rows, att_data_catalogs_list, contract_list, @@ -890,7 +862,6 @@ def set_qvain_info_to_records(catalog_record_list): "att", catalog_record_list, dataset_version_sets, - editor_permissions, ) catalog_record_list = generate_alt_catalog_records(catalog_record_list) @@ -905,7 +876,6 @@ def set_qvain_info_to_records(catalog_record_list): contract_list, catalog_record_list, dataset_version_sets, - editor_permissions, ) print("done") diff --git a/src/metax_api/tests/testdata/test_data.json b/src/metax_api/tests/testdata/test_data.json index 96607ab1..c3802a10 100755 --- a/src/metax_api/tests/testdata/test_data.json +++ b/src/metax_api/tests/testdata/test_data.json @@ -5712,396 +5712,6 @@ "model": "metax_api.datasetversionset", "pk": 13 }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 0 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 0, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 1 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 2 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 2, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 3 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 4 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 4, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 5 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 6 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 6, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 7 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 8 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - 
"editor_permissions_id": 8, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 9 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 10 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 10, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 11 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 12 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 12, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 13 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 14 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 14, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 15 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 16 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 16, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 17 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 18 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 18, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 19 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 20 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 20, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 21 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 22 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 22, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 23 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 24 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 24, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 25 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 26 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 26, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 27 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 28 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 28, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 29 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 30 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 30, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 31 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 32 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 32, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 33 - }, - { 
- "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 34 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 34, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 35 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 36 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 36, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 37 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 38 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 38, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 39 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 40 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 40, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 41 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 42 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 42, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 43 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 44 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 44, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 45 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 46 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 46, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 47 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 48 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 48, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 49 - }, - { - "fields": {}, - "model": "metax_api.editorpermissions", - "pk": 50 - }, - { - "fields": { - "date_created": "2017-05-23T10:07:22Z", - "editor_permissions_id": 50, - "role": "creator", - "user_id": "abc-user-123" - }, - "model": "metax_api.editoruserpermission", - "pk": 51 - }, { "fields": {}, "model": "metax_api.alternaterecordset", @@ -6117,7 +5727,6 @@ "dataset_version_set": 1, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 0, "files": [ 1, 2 @@ -6273,7 +5882,6 @@ "dataset_version_set": 2, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 2, "files": [ 3, 4 @@ -6429,7 +6037,6 @@ "dataset_version_set": 3, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 4, "files": [ 5, 6 @@ -6585,7 +6192,6 @@ "dataset_version_set": 4, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 6, "files": [ 7, 8 @@ -6741,7 +6347,6 @@ "dataset_version_set": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 8, "files": [ 9, 10 @@ -6897,7 +6502,6 @@ "dataset_version_set": 6, "date_created": 
"2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 10, "files": [ 11, 12 @@ -7053,7 +6657,6 @@ "dataset_version_set": 7, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 12, "files": [ 13, 14 @@ -7209,7 +6812,6 @@ "dataset_version_set": 8, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 14, "files": [ 15, 16 @@ -7350,7 +6952,6 @@ "dataset_version_set": 9, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 16, "files": [ 17, 18 @@ -7477,7 +7078,6 @@ "dataset_version_set": 10, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 18, "files": [ 19, 20 @@ -7601,7 +7201,6 @@ "dataset_version_set": 11, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor_permissions_id": 20, "files": [ 1, 2, @@ -8553,7 +8152,6 @@ "dataset_version_set": 12, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor_permissions_id": 22, "files": [ 1, 2, @@ -9505,7 +9103,6 @@ "dataset_version_set": 13, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor_permissions_id": 24, "files": [ 22, 23, @@ -10549,7 +10146,6 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 26, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9614", "metadata_owner_org": "abc-org-123", @@ -10689,7 +10285,6 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 28, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9615", "metadata_owner_org": "abc-org-123", @@ -10829,7 +10424,6 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 30, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9616", "metadata_owner_org": "abc-org-123", @@ -10969,7 +10563,6 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 32, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9617", "metadata_owner_org": "abc-org-123", @@ -11109,7 +10702,6 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 34, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9618", "metadata_owner_org": "abc-org-123", @@ -11249,7 +10841,6 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 36, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9619", "metadata_owner_org": "abc-org-123", @@ -11389,7 +10980,6 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 38, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9620", "metadata_owner_org": "abc-org-123", @@ -11520,7 +11110,6 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": 
"2017-06-23T10:07:22Z", - "editor_permissions_id": 40, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9621", "metadata_owner_org": "abc-org-123", @@ -11651,7 +11240,6 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 42, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9622", "metadata_owner_org": "abc-org-123", @@ -11782,7 +11370,6 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 44, "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9623", "metadata_owner_org": "abc-org-123", @@ -11912,7 +11499,6 @@ "data_catalog": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor_permissions_id": 46, "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9624", "metadata_owner_org": "abc-org-123", "metadata_provider_org": "abc-org-123", @@ -12847,7 +12433,6 @@ "data_catalog": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor_permissions_id": 48, "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9625", "metadata_owner_org": "abc-org-123", "metadata_provider_org": "abc-org-123", @@ -13782,7 +13367,6 @@ "data_catalog": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", - "editor_permissions_id": 50, "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9626", "metadata_owner_org": "abc-org-123", "metadata_provider_org": "abc-org-123", @@ -14720,7 +14304,6 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 18, "files": [ 19, 20 @@ -14846,7 +14429,6 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", - "editor_permissions_id": 18, "files": [ 19, 20 From c047b44daeead3b08180bc6ac91ca181bbd92ebe Mon Sep 17 00:00:00 2001 From: Toni Date: Fri, 19 Nov 2021 14:55:29 +0200 Subject: [PATCH 109/160] log deleted objects instead of saving them to database, write apierrors to file instead of database for now. 
--- src/metax_api/services/rabbitmq_service.py | 11 +++++++++-- src/metax_api/settings/__init__.py | 1 + src/metax_api/settings/components/common.py | 3 +++ src/metax_api/signals/post_delete.py | 8 ++++++-- src/metax_api/tests/models/signals.py | 4 ++-- 5 files changed, 21 insertions(+), 6 deletions(-) diff --git a/src/metax_api/services/rabbitmq_service.py b/src/metax_api/services/rabbitmq_service.py index d84ecddb..bd7daba9 100755 --- a/src/metax_api/services/rabbitmq_service.py +++ b/src/metax_api/services/rabbitmq_service.py @@ -7,16 +7,19 @@ import logging import random +from datetime import datetime from json import dumps as json_dumps, loads from time import sleep import pika + +from django.core import serializers from django.db import DatabaseError from django.conf import settings from django.core.serializers.json import DjangoJSONEncoder from metax_api.models import ApiError -from metax_api.utils.utils import executing_test_case +from metax_api.utils.utils import executing_test_case, datetime_to_str, parse_timestamp_string_to_tz_aware_datetime _logger = logging.getLogger(__name__) @@ -132,7 +135,11 @@ def consume_api_errors(self): finally: channel.basic_ack(method.delivery_tag) try: - ApiError.objects.bulk_create(errors, batch_size=5000) + # ApiError.objects.bulk_create(errors, batch_size=5000) + now = datetime.now() + tz_aware = parse_timestamp_string_to_tz_aware_datetime(datetime_to_str(now)) + with open(f"/var/log/metax-api/errors/api-errors/{tz_aware}", "w") as out: + serializers.serialize("json", errors, stream=out) except DatabaseError as e: _logger.error("cannot create API Error. Discarding..") _logger.error(f"error: {e}") diff --git a/src/metax_api/settings/__init__.py b/src/metax_api/settings/__init__.py index b091c712..c9950152 100755 --- a/src/metax_api/settings/__init__.py +++ b/src/metax_api/settings/__init__.py @@ -28,6 +28,7 @@ ELASTIC_SEARCH_HOSTS=(list, ["localhost"]), ELASTIC_SEARCH_PORT=(int, 9200), ELASTIC_SEARCH_USE_SSL=(bool, False), + ENABLE_DELETED_OBJECTS_SAVING=(bool, False), ENABLE_V1_ENDPOINTS=(bool, True), ENABLE_V2_ENDPOINTS=(bool, True), ENABLE_DJANGO_WATCHMAN=(bool, False), diff --git a/src/metax_api/settings/components/common.py b/src/metax_api/settings/components/common.py index d2f9a447..10756d6d 100755 --- a/src/metax_api/settings/components/common.py +++ b/src/metax_api/settings/components/common.py @@ -32,6 +32,9 @@ CHECKSUM_ALGORITHMS = ["SHA-256", "MD5", "SHA-512"] ERROR_FILES_PATH = env("ERROR_FILES_PATH") +ENABLE_DELETED_OBJECTS_SAVING = env("ENABLE_DELETED_OBJECTS_SAVING") + + # Allow only specific hosts to access the app ALLOWED_HOSTS = ["localhost", "127.0.0.1", "[::1]"] diff --git a/src/metax_api/signals/post_delete.py b/src/metax_api/signals/post_delete.py index 3dd1a0f5..ccce7b4e 100644 --- a/src/metax_api/signals/post_delete.py +++ b/src/metax_api/signals/post_delete.py @@ -1,6 +1,7 @@ import json import logging +from django.conf import settings from django.db.models.signals import post_delete from django.dispatch import receiver from django.core.serializers.json import DjangoJSONEncoder @@ -21,8 +22,11 @@ def deleted_object_receiver(instance, sender, *args, **kwargs): if hasattr(instance, 'date_modified'): instance.date_modified = instance.date_modified.strftime("%m/%d/%Y, %H:%M:%S") instance = model_to_dict(instance) - deleted_object_json = json.dumps(instance, cls=DjangoJSONEncoder) - DeletedObject.objects.create(model_name=model_type, object_data=deleted_object_json) + if settings.ENABLE_DELETED_OBJECTS_SAVING: + deleted_object_json
= json.dumps(instance, cls=DjangoJSONEncoder) + DeletedObject.objects.create(model_name=model_type, object_data=deleted_object_json) + else: + _logger.info(str(instance)) except Exception as e: _logger.error("cannot save Deleted Object. Discarding..") _logger.debug(f"error: {e}") \ No newline at end of file diff --git a/src/metax_api/tests/models/signals.py b/src/metax_api/tests/models/signals.py index b9c11aae..3e3b57e3 100644 --- a/src/metax_api/tests/models/signals.py +++ b/src/metax_api/tests/models/signals.py @@ -20,7 +20,7 @@ def setUp(self): call_command("loaddata", test_data_file_path, verbosity=0) self.today = date.today().strftime("%d/%m/%Y") - def test_deleting_catalog_record_creates_new_deleted_object(self): +""" def test_deleting_catalog_record_creates_new_deleted_object(self): # test that deleting CatalogRecord object creates a new deleted object CatalogRecord.objects_unfiltered.get(pk=1).delete(hard=True) deleted_object = DeletedObject.objects.last() @@ -31,6 +31,6 @@ def test_deleting_catalog_record_creates_new_deleted_object(self): CatalogRecordV2.objects_unfiltered.get(pk=2).delete(hard=True) deleted_object_v2 = DeletedObject.objects.last() self.assertEqual(deleted_object_v2.model_name, "CatalogRecordV2") - self.assertEqual(deleted_object_v2.date_deleted.strftime("%d/%m/%Y"), self.today) + self.assertEqual(deleted_object_v2.date_deleted.strftime("%d/%m/%Y"), self.today)""" From 8987f9b24784589a6460eed7b917581fe06307b4 Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Wed, 24 Nov 2021 11:46:47 +0200 Subject: [PATCH 110/160] Added management command for marking files as removed --- .../management/commands/mark_files_removed.py | 49 +++++++++++++++++++ .../management/commands/mark_files_removed.py | 35 +++++++++++++ src/metax_api/tests/utils.py | 24 ++++++++- 3 files changed, 106 insertions(+), 2 deletions(-) create mode 100644 src/metax_api/management/commands/mark_files_removed.py create mode 100644 src/metax_api/tests/management/commands/mark_files_removed.py diff --git a/src/metax_api/management/commands/mark_files_removed.py b/src/metax_api/management/commands/mark_files_removed.py new file mode 100644 index 00000000..0f5de913 --- /dev/null +++ b/src/metax_api/management/commands/mark_files_removed.py @@ -0,0 +1,49 @@ +import logging + +from django.core.management.base import BaseCommand +from metax_api.models import File +from metax_api.tests.utils import management_command_add_test_logs + +logger = logging.getLogger(__name__) + +class Command(BaseCommand): + help = """Marks files which start with a given file path removed from a given project. + File path prefix can be given as command line parameter or they can be read from a file. + This command will produce duplicate prints, the first line is used in tests, but is not stored on logs. 
+ The second line is stored on logs, but can't be used by the tests.""" + + def add_arguments(self, parser): + parser.add_argument("project_identifier", type=str, help="Identifier of the project where the files are removed") + parser.add_argument("--path_prefix", type=str, help="Prefix for the file path of the files that are removed") + parser.add_argument("--path_prefix_file", type=str, help="Name of the file where to read the path prefixes (Required if --path_prefix is not given)") + + @management_command_add_test_logs(logger) + def handle(self, *args, **options): + logger.info("Remove files from database") + if options["path_prefix"] is None and options["path_prefix_file"] is None: + logger.error("path_prefix or path_prefix_file is required") + return + + if options["path_prefix_file"]: + path_prefixes = self.read_prefixes_from_file(options["path_prefix_file"]) + else: + path_prefixes = [options["path_prefix"]] + + + removed_files_sum = 0 + for prefix in path_prefixes: + files = File.objects.filter(project_identifier = options["project_identifier"], file_path__startswith = prefix, removed = "f") + logger.info("Found %d files to remove in project: %s with path prefix: %s" % (len(files), options["project_identifier"], prefix)) + for file in files: + file.delete() + removed_files_sum += len(files) + + logger.info("Removed %d files" % removed_files_sum) + + + + def read_prefixes_from_file(self, filename): + with open(filename) as file: + lines = file.readlines() + lines = [line.rstrip() for line in lines] + return lines diff --git a/src/metax_api/tests/management/commands/mark_files_removed.py b/src/metax_api/tests/management/commands/mark_files_removed.py new file mode 100644 index 00000000..1b401108 --- /dev/null +++ b/src/metax_api/tests/management/commands/mark_files_removed.py @@ -0,0 +1,35 @@ +from io import StringIO +import logging + +from django.core.management import call_command +from django.test import TestCase + +from metax_api.models import File +from metax_api.tests.utils import test_data_file_path +from metax_api.management.commands.mark_files_removed import Command + +_logger = logging.getLogger(__name__) + +class RemoveFilesTest(TestCase): + + def setUp(self): + call_command("loaddata", test_data_file_path, verbosity=0) + + def test_command_output(self): + project_identifier = "project_x" + path_prefix = "/project_x_FROZEN/Experiment_X/Phase_1/2017" + path_prefix_file = None + + out = StringIO() + args = [project_identifier] + options = {"path_prefix": path_prefix, "stdout": out} + call_command('mark_files_removed', *args, **options) + + self.assertIn('Found 10 files to remove in project: ' + project_identifier + ' with path prefix: ' + path_prefix, out.getvalue()) + self.assertIn('Removed 10 files', out.getvalue()) + + files = File.objects_unfiltered.filter(project_identifier = project_identifier, file_path__startswith = path_prefix) + + self.assertEqual(10, len(files)) + for file in files: + self.assertTrue(file.removed) \ No newline at end of file diff --git a/src/metax_api/tests/utils.py b/src/metax_api/tests/utils.py index 6fd25b6a..d4cbb3e0 100755 --- a/src/metax_api/tests/utils.py +++ b/src/metax_api/tests/utils.py @@ -9,6 +9,7 @@ from base64 import b64encode from contextlib import contextmanager from json import load as json_load +from io import StringIO from os import path import jwt @@ -456,10 +457,15 @@ def _get_new_file_data( @contextmanager -def streamhandler_to_console(lggr): +def streamhandler_to_console(lggr, command_obj = None): # Use 'up to date' value of
sys.stdout for StreamHandler, # as set by test runner. - stream_handler = logging.StreamHandler(sys.stdout) + # If command_obj is given, use OutputWrapper of the given + # Command object + if command_obj == None: + stream_handler = logging.StreamHandler(sys.stdout) + else: + stream_handler = logging.StreamHandler(command_obj.stdout) lggr.addHandler(stream_handler) yield lggr.removeHandler(stream_handler) @@ -474,3 +480,17 @@ def testcase_log_console(*args, **kwargs): return testcase_log_console return testcase_decorator + +def management_command_add_test_logs(lggr): + # This decorator can be used by management commands to capture + # the logs of the command in a format that can be used in tests + def management_command_log_decorator(func): + def management_command_add_test_logs(*args, **kwargs): + # args[0] is the object which uses this decorator (typically a command object) + with streamhandler_to_console(lggr, args[0]): + return func(*args, **kwargs) + + return management_command_add_test_logs + + return management_command_log_decorator + From e7988253bef5d1b39023fc6822c97346639cdad3 Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Thu, 25 Nov 2021 09:39:15 +0200 Subject: [PATCH 111/160] Updated string format syntax --- src/metax_api/management/commands/mark_files_removed.py | 4 ++-- src/metax_api/tests/management/commands/mark_files_removed.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/metax_api/management/commands/mark_files_removed.py b/src/metax_api/management/commands/mark_files_removed.py index 0f5de913..22c9ec86 100644 --- a/src/metax_api/management/commands/mark_files_removed.py +++ b/src/metax_api/management/commands/mark_files_removed.py @@ -33,12 +33,12 @@ def handle(self, *args, **options): removed_files_sum = 0 for prefix in path_prefixes: files = File.objects.filter(project_identifier = options["project_identifier"], file_path__startswith = prefix, removed = "f") - logger.info("Found %d files to remove in project: %s with path prefix: %s" % (len(files), options["project_identifier"], prefix)) + logger.info(f"Found {len(files)} files to remove in project: {options['project_identifier']} with path prefix: {prefix}") for file in files: file.delete() removed_files_sum += len(files) - logger.info("Removed %d files" % removed_files_sum) + logger.info(f"Removed {removed_files_sum} files") diff --git a/src/metax_api/tests/management/commands/mark_files_removed.py b/src/metax_api/tests/management/commands/mark_files_removed.py index 1b401108..4cb0eaaf 100644 --- a/src/metax_api/tests/management/commands/mark_files_removed.py +++ b/src/metax_api/tests/management/commands/mark_files_removed.py @@ -25,7 +25,7 @@ def test_command_output(self): options = {"path_prefix": path_prefix, "stdout": out} call_command('mark_files_removed', *args, **options) - self.assertIn('Found 10 files to remove in project: ' + project_identifier + ' with path prefix: ' + path_prefix, out.getvalue()) + self.assertIn(f'Found 10 files to remove in project: {project_identifier} with path prefix: {path_prefix}', out.getvalue()) self.assertIn('Removed 10 files', out.getvalue()) files = File.objects_unfiltered.filter(project_identifier = project_identifier, file_path__startswith = path_prefix) From 717f6d13e090b970b28f6ca1e04166ed1028d287 Mon Sep 17 00:00:00 2001 From: Toni Date: Fri, 26 Nov 2021 14:43:25 +0200 Subject: [PATCH 112/160] put api-error objects behind settings flag, disable signal and api-error tests if they are not enabled. 
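Both features are now opt-in: ENABLE_API_ERROR_OBJECTS and ENABLE_SIGNALS default to False, so a stock deployment writes API errors to the log and never loads the signal handlers. A sketch of the branch added at both call sites in common_view.py (rabbitmq_service stands in for the module-level RabbitMQService import; names otherwise as in the diff below):

    import logging

    from django.conf import settings

    _logger = logging.getLogger(__name__)

    def store_api_error(error_json, rabbitmq_service):
        # Publish to the "apierrors" exchange only when API error objects
        # are enabled; otherwise the error is only written to the log.
        if settings.ENABLE_API_ERROR_OBJECTS:
            rabbitmq_service.publish(error_json, exchange="apierrors")
        else:
            _logger.error(f"api error: {str(error_json)}")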
--- src/metax_api/api/rest/base/views/common_view.py | 11 +++++++++-- src/metax_api/onappstart.py | 3 ++- src/metax_api/settings/__init__.py | 2 ++ src/metax_api/settings/components/common.py | 2 ++ src/metax_api/signals/__init__.py | 7 +++++-- .../tests/api/rest/v2/views/apierrors/read.py | 3 +++ src/metax_api/tests/models/signals.py | 10 ++++++---- 7 files changed, 29 insertions(+), 9 deletions(-) diff --git a/src/metax_api/api/rest/base/views/common_view.py b/src/metax_api/api/rest/base/views/common_view.py index 14de18ca..f5f0be75 100755 --- a/src/metax_api/api/rest/base/views/common_view.py +++ b/src/metax_api/api/rest/base/views/common_view.py @@ -8,6 +8,7 @@ import logging from os import path +from django.conf import settings from django.http import Http404, HttpResponse from rest_framework import status from rest_framework.exceptions import APIException, MethodNotAllowed, PermissionDenied @@ -141,7 +142,10 @@ def handle_exception(self, exc): try: error_json = ApiErrorSerializerV2.request_to_json(self.request, response) response.data["error_identifier"] = error_json["identifier"] - rabbitmq.publish(error_json, exchange="apierrors") + if settings.ENABLE_API_ERROR_OBJECTS: + rabbitmq.publish(error_json, exchange="apierrors") + else: + _logger.error(f"api error: {str(error_json)}") except Exception as e: _logger.error(f"could not send api error to rabbitmq. Error: {e}") @@ -375,7 +379,10 @@ def _check_and_store_bulk_error(self, request, response): try: error_json = ApiErrorSerializerV2.request_to_json(self.request, response, other={"bulk_request": True}) response.data["error_identifier"] = error_json["identifier"] - rabbitmq.publish(error_json, exchange="apierrors") + if settings.ENABLE_API_ERROR_OBJECTS: + rabbitmq.publish(error_json, exchange="apierrors") + else: + _logger.error(f"api error: {str(error_json)}") except Exception as e: _logger.error(f"could not send api error to rabbitmq. Error: {e}") diff --git a/src/metax_api/onappstart.py b/src/metax_api/onappstart.py index 2a0346c6..a54b4a61 100755 --- a/src/metax_api/onappstart.py +++ b/src/metax_api/onappstart.py @@ -40,7 +40,8 @@ def ready(self): # pragma: no cover # because the "django apps" have not been loaded yet. 
import json - import metax_api.signals # noqa + if settings.ENABLE_SIGNALS: + import metax_api.signals # noqa from metax_api.services import RabbitMQService as rabbitmq from metax_api.services.redis_cache_service import RedisClient diff --git a/src/metax_api/settings/__init__.py b/src/metax_api/settings/__init__.py index c9950152..6dc65e87 100755 --- a/src/metax_api/settings/__init__.py +++ b/src/metax_api/settings/__init__.py @@ -28,7 +28,9 @@ ELASTIC_SEARCH_HOSTS=(list, ["localhost"]), ELASTIC_SEARCH_PORT=(int, 9200), ELASTIC_SEARCH_USE_SSL=(bool, False), + ENABLE_API_ERROR_OBJECTS=(bool, False), ENABLE_DELETED_OBJECTS_SAVING=(bool, False), + ENABLE_SIGNALS=(bool, False), ENABLE_V1_ENDPOINTS=(bool, True), ENABLE_V2_ENDPOINTS=(bool, True), ENABLE_DJANGO_WATCHMAN=(bool, False), diff --git a/src/metax_api/settings/components/common.py b/src/metax_api/settings/components/common.py index 10756d6d..b8db7437 100755 --- a/src/metax_api/settings/components/common.py +++ b/src/metax_api/settings/components/common.py @@ -33,6 +33,8 @@ ERROR_FILES_PATH = env("ERROR_FILES_PATH") ENABLE_DELETED_OBJECTS_SAVING = env("ENABLE_DELETED_OBJECTS_SAVING") +ENABLE_SIGNALS = env("ENABLE_SIGNALS") +ENABLE_API_ERROR_OBJECTS = env("ENABLE_API_ERROR_OBJECTS") # Allow only specific hosts to access the app diff --git a/src/metax_api/signals/__init__.py b/src/metax_api/signals/__init__.py index 3c389606..f53b3d98 100644 --- a/src/metax_api/signals/__init__.py +++ b/src/metax_api/signals/__init__.py @@ -1,2 +1,5 @@ -from .post_delete import * -from .request_finished import * \ No newline at end of file +from django.conf import settings + +if settings.ENABLE_SIGNALS: + from .post_delete import * + from .request_finished import * \ No newline at end of file diff --git a/src/metax_api/tests/api/rest/v2/views/apierrors/read.py b/src/metax_api/tests/api/rest/v2/views/apierrors/read.py index ce8965e7..5d2d0d06 100755 --- a/src/metax_api/tests/api/rest/v2/views/apierrors/read.py +++ b/src/metax_api/tests/api/rest/v2/views/apierrors/read.py @@ -5,6 +5,7 @@ # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT import logging +import unittest from unittest.mock import patch from uuid import uuid4 @@ -14,10 +15,12 @@ from metax_api.models import ApiError from metax_api.tests.utils import TestClassUtils, test_data_file_path, testcase_log_console +from django.conf import settings _logger = logging.getLogger(__name__) +@unittest.skipIf(settings.ENABLE_API_ERROR_OBJECTS is not True, "Only run if API errors are objects") class ApiErrorReadBasicTests(APITestCase, TestClassUtils): """ diff --git a/src/metax_api/tests/models/signals.py b/src/metax_api/tests/models/signals.py index 3e3b57e3..08cf9947 100644 --- a/src/metax_api/tests/models/signals.py +++ b/src/metax_api/tests/models/signals.py @@ -6,21 +6,23 @@ # :license: MIT from datetime import date - +import unittest from django.core.management import call_command from django.test import TestCase +from django.conf import settings -from metax_api.models import CatalogRecord, CatalogRecordV2, DeletedObject, Directory, File +from metax_api.models import CatalogRecord, CatalogRecordV2, DeletedObject from metax_api.tests.utils import TestClassUtils, test_data_file_path +@unittest.skipIf(settings.ENABLE_DELETED_OBJECTS_SAVING is not True, "Only run if deleted objects are saved") class SignalTests(TestCase, TestClassUtils): def setUp(self): call_command("loaddata", test_data_file_path, verbosity=0) self.today = date.today().strftime("%d/%m/%Y") -""" def 
test_deleting_catalog_record_creates_new_deleted_object(self): # test that deleting CatalogRecord object creates a new deleted object CatalogRecord.objects_unfiltered.get(pk=1).delete(hard=True) deleted_object = DeletedObject.objects.last() @@ -31,6 +33,6 @@ def setUp(self): CatalogRecordV2.objects_unfiltered.get(pk=2).delete(hard=True) deleted_object_v2 = DeletedObject.objects.last() self.assertEqual(deleted_object_v2.model_name, "CatalogRecordV2") - self.assertEqual(deleted_object_v2.date_deleted.strftime("%d/%m/%Y"), self.today)""" + self.assertEqual(deleted_object_v2.date_deleted.strftime("%d/%m/%Y"), self.today) From 9ad787089116fe3c3237e15c905334d771c7b518 Mon Sep 17 00:00:00 2001 From: Toni Date: Fri, 26 Nov 2021 15:14:04 +0200 Subject: [PATCH 113/160] Add root logger log-messages to file-handler --- src/metax_api/settings/components/logging.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/metax_api/settings/components/logging.py b/src/metax_api/settings/components/logging.py index 93637a14..1d9e2e79 100755 --- a/src/metax_api/settings/components/logging.py +++ b/src/metax_api/settings/components/logging.py @@ -63,7 +63,7 @@ "metax_api": { "handlers": ["general", "console", "debug"], }, - "root": {"level": LOGGING_LEVEL, "handlers": ["console"]}, + "root": {"level": LOGGING_LEVEL, "handlers": ["console", "general"]}, }, } From af229a48963de75f284d34c3ec39bd020ff1c33d Mon Sep 17 00:00:00 2001 From: tonurmi Date: Thu, 9 Dec 2021 16:59:33 +0200 Subject: [PATCH 116/160] Update dependencies with poetry update --- poetry.lock | 678 ++++++++++++++++++++++++++--------------------- requirements.txt | 87 +++--- 2 files
changed, 422 insertions(+), 343 deletions(-) diff --git a/poetry.lock b/poetry.lock index 05e4a555..a6678110 100644 --- a/poetry.lock +++ b/poetry.lock @@ -118,23 +118,26 @@ d = ["aiohttp (>=3.3.2)", "aiohttp-cors"] [[package]] name = "certifi" -version = "2021.5.30" +version = "2021.10.8" description = "Python package for providing Mozilla's CA Bundle." category = "main" optional = false python-versions = "*" [[package]] -name = "chardet" -version = "4.0.0" -description = "Universal encoding detector for Python 2 and 3" +name = "charset-normalizer" +version = "2.0.9" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.5.0" + +[package.extras] +unicode_backport = ["unicodedata2"] [[package]] name = "click" -version = "8.0.1" +version = "8.0.3" description = "Composable command line interface toolkit" category = "dev" optional = false @@ -187,7 +190,7 @@ tests = ["responses (>=0.10.6)", "mock (>=1.3.0)", "pytest-invenio (>=1.4.0)"] [[package]] name = "decorator" -version = "5.0.9" +version = "5.1.0" description = "Decorators for Humans" category = "dev" optional = false @@ -195,7 +198,7 @@ python-versions = ">=3.5" [[package]] name = "django" -version = "3.1.13" +version = "3.1.14" description = "A high-level Python Web framework that encourages rapid development and clean, pragmatic design." category = "main" optional = false @@ -212,7 +215,7 @@ bcrypt = ["bcrypt"] [[package]] name = "django-debug-toolbar" -version = "3.2.1" +version = "3.2.2" description = "A configurable set of panels that display various debug information about the current request/response." category = "dev" optional = false @@ -243,7 +246,7 @@ django = "*" [[package]] name = "django-split-settings" -version = "1.0.1" +version = "1.1.0" description = "Organize Django settings into multiple files and directories. Easily override and modify settings. Use wildcards and optional settings files." category = "main" optional = false @@ -298,7 +301,7 @@ https = ["urllib3[secure] (>=1.24.1)"] [[package]] name = "elasticsearch" -version = "7.13.3" +version = "7.16.0" description = "Python client for Elasticsearch" category = "main" optional = false @@ -316,7 +319,7 @@ requests = ["requests (>=2.4.0,<3.0.0)"] [[package]] name = "executing" -version = "0.7.0" +version = "0.8.2" description = "Get the currently executing AST node of a frame, and other information" category = "dev" optional = false @@ -352,15 +355,15 @@ pygments = ">=2.2.0" [[package]] name = "idna" -version = "2.10" +version = "3.3" description = "Internationalized Domain Names in Applications (IDNA)" category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.5" [[package]] name = "idutils" -version = "1.1.8" +version = "1.1.9" description = "Small library for persistent identifiers used in scholarly communication." 
category = "main" optional = false @@ -371,13 +374,13 @@ isbnid-fork = ">=0.4.4" six = ">=1.10" [package.extras] -all = ["Sphinx (>=1.4.2)", "check-manifest (>=0.25)", "coverage (>=4.0)", "isort (>=4.2.2)", "pydocstyle (>=1.0)", "pytest-cache (>=1.0)", "pytest-cov (>=1.8.0)", "pytest-pep8 (>=1.0.6)", "pytest-runner (>=2.6.2)", "pytest (>=3.6.0)"] -docs = ["Sphinx (>=1.4.2)"] -tests = ["check-manifest (>=0.25)", "coverage (>=4.0)", "isort (>=4.2.2)", "pydocstyle (>=1.0)", "pytest-cache (>=1.0)", "pytest-cov (>=1.8.0)", "pytest-pep8 (>=1.0.6)", "pytest-runner (>=2.6.2)", "pytest (>=3.6.0)"] +all = ["Sphinx (>=3)", "pytest-cache (>=1.0)", "pytest-runner (>=2.6.2)", "pytest-invenio (>=1.4.0)"] +docs = ["Sphinx (>=3)"] +tests = ["pytest-cache (>=1.0)", "pytest-runner (>=2.6.2)", "pytest-invenio (>=1.4.0)"] [[package]] name = "imagesize" -version = "1.2.0" +version = "1.3.0" description = "Getting image size from png/jpeg/jpeg2000/gif file" category = "main" optional = true @@ -385,7 +388,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "importlib-metadata" -version = "4.6.1" +version = "4.8.2" description = "Read metadata from Python packages" category = "main" optional = false @@ -398,7 +401,7 @@ zipp = ">=0.5" [package.extras] docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] perf = ["ipython"] -testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] [[package]] name = "ipdb" @@ -415,7 +418,7 @@ toml = {version = ">=0.10.2", markers = "python_version > \"3.6\""} [[package]] name = "ipython" -version = "7.25.0" +version = "7.30.1" description = "IPython: Productive Interactive Computing" category = "dev" optional = false @@ -445,14 +448,6 @@ parallel = ["ipyparallel"] qtconsole = ["qtconsole"] test = ["nose (>=0.10.1)", "requests", "testpath", "pygments", "nbformat", "ipykernel", "numpy (>=1.17)"] -[[package]] -name = "ipython-genutils" -version = "0.2.0" -description = "Vestigial utilities from IPython" -category = "dev" -optional = false -python-versions = "*" - [[package]] name = "isbnid-fork" version = "0.5.2" @@ -480,7 +475,7 @@ six = "*" [[package]] name = "isort" -version = "5.9.1" +version = "5.10.1" description = "A Python utility / library to sort Python imports." category = "dev" optional = false @@ -494,7 +489,7 @@ plugins = ["setuptools"] [[package]] name = "jedi" -version = "0.18.0" +version = "0.18.1" description = "An autocompletion tool for Python that can be used for text editors." category = "dev" optional = false @@ -505,11 +500,11 @@ parso = ">=0.8.0,<0.9.0" [package.extras] qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] -testing = ["Django (<3.1)", "colorama", "docopt", "pytest (<6.0.0)"] +testing = ["Django (<3.1)", "colorama", "docopt", "pytest (<7.0.0)"] [[package]] name = "jinja2" -version = "3.0.1" +version = "3.0.3" description = "A very fast and expressive template engine." 
category = "main" optional = true @@ -553,7 +548,7 @@ tornado = {version = "*", markers = "python_version > \"2.7\""} [[package]] name = "lxml" -version = "4.6.3" +version = "4.6.4" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." category = "main" optional = false @@ -575,7 +570,7 @@ python-versions = ">=3.6" [[package]] name = "matplotlib-inline" -version = "0.1.2" +version = "0.1.3" description = "Inline Matplotlib backend for Jupyter" category = "dev" optional = false @@ -594,18 +589,18 @@ python-versions = "*" [[package]] name = "packaging" -version = "21.0" +version = "21.3" description = "Core utilities for Python packages" category = "main" optional = true python-versions = ">=3.6" [package.dependencies] -pyparsing = ">=2.0.2" +pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" [[package]] name = "parso" -version = "0.8.2" +version = "0.8.3" description = "A Python Parser" category = "dev" optional = false @@ -617,11 +612,11 @@ testing = ["docopt", "pytest (<6.0.0)"] [[package]] name = "pathspec" -version = "0.8.1" +version = "0.9.0" description = "Utility library for gitignore style pattern matching of file paths." category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" [[package]] name = "pexpect" @@ -657,18 +652,18 @@ twisted = ["twisted"] [[package]] name = "prompt-toolkit" -version = "3.0.19" +version = "3.0.24" description = "Library for building powerful interactive command lines in Python" category = "dev" optional = false -python-versions = ">=3.6.1" +python-versions = ">=3.6.2" [package.dependencies] wcwidth = "*" [[package]] name = "psycopg2-binary" -version = "2.9.1" +version = "2.9.2" description = "psycopg2 - Python-PostgreSQL Database Adapter" category = "main" optional = false @@ -684,7 +679,7 @@ python-versions = "*" [[package]] name = "pygments" -version = "2.9.0" +version = "2.10.0" description = "Pygments is a syntax highlighting package written in Python." 
category = "main" optional = false @@ -692,15 +687,15 @@ python-versions = ">=3.5" [[package]] name = "pyjwt" -version = "2.1.0" +version = "2.3.0" description = "JSON Web Token implementation in Python" category = "dev" optional = false python-versions = ">=3.6" [package.extras] -crypto = ["cryptography (>=3.3.1,<4.0.0)"] -dev = ["sphinx", "sphinx-rtd-theme", "zope.interface", "cryptography (>=3.3.1,<4.0.0)", "pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)", "mypy", "pre-commit"] +crypto = ["cryptography (>=3.3.1)"] +dev = ["sphinx", "sphinx-rtd-theme", "zope.interface", "cryptography (>=3.3.1)", "pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)", "mypy", "pre-commit"] docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] tests = ["pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)"] @@ -725,11 +720,14 @@ resolved_reference = "5f6eba1201270d930ed684e15e9b9fc885649d17" [[package]] name = "pyparsing" -version = "2.4.7" +version = "3.0.6" description = "Python parsing module" category = "main" optional = false -python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +python-versions = ">=3.6" + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "pyrsistent" @@ -741,7 +739,7 @@ python-versions = ">=3.6" [[package]] name = "python-box" -version = "5.3.0" +version = "5.4.1" description = "Advanced Python dictionaries with dot notation access" category = "main" optional = false @@ -753,10 +751,11 @@ all = ["ruamel.yaml", "toml", "msgpack"] msgpack = ["msgpack"] "ruamel.yaml" = ["ruamel.yaml"] toml = ["toml"] +yaml = ["ruamel.yaml"] [[package]] name = "python-dateutil" -version = "2.8.1" +version = "2.8.2" description = "Extensions to the standard Python datetime module" category = "main" optional = false @@ -775,7 +774,7 @@ python-versions = "*" [[package]] name = "pytz" -version = "2021.1" +version = "2021.3" description = "World timezone definitions, modern and historical" category = "main" optional = false @@ -821,7 +820,7 @@ hiredis = ["hiredis (>=0.1.3)"] [[package]] name = "regex" -version = "2021.7.6" +version = "2021.11.10" description = "Alternative regular expression module, to replace re." category = "dev" optional = false @@ -829,25 +828,25 @@ python-versions = "*" [[package]] name = "requests" -version = "2.25.1" +version = "2.26.0" description = "Python HTTP for Humans." category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" [package.dependencies] certifi = ">=2017.4.17" -chardet = ">=3.0.2,<5" -idna = ">=2.5,<3" +charset-normalizer = {version = ">=2.0.0,<2.1.0", markers = "python_version >= \"3\""} +idna = {version = ">=2.5,<4", markers = "python_version >= \"3\""} urllib3 = ">=1.21.1,<1.27" [package.extras] -security = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)"] socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"] +use_chardet_on_py3 = ["chardet (>=3.0.2,<5)"] [[package]] name = "responses" -version = "0.13.3" +version = "0.13.4" description = "A utility library for mocking out the `requests` Python library." 
category = "dev" optional = false @@ -859,7 +858,7 @@ six = "*" urllib3 = ">=1.25.10" [package.extras] -tests = ["coverage (>=3.7.1,<6.0.0)", "pytest-cov", "pytest-localserver", "flake8", "pytest (>=4.6,<5.0)", "pytest (>=4.6)", "mypy"] +tests = ["coverage (>=3.7.1,<6.0.0)", "pytest-cov", "pytest-localserver", "flake8", "types-mock", "types-requests", "types-six", "pytest (>=4.6,<5.0)", "pytest (>=4.6)", "mypy"] [[package]] name = "six" @@ -871,7 +870,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" [[package]] name = "snowballstemmer" -version = "2.1.0" +version = "2.2.0" description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." category = "main" optional = true @@ -879,7 +878,7 @@ python-versions = "*" [[package]] name = "sphinx" -version = "4.0.3" +version = "4.3.1" description = "Python documentation generator" category = "main" optional = true @@ -898,10 +897,10 @@ requests = ">=2.5.0" snowballstemmer = ">=1.1" sphinxcontrib-applehelp = "*" sphinxcontrib-devhelp = "*" -sphinxcontrib-htmlhelp = "*" +sphinxcontrib-htmlhelp = ">=2.0.0" sphinxcontrib-jsmath = "*" sphinxcontrib-qthelp = "*" -sphinxcontrib-serializinghtml = "*" +sphinxcontrib-serializinghtml = ">=1.1.5" [package.extras] docs = ["sphinxcontrib-websupport"] @@ -1012,7 +1011,7 @@ test = ["pytest"] [[package]] name = "sqlparse" -version = "0.4.1" +version = "0.4.2" description = "A non-validating SQL parser." category = "main" optional = false @@ -1020,7 +1019,7 @@ python-versions = ">=3.5" [[package]] name = "structlog" -version = "21.1.0" +version = "21.4.0" description = "Structured Logging for Python" category = "main" optional = false @@ -1030,9 +1029,9 @@ python-versions = ">=3.6" typing-extensions = {version = "*", markers = "python_version < \"3.8\""} [package.extras] -dev = ["coverage", "freezegun (>=0.2.8)", "pretend", "pytest-asyncio", "pytest-randomly", "pytest (>=6.0)", "simplejson", "furo", "sphinx", "sphinx-toolbox", "twisted", "pre-commit"] -docs = ["furo", "sphinx", "sphinx-toolbox", "twisted"] -tests = ["coverage", "freezegun (>=0.2.8)", "pretend", "pytest-asyncio", "pytest-randomly", "pytest (>=6.0)", "simplejson"] +dev = ["pre-commit", "rich", "cogapp", "tomli", "coverage", "freezegun (>=0.2.8)", "pretend", "pytest-asyncio", "pytest (>=6.0)", "simplejson", "furo", "sphinx", "sphinx-notfound-page", "sphinxcontrib-mermaid", "twisted"] +docs = ["furo", "sphinx", "sphinx-notfound-page", "sphinxcontrib-mermaid", "twisted"] +tests = ["coverage", "freezegun (>=0.2.8)", "pretend", "pytest-asyncio", "pytest (>=6.0)", "simplejson"] [[package]] name = "tblib" @@ -1060,37 +1059,34 @@ python-versions = ">= 3.5" [[package]] name = "traitlets" -version = "5.0.5" +version = "5.1.1" description = "Traitlets Python configuration system" category = "dev" optional = false python-versions = ">=3.7" -[package.dependencies] -ipython-genutils = "*" - [package.extras] test = ["pytest"] [[package]] name = "typed-ast" -version = "1.4.3" +version = "1.5.1" description = "a fork of Python 2 and 3 ast modules with type comment support" category = "dev" optional = false -python-versions = "*" +python-versions = ">=3.6" [[package]] name = "typing-extensions" -version = "3.10.0.0" -description = "Backported and Experimental Type Hints for Python 3.5+" +version = "4.0.1" +description = "Backported and Experimental Type Hints for Python 3.6+" category = "main" optional = false -python-versions = "*" +python-versions = ">=3.6" [[package]] name = "urllib3" -version = "1.26.6" +version = 
"1.26.7" description = "HTTP library with thread-safe connection pooling, file post, and more." category = "main" optional = false @@ -1119,7 +1115,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "zipp" -version = "3.5.0" +version = "3.6.0" description = "Backport of pathlib-compatible object wrapper for zip files" category = "main" optional = false @@ -1179,16 +1175,16 @@ black = [ {file = "black-20.8b1.tar.gz", hash = "sha256:1c02557aa099101b9d21496f8a914e9ed2222ef70336404eeeac8edba836fbea"}, ] certifi = [ - {file = "certifi-2021.5.30-py2.py3-none-any.whl", hash = "sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8"}, - {file = "certifi-2021.5.30.tar.gz", hash = "sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee"}, + {file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"}, + {file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"}, ] -chardet = [ - {file = "chardet-4.0.0-py2.py3-none-any.whl", hash = "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5"}, - {file = "chardet-4.0.0.tar.gz", hash = "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa"}, +charset-normalizer = [ + {file = "charset-normalizer-2.0.9.tar.gz", hash = "sha256:b0b883e8e874edfdece9c28f314e3dd5badf067342e42fb162203335ae61aa2c"}, + {file = "charset_normalizer-2.0.9-py3-none-any.whl", hash = "sha256:1eecaa09422db5be9e29d7fc65664e6c33bd06f9ced7838578ba40d58bdf3721"}, ] click = [ - {file = "click-8.0.1-py3-none-any.whl", hash = "sha256:fba402a4a47334742d782209a7c79bc448911afe1149d07bdabdf480b3e2f4b6"}, - {file = "click-8.0.1.tar.gz", hash = "sha256:8c04c11192119b1ef78ea049e0a6f0463e4c48ef00a30160c704337586f3ad7a"}, + {file = "click-8.0.3-py3-none-any.whl", hash = "sha256:353f466495adaeb40b6b5f592f9f91cb22372351c84caeb068132442a4518ef3"}, + {file = "click-8.0.3.tar.gz", hash = "sha256:410e932b050f5eed773c4cda94de75971c89cdb3155a72a0831139a79e5ecb5b"}, ] colorama = [ {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, @@ -1253,16 +1249,16 @@ datacite = [ {file = "datacite-1.1.2.tar.gz", hash = "sha256:0164bc2ff35bba643897201eb359611abb43ff5811a9ac17fb5592cd643b4443"}, ] decorator = [ - {file = "decorator-5.0.9-py3-none-any.whl", hash = "sha256:6e5c199c16f7a9f0e3a61a4a54b3d27e7dad0dbdde92b944426cb20914376323"}, - {file = "decorator-5.0.9.tar.gz", hash = "sha256:72ecfba4320a893c53f9706bebb2d55c270c1e51a28789361aa93e4a21319ed5"}, + {file = "decorator-5.1.0-py3-none-any.whl", hash = "sha256:7b12e7c3c6ab203a29e157335e9122cb03de9ab7264b137594103fd4a683b374"}, + {file = "decorator-5.1.0.tar.gz", hash = "sha256:e59913af105b9860aa2c8d3272d9de5a56a4e608db9a2f167a8480b323d529a7"}, ] django = [ - {file = "Django-3.1.13-py3-none-any.whl", hash = "sha256:a6e0d1ff11095b7394c079ade7094c73b2dc3df4a7a373c9b58ed73b77a97feb"}, - {file = "Django-3.1.13.tar.gz", hash = "sha256:9f8be75646f62204320b195062b1d696ba28aa3d45ee72fb7c888ffaebc5bdb2"}, + {file = "Django-3.1.14-py3-none-any.whl", hash = "sha256:0fabc786489af16ad87a8c170ba9d42bfd23f7b699bd5ef05675864e8d012859"}, + {file = "Django-3.1.14.tar.gz", hash = "sha256:72a4a5a136a214c39cf016ccdd6b69e2aa08c7479c66d93f3a9b5e4bb9d8a347"}, ] django-debug-toolbar = [ - {file = "django-debug-toolbar-3.2.1.tar.gz", hash = 
"sha256:a5ff2a54f24bf88286f9872836081078f4baa843dc3735ee88524e89f8821e33"}, - {file = "django_debug_toolbar-3.2.1-py3-none-any.whl", hash = "sha256:e759e63e3fe2d3110e0e519639c166816368701eab4a47fed75d7de7018467b9"}, + {file = "django-debug-toolbar-3.2.2.tar.gz", hash = "sha256:8c5b13795d4040008ee69ba82dcdd259c49db346cf7d0de6e561a49d191f0860"}, + {file = "django_debug_toolbar-3.2.2-py3-none-any.whl", hash = "sha256:d7bab7573fab35b0fd029163371b7182f5826c13da69734beb675c761d06a4d3"}, ] django-environ = [ {file = "django-environ-0.4.5.tar.gz", hash = "sha256:6c9d87660142608f63ec7d5ce5564c49b603ea8ff25da595fd6098f6dc82afde"}, @@ -1272,8 +1268,8 @@ django-rainbowtests = [ {file = "django-rainbowtests-0.6.0.tar.gz", hash = "sha256:0700ee1386935822dca296d323d67b0563cb2e5012b553ebca7c9391f2298cd9"}, ] django-split-settings = [ - {file = "django-split-settings-1.0.1.tar.gz", hash = "sha256:2da16cd967cd38315ec7ff0ae0c9db8488f8528bb2e5de26cd898328dc4bbeac"}, - {file = "django_split_settings-1.0.1-py3-none-any.whl", hash = "sha256:8d636649023289d0ef0ba08b0a4f37761adc94a29ee0ebfe65922c3cb0594ede"}, + {file = "django-split-settings-1.1.0.tar.gz", hash = "sha256:6b3aed89667a95525152026eab93a9f038ff22df6883006318b8b4a3d0ca6888"}, + {file = "django_split_settings-1.1.0-py3-none-any.whl", hash = "sha256:5d97ae64cf9ed14a831722d82ac725944667ac8c08307b7cfd22e91367b411d0"}, ] django-watchman = [ {file = "django-watchman-1.2.0.tar.gz", hash = "sha256:c38830c58984b8eb29db30a3e332968d1c7e235dee3f5c0a907d8d79a37a3125"}, @@ -1297,14 +1293,15 @@ dulwich = [ {file = "dulwich-0.19.16.tar.gz", hash = "sha256:f74561c448bfb6f04c07de731c1181ae4280017f759b0bb04fa5770aa84ca850"}, ] elasticsearch = [ - {file = "elasticsearch-7.13.3-py2.py3-none-any.whl", hash = "sha256:76cc7670449676138acbab8872eb34867db033701310d9b01fb2ecfba5fb7234"}, - {file = "elasticsearch-7.13.3.tar.gz", hash = "sha256:d539d82552804b3d41033377b9adf11c39c749ee1764af3d74b56f42177e3281"}, + {file = "elasticsearch-7.16.0-py2.py3-none-any.whl", hash = "sha256:9a5a2fd53a4fce28f15f358ab13fbcfb06f47fb2c7400ea89c10d6fd3f236ecd"}, + {file = "elasticsearch-7.16.0.tar.gz", hash = "sha256:d7f8665715ad80e3e99e42388bcc49c1b06162f72acfa1f8febe2baf5570b0ed"}, ] executing = [ - {file = "executing-0.7.0-py2.py3-none-any.whl", hash = "sha256:1971c98963857f2c03f4b688d93fc4b28ce756bd102955ea8ea7ce0a7fd9a28f"}, - {file = "executing-0.7.0.tar.gz", hash = "sha256:509fe590e9da1c0659a273c42493a25af6f43d61cf36f085fc1b6cf2c6419d1f"}, + {file = "executing-0.8.2-py2.py3-none-any.whl", hash = "sha256:32fc6077b103bd19e6494a72682d66d5763cf20a106d5aa7c5ccbea4e47b0df7"}, + {file = "executing-0.8.2.tar.gz", hash = "sha256:c23bf42e9a7b9b212f185b1b2c3c91feb895963378887bb10e64a2e612ec0023"}, ] gunicorn = [ + {file = "gunicorn-20.1.0-py3-none-any.whl", hash = "sha256:9dcc4547dbb1cb284accfb15ab5667a0e5d1881cc443e0677b4882a4067a807e"}, {file = "gunicorn-20.1.0.tar.gz", hash = "sha256:e0a968b5ba15f8a328fdfd7ab1fcb5af4470c28aaf7e55df02a99bc13138e6e8"}, ] icecream = [ @@ -1312,31 +1309,27 @@ icecream = [ {file = "icecream-2.1.1.tar.gz", hash = "sha256:47e00e3f4e8477996e7dc420b6fa8ba53f8ced17de65320fedb5b15997b76589"}, ] idna = [ - {file = "idna-2.10-py2.py3-none-any.whl", hash = "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0"}, - {file = "idna-2.10.tar.gz", hash = "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6"}, + {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"}, + {file = 
"idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"}, ] idutils = [ - {file = "IDUtils-1.1.8-py2.py3-none-any.whl", hash = "sha256:6a032009da9dad262c2d1ad4978100b5937749e27862845a7bf49c38a39e3fff"}, - {file = "IDUtils-1.1.8.tar.gz", hash = "sha256:21497bf279b64aadce923a11b1ed9b601c0bf01eb82c3f952877eef026586d78"}, + {file = "IDUtils-1.1.9-py2.py3-none-any.whl", hash = "sha256:01edcde8394b73dc725958770e6e8d20088790345639982698715749f8213acc"}, + {file = "IDUtils-1.1.9.tar.gz", hash = "sha256:80c10f2ecb5cf020aa788ea860fa1f450a5a6714a0e95ae62334d5d286b395be"}, ] imagesize = [ - {file = "imagesize-1.2.0-py2.py3-none-any.whl", hash = "sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1"}, - {file = "imagesize-1.2.0.tar.gz", hash = "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1"}, + {file = "imagesize-1.3.0-py2.py3-none-any.whl", hash = "sha256:1db2f82529e53c3e929e8926a1fa9235aa82d0bd0c580359c67ec31b2fddaa8c"}, + {file = "imagesize-1.3.0.tar.gz", hash = "sha256:cd1750d452385ca327479d45b64d9c7729ecf0b3969a58148298c77092261f9d"}, ] importlib-metadata = [ - {file = "importlib_metadata-4.6.1-py3-none-any.whl", hash = "sha256:9f55f560e116f8643ecf2922d9cd3e1c7e8d52e683178fecd9d08f6aa357e11e"}, - {file = "importlib_metadata-4.6.1.tar.gz", hash = "sha256:079ada16b7fc30dfbb5d13399a5113110dab1aa7c2bc62f66af75f0b717c8cac"}, + {file = "importlib_metadata-4.8.2-py3-none-any.whl", hash = "sha256:53ccfd5c134223e497627b9815d5030edf77d2ed573922f7a0b8f8bb81a1c100"}, + {file = "importlib_metadata-4.8.2.tar.gz", hash = "sha256:75bdec14c397f528724c1bfd9709d660b33a4d2e77387a3358f20b848bb5e5fb"}, ] ipdb = [ {file = "ipdb-0.13.9.tar.gz", hash = "sha256:951bd9a64731c444fd907a5ce268543020086a697f6be08f7cc2c9a752a278c5"}, ] ipython = [ - {file = "ipython-7.25.0-py3-none-any.whl", hash = "sha256:aa21412f2b04ad1a652e30564fff6b4de04726ce875eab222c8430edc6db383a"}, - {file = "ipython-7.25.0.tar.gz", hash = "sha256:54bbd1fe3882457aaf28ae060a5ccdef97f212a741754e420028d4ec5c2291dc"}, -] -ipython-genutils = [ - {file = "ipython_genutils-0.2.0-py2.py3-none-any.whl", hash = "sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8"}, - {file = "ipython_genutils-0.2.0.tar.gz", hash = "sha256:eb2e116e75ecef9d4d228fdc66af54269afa26ab4463042e33785b887c628ba8"}, + {file = "ipython-7.30.1-py3-none-any.whl", hash = "sha256:fc60ef843e0863dd4e24ab2bb5698f071031332801ecf8d1aeb4fb622056545c"}, + {file = "ipython-7.30.1.tar.gz", hash = "sha256:cb6aef731bf708a7727ab6cde8df87f0281b1427d41e65d62d4b68934fa54e97"}, ] isbnid-fork = [ {file = "isbnid_fork-0.5.2.tar.gz", hash = "sha256:8d878866aa0e7f06e700a37fce586c7398ce4837da8bca39683db7028a9c3837"}, @@ -1346,16 +1339,16 @@ isodate = [ {file = "isodate-0.6.0.tar.gz", hash = "sha256:2e364a3d5759479cdb2d37cce6b9376ea504db2ff90252a2e5b7cc89cc9ff2d8"}, ] isort = [ - {file = "isort-5.9.1-py3-none-any.whl", hash = "sha256:8e2c107091cfec7286bc0f68a547d0ba4c094d460b732075b6fba674f1035c0c"}, - {file = "isort-5.9.1.tar.gz", hash = "sha256:83510593e07e433b77bd5bff0f6f607dbafa06d1a89022616f02d8b699cfcd56"}, + {file = "isort-5.10.1-py3-none-any.whl", hash = "sha256:6f62d78e2f89b4500b080fe3a81690850cd254227f27f75c3a0c491a1f351ba7"}, + {file = "isort-5.10.1.tar.gz", hash = "sha256:e8443a5e7a020e9d7f97f1d7d9cd17c88bcb3bc7e218bf9cf5095fe550be2951"}, ] jedi = [ - {file = "jedi-0.18.0-py2.py3-none-any.whl", hash = "sha256:18456d83f65f400ab0c2d3319e48520420ef43b23a086fdc05dff34132f0fb93"}, - {file = 
"jedi-0.18.0.tar.gz", hash = "sha256:92550a404bad8afed881a137ec9a461fed49eca661414be45059329614ed0707"}, + {file = "jedi-0.18.1-py2.py3-none-any.whl", hash = "sha256:637c9635fcf47945ceb91cd7f320234a7be540ded6f3e99a50cb6febdfd1ba8d"}, + {file = "jedi-0.18.1.tar.gz", hash = "sha256:74137626a64a99c8eb6ae5832d99b3bdd7d29a3850fe2aa80a4126b2a7d949ab"}, ] jinja2 = [ - {file = "Jinja2-3.0.1-py3-none-any.whl", hash = "sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4"}, - {file = "Jinja2-3.0.1.tar.gz", hash = "sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4"}, + {file = "Jinja2-3.0.3-py3-none-any.whl", hash = "sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8"}, + {file = "Jinja2-3.0.3.tar.gz", hash = "sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7"}, ] jsonschema = [ {file = "jsonschema-3.2.0-py2.py3-none-any.whl", hash = "sha256:4e5b3cf8216f577bee9ce139cbe72eca3ea4f292ec60928ff24758ce626cd163"}, @@ -1365,50 +1358,90 @@ livereload = [ {file = "livereload-2.6.3.tar.gz", hash = "sha256:776f2f865e59fde56490a56bcc6773b6917366bce0c267c60ee8aaf1a0959869"}, ] lxml = [ - {file = "lxml-4.6.3-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:df7c53783a46febb0e70f6b05df2ba104610f2fb0d27023409734a3ecbb78fb2"}, - {file = "lxml-4.6.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:1b7584d421d254ab86d4f0b13ec662a9014397678a7c4265a02a6d7c2b18a75f"}, - {file = "lxml-4.6.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:079f3ae844f38982d156efce585bc540c16a926d4436712cf4baee0cce487a3d"}, - {file = "lxml-4.6.3-cp27-cp27m-win32.whl", hash = "sha256:bc4313cbeb0e7a416a488d72f9680fffffc645f8a838bd2193809881c67dd106"}, - {file = "lxml-4.6.3-cp27-cp27m-win_amd64.whl", hash = "sha256:8157dadbb09a34a6bd95a50690595e1fa0af1a99445e2744110e3dca7831c4ee"}, - {file = "lxml-4.6.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7728e05c35412ba36d3e9795ae8995e3c86958179c9770e65558ec3fdfd3724f"}, - {file = "lxml-4.6.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:4bff24dfeea62f2e56f5bab929b4428ae6caba2d1eea0c2d6eb618e30a71e6d4"}, - {file = "lxml-4.6.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:74f7d8d439b18fa4c385f3f5dfd11144bb87c1da034a466c5b5577d23a1d9b51"}, - {file = "lxml-4.6.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:f90ba11136bfdd25cae3951af8da2e95121c9b9b93727b1b896e3fa105b2f586"}, - {file = "lxml-4.6.3-cp35-cp35m-win32.whl", hash = "sha256:f2380a6376dfa090227b663f9678150ef27543483055cc327555fb592c5967e2"}, - {file = "lxml-4.6.3-cp35-cp35m-win_amd64.whl", hash = "sha256:c4f05c5a7c49d2fb70223d0d5bcfbe474cf928310ac9fa6a7c6dddc831d0b1d4"}, - {file = "lxml-4.6.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d2e35d7bf1c1ac8c538f88d26b396e73dd81440d59c1ef8522e1ea77b345ede4"}, - {file = "lxml-4.6.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:289e9ca1a9287f08daaf796d96e06cb2bc2958891d7911ac7cae1c5f9e1e0ee3"}, - {file = "lxml-4.6.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:bccbfc27563652de7dc9bdc595cb25e90b59c5f8e23e806ed0fd623755b6565d"}, - {file = "lxml-4.6.3-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:820628b7b3135403540202e60551e741f9b6d3304371712521be939470b454ec"}, - {file = "lxml-4.6.3-cp36-cp36m-win32.whl", hash = "sha256:5a0a14e264069c03e46f926be0d8919f4105c1623d620e7ec0e612a2e9bf1c04"}, - {file = "lxml-4.6.3-cp36-cp36m-win_amd64.whl", hash = "sha256:92e821e43ad382332eade6812e298dc9701c75fe289f2a2d39c7960b43d1e92a"}, - {file = "lxml-4.6.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:efd7a09678fd8b53117f6bae4fa3825e0a22b03ef0a932e070c0bdbb3a35e654"}, - {file = "lxml-4.6.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:efac139c3f0bf4f0939f9375af4b02c5ad83a622de52d6dfa8e438e8e01d0eb0"}, - {file = "lxml-4.6.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:0fbcf5565ac01dff87cbfc0ff323515c823081c5777a9fc7703ff58388c258c3"}, - {file = "lxml-4.6.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:122fba10466c7bd4178b07dba427aa516286b846b2cbd6f6169141917283aae2"}, - {file = "lxml-4.6.3-cp37-cp37m-win32.whl", hash = "sha256:3439c71103ef0e904ea0a1901611863e51f50b5cd5e8654a151740fde5e1cade"}, - {file = "lxml-4.6.3-cp37-cp37m-win_amd64.whl", hash = "sha256:4289728b5e2000a4ad4ab8da6e1db2e093c63c08bdc0414799ee776a3f78da4b"}, - {file = "lxml-4.6.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b007cbb845b28db4fb8b6a5cdcbf65bacb16a8bd328b53cbc0698688a68e1caa"}, - {file = "lxml-4.6.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:76fa7b1362d19f8fbd3e75fe2fb7c79359b0af8747e6f7141c338f0bee2f871a"}, - {file = "lxml-4.6.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:26e761ab5b07adf5f555ee82fb4bfc35bf93750499c6c7614bd64d12aaa67927"}, - {file = "lxml-4.6.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:66e575c62792c3f9ca47cb8b6fab9e35bab91360c783d1606f758761810c9791"}, - {file = "lxml-4.6.3-cp38-cp38-win32.whl", hash = "sha256:89b8b22a5ff72d89d48d0e62abb14340d9e99fd637d046c27b8b257a01ffbe28"}, - {file = "lxml-4.6.3-cp38-cp38-win_amd64.whl", hash = "sha256:2a9d50e69aac3ebee695424f7dbd7b8c6d6eb7de2a2eb6b0f6c7db6aa41e02b7"}, - {file = "lxml-4.6.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ce256aaa50f6cc9a649c51be3cd4ff142d67295bfc4f490c9134d0f9f6d58ef0"}, - {file = "lxml-4.6.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:7610b8c31688f0b1be0ef882889817939490a36d0ee880ea562a4e1399c447a1"}, - {file = "lxml-4.6.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:f8380c03e45cf09f8557bdaa41e1fa7c81f3ae22828e1db470ab2a6c96d8bc23"}, - {file = "lxml-4.6.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:884ab9b29feaca361f7f88d811b1eea9bfca36cf3da27768d28ad45c3ee6f969"}, - {file = "lxml-4.6.3-cp39-cp39-win32.whl", hash = "sha256:33bb934a044cf32157c12bfcfbb6649807da20aa92c062ef51903415c704704f"}, - {file = "lxml-4.6.3-cp39-cp39-win_amd64.whl", hash = "sha256:542d454665a3e277f76954418124d67516c5f88e51a900365ed54a9806122b83"}, - {file = "lxml-4.6.3.tar.gz", hash = "sha256:39b78571b3b30645ac77b95f7c69d1bffc4cf8c3b157c435a34da72e78c82468"}, + {file = "lxml-4.6.4-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:bbf2dc330bd44bfc0254ab37677ec60f7c7ecea55ad8ba1b8b2ea7bf20c265f5"}, + {file = "lxml-4.6.4-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b667c51682fe9b9788c69465956baa8b6999531876ccedcafc895c74ad716cd8"}, + {file = "lxml-4.6.4-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:72e730d33fe2e302fd07285f14624fca5e5e2fb2bb4fb2c3941e318c41c443d1"}, + {file = "lxml-4.6.4-cp27-cp27m-win32.whl", hash = "sha256:433df8c7dde0f9e41cbf4f36b0829d50a378116ef5e962ba3881f2f5f025c7be"}, + {file = "lxml-4.6.4-cp27-cp27m-win_amd64.whl", hash = "sha256:35752ee40f7bbf6adc9ff4e1f4b84794a3593736dcce80db32e3c2aa85e294ac"}, + {file = "lxml-4.6.4-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ff5bb2a198ea67403bb6818705e9a4f90e0313f2215428ec51001ce56d939fb"}, + {file = "lxml-4.6.4-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9b87727561c1150c0cc91c5d9d389448b37a7d15f0ba939ed3d1acb2f11bf6c5"}, + {file 
= "lxml-4.6.4-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:45fdb2899c755138722797161547a40b3e2a06feda620cc41195ee7e97806d81"}, + {file = "lxml-4.6.4-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:38b9de0de3aa689fe9fb9877ae1be1e83b8cf9621f7e62049d0436b9ecf4ad64"}, + {file = "lxml-4.6.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:662523cd2a0246740225c7e32531f2e766544122e58bee70e700a024cfc0cf81"}, + {file = "lxml-4.6.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:4aa349c5567651f34d4eaae7de6ed5b523f6d70a288f9c6fbac22d13a0784e04"}, + {file = "lxml-4.6.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:08eb9200d88b376a8ed5e50f1dc1d1a45b49305169674002a3b5929943390591"}, + {file = "lxml-4.6.4-cp310-cp310-win32.whl", hash = "sha256:bdc224f216ead849e902151112efef6e96c41ee1322e15d4e5f7c8a826929aee"}, + {file = "lxml-4.6.4-cp310-cp310-win_amd64.whl", hash = "sha256:ab6db93a2b6b66cbf62b4e4a7135f476e708e8c5c990d186584142c77d7f975a"}, + {file = "lxml-4.6.4-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:50790313df028aa05cf22be9a8da033b86c42fa32523e4fd944827b482b17bf0"}, + {file = "lxml-4.6.4-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6764998345552b1dfc9326a932d2bad6367c6b37a176bb73ada6b9486bf602f7"}, + {file = "lxml-4.6.4-cp35-cp35m-win32.whl", hash = "sha256:543b239b191bb3b6d9bef5f09f1fb2be5b7eb09ab4d386aa655e4d53fbe9ff47"}, + {file = "lxml-4.6.4-cp35-cp35m-win_amd64.whl", hash = "sha256:a75c1ad05eedb1a3ff2a34a52a4f0836cfaa892e12796ba39a7732c82701eff4"}, + {file = "lxml-4.6.4-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:47e955112ce64241fdb357acf0216081f9f3255b3ac9c502ca4b3323ec1ca558"}, + {file = "lxml-4.6.4-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:20d7c8d90d449c6a353b15ee0459abae8395dbe59ad01e406ccbf30cd81c6f98"}, + {file = "lxml-4.6.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:240db6f3228d26e3c6f4fad914b9ddaaf8707254e8b3efd564dc680c8ec3c264"}, + {file = "lxml-4.6.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:351482da8dd028834028537f08724b1de22d40dcf3bb723b469446564f409074"}, + {file = "lxml-4.6.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e678a643177c0e5ec947b645fa7bc84260dfb9b6bf8fb1fdd83008dfc2ca5928"}, + {file = "lxml-4.6.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:15d0381feb56f08f78c5cc4fc385ddfe0bde1456e37f54a9322833371aec4060"}, + {file = "lxml-4.6.4-cp36-cp36m-win32.whl", hash = "sha256:4ba74afe5ee5cb5e28d83b513a6e8f0875fda1dc1a9aea42cc0065f029160d2a"}, + {file = "lxml-4.6.4-cp36-cp36m-win_amd64.whl", hash = "sha256:9c91a73971a922c13070fd8fa5a114c858251791ba2122a941e6aa781c713e44"}, + {file = "lxml-4.6.4-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:6020c70ff695106bf80651953a23e37718ef1fee9abd060dcad8e32ab2dc13f3"}, + {file = "lxml-4.6.4-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f5dd358536b8a964bf6bd48de038754c1609e72e5f17f5d21efe2dda17594dbf"}, + {file = "lxml-4.6.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:7ae7089d81fc502df4b217ad77f03c54039fe90dac0acbe70448d7e53bfbc57e"}, + {file = "lxml-4.6.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = 
"sha256:80d10d53d3184837445ff8562021bdd37f57c4cadacbf9d8726cc16220a00d54"}, + {file = "lxml-4.6.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e95da348d57eb448d226a44b868ff2ca5786fbcbe417ac99ff62d0a7d724b9c7"}, + {file = "lxml-4.6.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:ffd65cfa33fed01735c82aca640fde4cc63f0414775cba11e06f84fae2085a6e"}, + {file = "lxml-4.6.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:877666418598f6cb289546c77ff87590cfd212f903b522b0afa0b9fb73b3ccfb"}, + {file = "lxml-4.6.4-cp37-cp37m-win32.whl", hash = "sha256:e91d24623e747eeb2d8121f4a94c6a7ad27dc48e747e2dc95bfe88632bd028a2"}, + {file = "lxml-4.6.4-cp37-cp37m-win_amd64.whl", hash = "sha256:4ec9a80dd5704ecfde54319b6964368daf02848c8954d3bacb9b64d1c7659159"}, + {file = "lxml-4.6.4-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:2901625f4a878a055d275beedc20ba9cb359cefc4386a967222fee29eb236038"}, + {file = "lxml-4.6.4-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b567178a74a2261345890eac66fbf394692a6e002709d329f28a673ca6042473"}, + {file = "lxml-4.6.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:4717123f7c11c81e0da69989e5a64079c3f402b0efeb4c6241db6c369d657bd8"}, + {file = "lxml-4.6.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:cf201bf5594d1aab139fe53e3fca457e4f8204a5bbd65d48ab3b82a16f517868"}, + {file = "lxml-4.6.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a77a3470ba37e11872c75ca95baf9b3312133a3d5a5dc720803b23098c653976"}, + {file = "lxml-4.6.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:619c6d2b552bba00491e96c0518aad94002651c108a0f7364ff2d7798812c00e"}, + {file = "lxml-4.6.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:601f0ab75538b280aaf1e720eb9d68d4fa104ac274e1e9e6971df488f4dcdb0f"}, + {file = "lxml-4.6.4-cp38-cp38-win32.whl", hash = "sha256:75d3c5bbc0ddbad03bb68b9be638599f67e4b98ed3dcd0fec9f6f39e41ee96cb"}, + {file = "lxml-4.6.4-cp38-cp38-win_amd64.whl", hash = "sha256:4341d135f5660db10184963d9c3418c3e28d7f868aaf8b11a323ebf85813f7f4"}, + {file = "lxml-4.6.4-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:9db24803fa71e3305fe4a7812782b708da21a0b774b130dd1860cf40a6d7a3ee"}, + {file = "lxml-4.6.4-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:afd60230ad9d8bcba005945ec3a343722f09e0b7f8ae804246e5d2cfc6bd71a6"}, + {file = "lxml-4.6.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:0c15e1cd55055956e77b0732270f1c6005850696bc3ef3e03d01e78af84eaa42"}, + {file = "lxml-4.6.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:6d422b3c729737d8a39279a25fa156c983a56458f8b2f97661ee6fb22b80b1d6"}, + {file = "lxml-4.6.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2eb90f6ec3c236ef2f1bb38aee7c0d23e77d423d395af6326e7cca637519a4cb"}, + {file = "lxml-4.6.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:51a0e5d243687596f46e24e464121d4b232ad772e2d1785b2a2c0eb413c285d4"}, + {file = "lxml-4.6.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d43bd68714049c84e297c005456a15ecdec818f7b5aa5868c8b0a865cfb78a44"}, + {file = "lxml-4.6.4-cp39-cp39-win32.whl", hash = "sha256:ee9e4b07b0eba4b6a521509e9e1877476729c1243246b6959de697ebea739643"}, + {file = "lxml-4.6.4-cp39-cp39-win_amd64.whl", hash = 
"sha256:48eaac2991b3036175b42ee8d3c23f4cca13f2be8426bf29401a690ab58c88f4"}, + {file = "lxml-4.6.4-pp37-pypy37_pp73-macosx_10_14_x86_64.whl", hash = "sha256:2b06a91cf7b8acea7793006e4ae50646cef0fe35ce5acd4f5cb1c77eb228e4a1"}, + {file = "lxml-4.6.4-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:523f195948a1ba4f9f5b7294d83c6cd876547dc741820750a7e5e893a24bbe38"}, + {file = "lxml-4.6.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:b0ca0ada9d3bc18bd6f611bd001a28abdd49ab9698bd6d717f7f5394c8e94628"}, + {file = "lxml-4.6.4-pp38-pypy38_pp73-macosx_10_14_x86_64.whl", hash = "sha256:197b7cb7a753cf553a45115739afd8458464a28913da00f5c525063f94cd3f48"}, + {file = "lxml-4.6.4-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:6298f5b42a26581206ef63fffa97c754245d329414108707c525512a5197f2ba"}, + {file = "lxml-4.6.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0b12c95542f04d10cba46b3ff28ea52ea56995b78cf918f0b11b05e75812bb79"}, + {file = "lxml-4.6.4.tar.gz", hash = "sha256:daf9bd1fee31f1c7a5928b3e1059e09a8d683ea58fb3ffc773b6c88cb8d1399c"}, ] markupsafe = [ + {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4dc8f9fb58f7364b63fd9f85013b780ef83c11857ae79f2feda41e270468dd9b"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20dca64a3ef2d6e4d5d615a3fd418ad3bde77a47ec8a23d984a12b5b4c74491a"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cdfba22ea2f0029c9261a4bd07e830a8da012291fbe44dc794e488b6c9bb353a"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-win32.whl", hash = "sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = 
"sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:deb993cacb280823246a026e3b2d81c493c53de6acfd5e6bfe31ab3402bb37dd"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:63f3268ba69ace99cab4e3e3b5840b03340efed0948ab8f78d2fd87ee5442a4f"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:8d206346619592c6200148b01a2142798c989edcb9c896f9ac9722a99d4e77e6"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-win32.whl", hash = "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567"}, @@ -1417,14 +1450,27 @@ markupsafe = [ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d6c7ebd4e944c85e2c3421e612a7057a2f48d478d79e61800d81468a8d842207"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f0567c4dc99f264f49fe27da5f735f414c4e7e7dd850cfd8e69f0862d7c74ea9"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:89c687013cb1cd489a0f0ac24febe8c7a666e6e221b783e53ac50ebf68e45d86"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9"}, {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aca6377c0cb8a8253e493c6b451565ac77e98c2951c45f913e0b52facdcff83f"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:04635854b943835a6ea959e948d19dcd311762c5c0c6e1f0e16ee57022669194"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6300b8454aa6930a24b9618fbb54b5a68135092bc666f7b06901f897fa5c2fee"}, {file = "MarkupSafe-2.0.1-cp38-cp38-win32.whl", hash = "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64"}, {file = "MarkupSafe-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833"}, {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26"}, @@ -1434,29 +1480,35 @@ markupsafe = [ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135"}, {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902"}, {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:4296f2b1ce8c86a6aea78613c34bb1a672ea0e3de9c6ba08a960efe0b0a09047"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f02365d4e99430a12647f09b6cc8bab61a6564363f313126f775eb4f6ef798e"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5b6d930f030f8ed98e3e6c98ffa0652bdb82601e7a016ec2ab5d7ff23baa78d1"}, {file = "MarkupSafe-2.0.1-cp39-cp39-win32.whl", hash = "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74"}, {file = "MarkupSafe-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8"}, {file = "MarkupSafe-2.0.1.tar.gz", hash = "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a"}, ] matplotlib-inline = [ - {file = "matplotlib-inline-0.1.2.tar.gz", hash = "sha256:f41d5ff73c9f5385775d5c0bc13b424535c8402fe70ea8210f93e11f3683993e"}, - {file = "matplotlib_inline-0.1.2-py3-none-any.whl", hash = "sha256:5cf1176f554abb4fa98cb362aa2b55c500147e4bdbb07e3fda359143e1da0811"}, + {file = "matplotlib-inline-0.1.3.tar.gz", hash = "sha256:a04bfba22e0d1395479f866853ec1ee28eea1485c1d69a6faf00dc3e24ff34ee"}, + {file = "matplotlib_inline-0.1.3-py3-none-any.whl", hash = "sha256:aed605ba3b72462d64d475a21a9296f400a19c4f74a31b59103d2a99ffd5aa5c"}, ] mypy-extensions = [ {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, ] packaging = [ - {file = "packaging-21.0-py3-none-any.whl", hash = "sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14"}, - {file = "packaging-21.0.tar.gz", hash = "sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7"}, + {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, + {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, ] parso = [ - {file = "parso-0.8.2-py2.py3-none-any.whl", hash = "sha256:a8c4922db71e4fdb90e0d0bc6e50f9b273d3397925e5e60a717e719201778d22"}, - {file = "parso-0.8.2.tar.gz", hash = "sha256:12b83492c6239ce32ff5eed6d3639d6a536170723c6f3f1506869f1ace413398"}, + {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, + {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"}, ] pathspec = [ - {file = "pathspec-0.8.1-py2.py3-none-any.whl", hash = "sha256:aa0cb481c4041bf52ffa7b0d8fa6cd3e88a2ca4879c533c9153882ee2556790d"}, - {file = "pathspec-0.8.1.tar.gz", hash = "sha256:86379d6b86d75816baba717e64b1a3a3469deb93bb76d613c9ce79edc5cb68fd"}, + {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"}, + {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"}, ] pexpect = [ {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"}, @@ -1471,56 +1523,63 @@ pika = [ {file = "pika-1.2.0.tar.gz", hash = "sha256:f023d6ac581086b124190cb3dc81dd581a149d216fa4540ac34f9be1e3970b89"}, ] prompt-toolkit = [ - {file = "prompt_toolkit-3.0.19-py3-none-any.whl", hash = "sha256:7089d8d2938043508aa9420ec18ce0922885304cddae87fb96eebca942299f88"}, 
- {file = "prompt_toolkit-3.0.19.tar.gz", hash = "sha256:08360ee3a3148bdb5163621709ee322ec34fc4375099afa4bbf751e9b7b7fa4f"}, + {file = "prompt_toolkit-3.0.24-py3-none-any.whl", hash = "sha256:e56f2ff799bacecd3e88165b1e2f5ebf9bcd59e80e06d395fa0cc4b8bd7bb506"}, + {file = "prompt_toolkit-3.0.24.tar.gz", hash = "sha256:1bb05628c7d87b645974a1bad3f17612be0c29fa39af9f7688030163f680bad6"}, ] psycopg2-binary = [ - {file = "psycopg2-binary-2.9.1.tar.gz", hash = "sha256:b0221ca5a9837e040ebf61f48899926b5783668b7807419e4adae8175a31f773"}, - {file = "psycopg2_binary-2.9.1-cp36-cp36m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:c250a7ec489b652c892e4f0a5d122cc14c3780f9f643e1a326754aedf82d9a76"}, - {file = "psycopg2_binary-2.9.1-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aef9aee84ec78af51107181d02fe8773b100b01c5dfde351184ad9223eab3698"}, - {file = "psycopg2_binary-2.9.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:123c3fb684e9abfc47218d3784c7b4c47c8587951ea4dd5bc38b6636ac57f616"}, - {file = "psycopg2_binary-2.9.1-cp36-cp36m-manylinux_2_24_aarch64.whl", hash = "sha256:995fc41ebda5a7a663a254a1dcac52638c3e847f48307b5416ee373da15075d7"}, - {file = "psycopg2_binary-2.9.1-cp36-cp36m-manylinux_2_24_ppc64le.whl", hash = "sha256:fbb42a541b1093385a2d8c7eec94d26d30437d0e77c1d25dae1dcc46741a385e"}, - {file = "psycopg2_binary-2.9.1-cp36-cp36m-win32.whl", hash = "sha256:20f1ab44d8c352074e2d7ca67dc00843067788791be373e67a0911998787ce7d"}, - {file = "psycopg2_binary-2.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f6fac64a38f6768e7bc7b035b9e10d8a538a9fadce06b983fb3e6fa55ac5f5ce"}, - {file = "psycopg2_binary-2.9.1-cp37-cp37m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:1e3a362790edc0a365385b1ac4cc0acc429a0c0d662d829a50b6ce743ae61b5a"}, - {file = "psycopg2_binary-2.9.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f8559617b1fcf59a9aedba2c9838b5b6aa211ffedecabca412b92a1ff75aac1a"}, - {file = "psycopg2_binary-2.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a36c7eb6152ba5467fb264d73844877be8b0847874d4822b7cf2d3c0cb8cdcb0"}, - {file = "psycopg2_binary-2.9.1-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:2f62c207d1740b0bde5c4e949f857b044818f734a3d57f1d0d0edc65050532ed"}, - {file = "psycopg2_binary-2.9.1-cp37-cp37m-manylinux_2_24_ppc64le.whl", hash = "sha256:cfc523edecddaef56f6740d7de1ce24a2fdf94fd5e704091856a201872e37f9f"}, - {file = "psycopg2_binary-2.9.1-cp37-cp37m-win32.whl", hash = "sha256:1e85b74cbbb3056e3656f1cc4781294df03383127a8114cbc6531e8b8367bf1e"}, - {file = "psycopg2_binary-2.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:1473c0215b0613dd938db54a653f68251a45a78b05f6fc21af4326f40e8360a2"}, - {file = "psycopg2_binary-2.9.1-cp38-cp38-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:35c4310f8febe41f442d3c65066ca93cccefd75013df3d8c736c5b93ec288140"}, - {file = "psycopg2_binary-2.9.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c13d72ed6af7fd2c8acbd95661cf9477f94e381fce0792c04981a8283b52917"}, - {file = "psycopg2_binary-2.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14db1752acdd2187d99cb2ca0a1a6dfe57fc65c3281e0f20e597aac8d2a5bd90"}, - {file = "psycopg2_binary-2.9.1-cp38-cp38-manylinux_2_24_aarch64.whl", hash = 
"sha256:aed4a9a7e3221b3e252c39d0bf794c438dc5453bc2963e8befe9d4cd324dff72"}, - {file = "psycopg2_binary-2.9.1-cp38-cp38-manylinux_2_24_ppc64le.whl", hash = "sha256:da113b70f6ec40e7d81b43d1b139b9db6a05727ab8be1ee559f3a69854a69d34"}, - {file = "psycopg2_binary-2.9.1-cp38-cp38-win32.whl", hash = "sha256:4235f9d5ddcab0b8dbd723dca56ea2922b485ea00e1dafacf33b0c7e840b3d32"}, - {file = "psycopg2_binary-2.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:988b47ac70d204aed01589ed342303da7c4d84b56c2f4c4b8b00deda123372bf"}, - {file = "psycopg2_binary-2.9.1-cp39-cp39-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:7360647ea04db2e7dff1648d1da825c8cf68dc5fbd80b8fb5b3ee9f068dcd21a"}, - {file = "psycopg2_binary-2.9.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca86db5b561b894f9e5f115d6a159fff2a2570a652e07889d8a383b5fae66eb4"}, - {file = "psycopg2_binary-2.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ced67f1e34e1a450cdb48eb53ca73b60aa0af21c46b9b35ac3e581cf9f00e31"}, - {file = "psycopg2_binary-2.9.1-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:0f2e04bd2a2ab54fa44ee67fe2d002bb90cee1c0f1cc0ebc3148af7b02034cbd"}, - {file = "psycopg2_binary-2.9.1-cp39-cp39-manylinux_2_24_ppc64le.whl", hash = "sha256:3242b9619de955ab44581a03a64bdd7d5e470cc4183e8fcadd85ab9d3756ce7a"}, - {file = "psycopg2_binary-2.9.1-cp39-cp39-win32.whl", hash = "sha256:0b7dae87f0b729922e06f85f667de7bf16455d411971b2043bbd9577af9d1975"}, - {file = "psycopg2_binary-2.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:b4d7679a08fea64573c969f6994a2631908bb2c0e69a7235648642f3d2e39a68"}, + {file = "psycopg2-binary-2.9.2.tar.gz", hash = "sha256:234b1f48488b2f86aac04fb00cb04e5e9bcb960f34fa8a8e41b73149d581a93b"}, + {file = "psycopg2_binary-2.9.2-cp310-cp310-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:c0e1fb7097ded2cc44d9037cfc68ad86a30341261492e7de95d180e534969fb2"}, + {file = "psycopg2_binary-2.9.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:717525cdc97b23182ff6f470fb5bf6f0bc796b5a7000c6f6699d6679991e4a5e"}, + {file = "psycopg2_binary-2.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3865d0cd919349c45603bd7e80249a382c5ecf8106304cfd153282adf9684b6a"}, + {file = "psycopg2_binary-2.9.2-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:daf6b5c62eb738872d61a1fa740d7768904911ba5a7e055ed72169d379b58beb"}, + {file = "psycopg2_binary-2.9.2-cp310-cp310-manylinux_2_24_ppc64le.whl", hash = "sha256:3ac83656ff4fbe7f2a956ab085e3eb1d678df54759965d509bdd6a06ce520d49"}, + {file = "psycopg2_binary-2.9.2-cp310-cp310-win32.whl", hash = "sha256:a04cfa231e7d9b63639e62166a4051cb47ca599fa341463fa3e1c48585fcee64"}, + {file = "psycopg2_binary-2.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:c6e16e085fe6dc6c099ee0be56657aa9ad71027465ef9591d302ba230c404c7e"}, + {file = "psycopg2_binary-2.9.2-cp36-cp36m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:53912199abb626a7249c662e72b70b4f57bf37f840599cec68625171435790dd"}, + {file = "psycopg2_binary-2.9.2-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:029e09a892b9ebc3c77851f69ce0720e1b72a9c6850460cee49b14dfbf9ccdd2"}, + {file = "psycopg2_binary-2.9.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db1b03c189f85b8df29030ad32d521dd7dcb862fd5f8892035314f5b886e70ce"}, 
+ {file = "psycopg2_binary-2.9.2-cp36-cp36m-manylinux_2_24_aarch64.whl", hash = "sha256:2eecbdc5fa5886f2dd6cc673ce4291cc0fb8900965315268960ad9c2477f8276"}, + {file = "psycopg2_binary-2.9.2-cp36-cp36m-manylinux_2_24_ppc64le.whl", hash = "sha256:a77e98c68b0e6c51d4d6a994d22b30e77276cbd33e4aabdde03b9ad3a2c148aa"}, + {file = "psycopg2_binary-2.9.2-cp36-cp36m-win32.whl", hash = "sha256:bf31e6fdb4ec1f6d98a07f48836508ed6edd19b48b13bbf168fbc1bd014b4ca2"}, + {file = "psycopg2_binary-2.9.2-cp36-cp36m-win_amd64.whl", hash = "sha256:f9c37ecb173d76cf49e519133fd70851b8f9c38b6b8c1cb7fcfc71368d4cc6fc"}, + {file = "psycopg2_binary-2.9.2-cp37-cp37m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:a507db7758953b1b170c4310691a1a89877029b1e11b08ba5fc8ae3ddb35596b"}, + {file = "psycopg2_binary-2.9.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e4bbcfb403221ea1953f3e0a85cef00ed15c1683a66cf35c956a7e37c33a4c4"}, + {file = "psycopg2_binary-2.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4dff0f15af6936c6fe6da7067b4216edbbe076ad8625da819cc066591b1133c"}, + {file = "psycopg2_binary-2.9.2-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:8d2aafe46eb87742425ece38130510fbb035787ee89a329af299029c4d9ae318"}, + {file = "psycopg2_binary-2.9.2-cp37-cp37m-manylinux_2_24_ppc64le.whl", hash = "sha256:37c8f00f7a2860bac9f7a54f03c243fc1dd9b367e5b2b52f5a02e5f4e9d8c49b"}, + {file = "psycopg2_binary-2.9.2-cp37-cp37m-win32.whl", hash = "sha256:ef97578fab5115e3af4334dd3376dea3c3a79328a3314b21ec7ced02920b916d"}, + {file = "psycopg2_binary-2.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7e6bd4f532c2cd297b81114526176b240109a1c52020adca69c3f3226c65dc18"}, + {file = "psycopg2_binary-2.9.2-cp38-cp38-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:eeee7b18c51d02e49bf1984d7af26e8843fe68e31fa1cbab5366ebdfa1c89ade"}, + {file = "psycopg2_binary-2.9.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:497372cc76e6cbce2f51b37be141f360a321423c03eb9be45524b1d123f4cd11"}, + {file = "psycopg2_binary-2.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5671699aff57d22a245b7f4bba89e3de97dc841c5e98bd7f685429b2b20eca47"}, + {file = "psycopg2_binary-2.9.2-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:b9d45374ba98c1184df9cce93a0b766097544f8bdfcd5de83ff10f939c193125"}, + {file = "psycopg2_binary-2.9.2-cp38-cp38-manylinux_2_24_ppc64le.whl", hash = "sha256:a1852c5bef7e5f52bd43fde5eda610d4df0fb2efc31028150933e84b4140d47a"}, + {file = "psycopg2_binary-2.9.2-cp38-cp38-win32.whl", hash = "sha256:108b0380969ddab7c8ef2a813a57f87b308b2f88ec15f1a1e7b653964a3cfb25"}, + {file = "psycopg2_binary-2.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:14427437117f38e65f71db65d8eafd0e86837be456567798712b8da89db2b2dd"}, + {file = "psycopg2_binary-2.9.2-cp39-cp39-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:578c279cd1ce04f05ae0912530ece00bab92854911808e5aec27588aba87e361"}, + {file = "psycopg2_binary-2.9.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c2dea4deac3dd3687e32daeb0712ee96c535970dfdded37a11de6a21145ab0e"}, + {file = "psycopg2_binary-2.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b592f09ff18cfcc9037b9a976fcd62db48cae9dbd5385f2471d4c2ba40c52b4d"}, + {file = 
"psycopg2_binary-2.9.2-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:3a320e7a804f3886a599fea507364aaafbb8387027fffcdfbd34d96316c806c7"}, + {file = "psycopg2_binary-2.9.2-cp39-cp39-manylinux_2_24_ppc64le.whl", hash = "sha256:7585ca73dcfe326f31fafa8f96e6bb98ea9e9e46c7a1924ec8101d797914ae27"}, + {file = "psycopg2_binary-2.9.2-cp39-cp39-win32.whl", hash = "sha256:9c0aaad07941419926b9bd00171e49fe6b06e42e5527fb91671e137fe6c93d77"}, + {file = "psycopg2_binary-2.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:aa2847d8073951dbc84c4f8b32c620764db3c2eb0d99a04835fecfab7d04816e"}, ] ptyprocess = [ {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, ] pygments = [ - {file = "Pygments-2.9.0-py3-none-any.whl", hash = "sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e"}, - {file = "Pygments-2.9.0.tar.gz", hash = "sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f"}, + {file = "Pygments-2.10.0-py3-none-any.whl", hash = "sha256:b8e67fe6af78f492b3c4b3e2970c0624cbf08beb1e493b2c99b9fa1b67a20380"}, + {file = "Pygments-2.10.0.tar.gz", hash = "sha256:f398865f7eb6874156579fdf36bc840a03cab64d1cde9e93d68f46a425ec52c6"}, ] pyjwt = [ - {file = "PyJWT-2.1.0-py3-none-any.whl", hash = "sha256:934d73fbba91b0483d3857d1aff50e96b2a892384ee2c17417ed3203f173fca1"}, - {file = "PyJWT-2.1.0.tar.gz", hash = "sha256:fba44e7898bbca160a2b2b501f492824fc8382485d3a6f11ba5d0c1937ce6130"}, + {file = "PyJWT-2.3.0-py3-none-any.whl", hash = "sha256:e0c4bb8d9f0af0c7f5b1ec4c5036309617d03d56932877f2f7a0beeb5318322f"}, + {file = "PyJWT-2.3.0.tar.gz", hash = "sha256:b888b4d56f06f6dcd777210c334e69c737be74755d3e5e9ee3fe67dc18a0ee41"}, ] pyoai = [] pyparsing = [ - {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"}, - {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"}, + {file = "pyparsing-3.0.6-py3-none-any.whl", hash = "sha256:04ff808a5b90911829c55c4e26f75fa5ca8a2f5f36aa3a51f68e27033341d3e4"}, + {file = "pyparsing-3.0.6.tar.gz", hash = "sha256:d9bdec0013ef1eb5a84ab39a3b3868911598afa494f5faa038647101504e2b81"}, ] pyrsistent = [ {file = "pyrsistent-0.18.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f4c8cabb46ff8e5d61f56a037974228e978f26bfefce4f61a4b1ac0ba7a2ab72"}, @@ -1546,12 +1605,12 @@ pyrsistent = [ {file = "pyrsistent-0.18.0.tar.gz", hash = "sha256:773c781216f8c2900b42a7b638d5b517bb134ae1acbebe4d1e8f1f41ea60eb4b"}, ] python-box = [ - {file = "python-box-5.3.0.tar.gz", hash = "sha256:4ed4ef5d34de505a65c01e3f1911de8cdb29484fcae0c035141dce535c6c194a"}, - {file = "python_box-5.3.0-py3-none-any.whl", hash = "sha256:f2a531f9f5bbef078c175fad6abb31e9b59d40d121ea79993197e6bb221c6be6"}, + {file = "python-box-5.4.1.tar.gz", hash = "sha256:b68e0f8abc86f3deda751b3390f64df64a0989459de51ba4db949662a7b4d8ac"}, + {file = "python_box-5.4.1-py3-none-any.whl", hash = "sha256:60ae9156de34cf92b899bd099580950df70a5b0813e67a3310a1cdd1976457fa"}, ] python-dateutil = [ - {file = "python-dateutil-2.8.1.tar.gz", hash = "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c"}, - {file = "python_dateutil-2.8.1-py2.py3-none-any.whl", hash = "sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a"}, + {file = "python-dateutil-2.8.2.tar.gz", hash = 
"sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, ] python-simplexquery = [ {file = "python-simplexquery-1.0.5.3.win32-py2.6.exe", hash = "sha256:a32281dd8a923930c177c7a5b6124e4f358c02773e40e6e734a1217fb9a5ab86"}, @@ -1561,8 +1620,8 @@ python-simplexquery = [ {file = "python-simplexquery-1.0.5.3.zip", hash = "sha256:4849070678538d26778c9902c58eac13a88beaffc526e69ee5e3db3744499a2b"}, ] pytz = [ - {file = "pytz-2021.1-py2.py3-none-any.whl", hash = "sha256:eb10ce3e7736052ed3623d49975ce333bcd712c7bb19a58b9e2089d4057d0798"}, - {file = "pytz-2021.1.tar.gz", hash = "sha256:83a4a90894bf38e243cf052c8b58f381bfe9a7a483f6a9cab140bc7f702ac4da"}, + {file = "pytz-2021.3-py2.py3-none-any.whl", hash = "sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c"}, + {file = "pytz-2021.3.tar.gz", hash = "sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326"}, ] pyyaml = [ {file = "PyYAML-5.4.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922"}, @@ -1604,67 +1663,100 @@ redis = [ {file = "redis-3.5.3.tar.gz", hash = "sha256:0e7e0cfca8660dea8b7d5cd8c4f6c5e29e11f31158c0b0ae91a397f00e5a05a2"}, ] regex = [ - {file = "regex-2021.7.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e6a1e5ca97d411a461041d057348e578dc344ecd2add3555aedba3b408c9f874"}, - {file = "regex-2021.7.6-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:6afe6a627888c9a6cfbb603d1d017ce204cebd589d66e0703309b8048c3b0854"}, - {file = "regex-2021.7.6-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:ccb3d2190476d00414aab36cca453e4596e8f70a206e2aa8db3d495a109153d2"}, - {file = "regex-2021.7.6-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:ed693137a9187052fc46eedfafdcb74e09917166362af4cc4fddc3b31560e93d"}, - {file = "regex-2021.7.6-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:99d8ab206a5270c1002bfcf25c51bf329ca951e5a169f3b43214fdda1f0b5f0d"}, - {file = "regex-2021.7.6-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:b85ac458354165405c8a84725de7bbd07b00d9f72c31a60ffbf96bb38d3e25fa"}, - {file = "regex-2021.7.6-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:3f5716923d3d0bfb27048242a6e0f14eecdb2e2a7fac47eda1d055288595f222"}, - {file = "regex-2021.7.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5983c19d0beb6af88cb4d47afb92d96751fb3fa1784d8785b1cdf14c6519407"}, - {file = "regex-2021.7.6-cp36-cp36m-win32.whl", hash = "sha256:c92831dac113a6e0ab28bc98f33781383fe294df1a2c3dfd1e850114da35fd5b"}, - {file = "regex-2021.7.6-cp36-cp36m-win_amd64.whl", hash = "sha256:791aa1b300e5b6e5d597c37c346fb4d66422178566bbb426dd87eaae475053fb"}, - {file = "regex-2021.7.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:59506c6e8bd9306cd8a41511e32d16d5d1194110b8cfe5a11d102d8b63cf945d"}, - {file = "regex-2021.7.6-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:564a4c8a29435d1f2256ba247a0315325ea63335508ad8ed938a4f14c4116a5d"}, - {file = "regex-2021.7.6-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:59c00bb8dd8775473cbfb967925ad2c3ecc8886b3b2d0c90a8e2707e06c743f0"}, - {file = "regex-2021.7.6-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:9a854b916806c7e3b40e6616ac9e85d3cdb7649d9e6590653deb5b341a736cec"}, - {file = "regex-2021.7.6-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:db2b7df831c3187a37f3bb80ec095f249fa276dbe09abd3d35297fc250385694"}, - {file = 
"regex-2021.7.6-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:173bc44ff95bc1e96398c38f3629d86fa72e539c79900283afa895694229fe6a"}, - {file = "regex-2021.7.6-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:15dddb19823f5147e7517bb12635b3c82e6f2a3a6b696cc3e321522e8b9308ad"}, - {file = "regex-2021.7.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ddeabc7652024803666ea09f32dd1ed40a0579b6fbb2a213eba590683025895"}, - {file = "regex-2021.7.6-cp37-cp37m-win32.whl", hash = "sha256:f080248b3e029d052bf74a897b9d74cfb7643537fbde97fe8225a6467fb559b5"}, - {file = "regex-2021.7.6-cp37-cp37m-win_amd64.whl", hash = "sha256:d8bbce0c96462dbceaa7ac4a7dfbbee92745b801b24bce10a98d2f2b1ea9432f"}, - {file = "regex-2021.7.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:edd1a68f79b89b0c57339bce297ad5d5ffcc6ae7e1afdb10f1947706ed066c9c"}, - {file = "regex-2021.7.6-cp38-cp38-manylinux1_i686.whl", hash = "sha256:422dec1e7cbb2efbbe50e3f1de36b82906def93ed48da12d1714cabcd993d7f0"}, - {file = "regex-2021.7.6-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cbe23b323988a04c3e5b0c387fe3f8f363bf06c0680daf775875d979e376bd26"}, - {file = "regex-2021.7.6-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:0eb2c6e0fcec5e0f1d3bcc1133556563222a2ffd2211945d7b1480c1b1a42a6f"}, - {file = "regex-2021.7.6-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:1c78780bf46d620ff4fff40728f98b8afd8b8e35c3efd638c7df67be2d5cddbf"}, - {file = "regex-2021.7.6-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:bc84fb254a875a9f66616ed4538542fb7965db6356f3df571d783f7c8d256edd"}, - {file = "regex-2021.7.6-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:598c0a79b4b851b922f504f9f39a863d83ebdfff787261a5ed061c21e67dd761"}, - {file = "regex-2021.7.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:875c355360d0f8d3d827e462b29ea7682bf52327d500a4f837e934e9e4656068"}, - {file = "regex-2021.7.6-cp38-cp38-win32.whl", hash = "sha256:e586f448df2bbc37dfadccdb7ccd125c62b4348cb90c10840d695592aa1b29e0"}, - {file = "regex-2021.7.6-cp38-cp38-win_amd64.whl", hash = "sha256:2fe5e71e11a54e3355fa272137d521a40aace5d937d08b494bed4529964c19c4"}, - {file = "regex-2021.7.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6110bab7eab6566492618540c70edd4d2a18f40ca1d51d704f1d81c52d245026"}, - {file = "regex-2021.7.6-cp39-cp39-manylinux1_i686.whl", hash = "sha256:4f64fc59fd5b10557f6cd0937e1597af022ad9b27d454e182485f1db3008f417"}, - {file = "regex-2021.7.6-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:89e5528803566af4df368df2d6f503c84fbfb8249e6631c7b025fe23e6bd0cde"}, - {file = "regex-2021.7.6-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:2366fe0479ca0e9afa534174faa2beae87847d208d457d200183f28c74eaea59"}, - {file = "regex-2021.7.6-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:f9392a4555f3e4cb45310a65b403d86b589adc773898c25a39184b1ba4db8985"}, - {file = "regex-2021.7.6-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:2bceeb491b38225b1fee4517107b8491ba54fba77cf22a12e996d96a3c55613d"}, - {file = "regex-2021.7.6-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:f98dc35ab9a749276f1a4a38ab3e0e2ba1662ce710f6530f5b0a6656f1c32b58"}, - {file = "regex-2021.7.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:319eb2a8d0888fa6f1d9177705f341bc9455a2c8aca130016e52c7fe8d6c37a3"}, - {file = "regex-2021.7.6-cp39-cp39-win32.whl", hash = "sha256:eaf58b9e30e0e546cdc3ac06cf9165a1ca5b3de8221e9df679416ca667972035"}, - {file = "regex-2021.7.6-cp39-cp39-win_amd64.whl", hash = 
"sha256:4c9c3155fe74269f61e27617529b7f09552fbb12e44b1189cebbdb24294e6e1c"}, - {file = "regex-2021.7.6.tar.gz", hash = "sha256:8394e266005f2d8c6f0bc6780001f7afa3ef81a7a2111fa35058ded6fce79e4d"}, + {file = "regex-2021.11.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9345b6f7ee578bad8e475129ed40123d265464c4cfead6c261fd60fc9de00bcf"}, + {file = "regex-2021.11.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:416c5f1a188c91e3eb41e9c8787288e707f7d2ebe66e0a6563af280d9b68478f"}, + {file = "regex-2021.11.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0538c43565ee6e703d3a7c3bdfe4037a5209250e8502c98f20fea6f5fdf2965"}, + {file = "regex-2021.11.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee1227cf08b6716c85504aebc49ac827eb88fcc6e51564f010f11a406c0a667"}, + {file = "regex-2021.11.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6650f16365f1924d6014d2ea770bde8555b4a39dc9576abb95e3cd1ff0263b36"}, + {file = "regex-2021.11.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30ab804ea73972049b7a2a5c62d97687d69b5a60a67adca07eb73a0ddbc9e29f"}, + {file = "regex-2021.11.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:68a067c11463de2a37157930d8b153005085e42bcb7ad9ca562d77ba7d1404e0"}, + {file = "regex-2021.11.10-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:162abfd74e88001d20cb73ceaffbfe601469923e875caf9118333b1a4aaafdc4"}, + {file = "regex-2021.11.10-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b9ed0b1e5e0759d6b7f8e2f143894b2a7f3edd313f38cf44e1e15d360e11749b"}, + {file = "regex-2021.11.10-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:473e67837f786404570eae33c3b64a4b9635ae9f00145250851a1292f484c063"}, + {file = "regex-2021.11.10-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2fee3ed82a011184807d2127f1733b4f6b2ff6ec7151d83ef3477f3b96a13d03"}, + {file = "regex-2021.11.10-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:d5fd67df77bab0d3f4ea1d7afca9ef15c2ee35dfb348c7b57ffb9782a6e4db6e"}, + {file = "regex-2021.11.10-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5d408a642a5484b9b4d11dea15a489ea0928c7e410c7525cd892f4d04f2f617b"}, + {file = "regex-2021.11.10-cp310-cp310-win32.whl", hash = "sha256:98ba568e8ae26beb726aeea2273053c717641933836568c2a0278a84987b2a1a"}, + {file = "regex-2021.11.10-cp310-cp310-win_amd64.whl", hash = "sha256:780b48456a0f0ba4d390e8b5f7c661fdd218934388cde1a974010a965e200e12"}, + {file = "regex-2021.11.10-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:dba70f30fd81f8ce6d32ddeef37d91c8948e5d5a4c63242d16a2b2df8143aafc"}, + {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1f54b9b4b6c53369f40028d2dd07a8c374583417ee6ec0ea304e710a20f80a0"}, + {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fbb9dc00e39f3e6c0ef48edee202f9520dafb233e8b51b06b8428cfcb92abd30"}, + {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666abff54e474d28ff42756d94544cdfd42e2ee97065857413b72e8a2d6a6345"}, + {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5537f71b6d646f7f5f340562ec4c77b6e1c915f8baae822ea0b7e46c1f09b733"}, + {file = 
"regex-2021.11.10-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2e07c6a26ed4bea91b897ee2b0835c21716d9a469a96c3e878dc5f8c55bb23"}, + {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ca5f18a75e1256ce07494e245cdb146f5a9267d3c702ebf9b65c7f8bd843431e"}, + {file = "regex-2021.11.10-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:74cbeac0451f27d4f50e6e8a8f3a52ca074b5e2da9f7b505c4201a57a8ed6286"}, + {file = "regex-2021.11.10-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:3598893bde43091ee5ca0a6ad20f08a0435e93a69255eeb5f81b85e81e329264"}, + {file = "regex-2021.11.10-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:50a7ddf3d131dc5633dccdb51417e2d1910d25cbcf842115a3a5893509140a3a"}, + {file = "regex-2021.11.10-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:61600a7ca4bcf78a96a68a27c2ae9389763b5b94b63943d5158f2a377e09d29a"}, + {file = "regex-2021.11.10-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:563d5f9354e15e048465061509403f68424fef37d5add3064038c2511c8f5e00"}, + {file = "regex-2021.11.10-cp36-cp36m-win32.whl", hash = "sha256:93a5051fcf5fad72de73b96f07d30bc29665697fb8ecdfbc474f3452c78adcf4"}, + {file = "regex-2021.11.10-cp36-cp36m-win_amd64.whl", hash = "sha256:b483c9d00a565633c87abd0aaf27eb5016de23fed952e054ecc19ce32f6a9e7e"}, + {file = "regex-2021.11.10-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:fff55f3ce50a3ff63ec8e2a8d3dd924f1941b250b0aac3d3d42b687eeff07a8e"}, + {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e32d2a2b02ccbef10145df9135751abea1f9f076e67a4e261b05f24b94219e36"}, + {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:53db2c6be8a2710b359bfd3d3aa17ba38f8aa72a82309a12ae99d3c0c3dcd74d"}, + {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2207ae4f64ad3af399e2d30dde66f0b36ae5c3129b52885f1bffc2f05ec505c8"}, + {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5ca078bb666c4a9d1287a379fe617a6dccd18c3e8a7e6c7e1eb8974330c626a"}, + {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd33eb9bdcfbabab3459c9ee651d94c842bc8a05fabc95edf4ee0c15a072495e"}, + {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:05b7d6d7e64efe309972adab77fc2af8907bb93217ec60aa9fe12a0dad35874f"}, + {file = "regex-2021.11.10-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:42b50fa6666b0d50c30a990527127334d6b96dd969011e843e726a64011485da"}, + {file = "regex-2021.11.10-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6e1d2cc79e8dae442b3fa4a26c5794428b98f81389af90623ffcc650ce9f6732"}, + {file = "regex-2021.11.10-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:0416f7399e918c4b0e074a0f66e5191077ee2ca32a0f99d4c187a62beb47aa05"}, + {file = "regex-2021.11.10-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:ce298e3d0c65bd03fa65ffcc6db0e2b578e8f626d468db64fdf8457731052942"}, + {file = "regex-2021.11.10-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dc07f021ee80510f3cd3af2cad5b6a3b3a10b057521d9e6aaeb621730d320c5a"}, + {file = "regex-2021.11.10-cp37-cp37m-win32.whl", hash = "sha256:e71255ba42567d34a13c03968736c5d39bb4a97ce98188fafb27ce981115beec"}, + {file = 
"regex-2021.11.10-cp37-cp37m-win_amd64.whl", hash = "sha256:07856afef5ffcc052e7eccf3213317fbb94e4a5cd8177a2caa69c980657b3cb4"}, + {file = "regex-2021.11.10-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ba05430e819e58544e840a68b03b28b6d328aff2e41579037e8bab7653b37d83"}, + {file = "regex-2021.11.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7f301b11b9d214f83ddaf689181051e7f48905568b0c7017c04c06dfd065e244"}, + {file = "regex-2021.11.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aaa4e0705ef2b73dd8e36eeb4c868f80f8393f5f4d855e94025ce7ad8525f50"}, + {file = "regex-2021.11.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:788aef3549f1924d5c38263104dae7395bf020a42776d5ec5ea2b0d3d85d6646"}, + {file = "regex-2021.11.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f8af619e3be812a2059b212064ea7a640aff0568d972cd1b9e920837469eb3cb"}, + {file = "regex-2021.11.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85bfa6a5413be0ee6c5c4a663668a2cad2cbecdee367630d097d7823041bdeec"}, + {file = "regex-2021.11.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f23222527b307970e383433daec128d769ff778d9b29343fb3496472dc20dabe"}, + {file = "regex-2021.11.10-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:da1a90c1ddb7531b1d5ff1e171b4ee61f6345119be7351104b67ff413843fe94"}, + {file = "regex-2021.11.10-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f5be7805e53dafe94d295399cfbe5227f39995a997f4fd8539bf3cbdc8f47ca8"}, + {file = "regex-2021.11.10-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a955b747d620a50408b7fdf948e04359d6e762ff8a85f5775d907ceced715129"}, + {file = "regex-2021.11.10-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:139a23d1f5d30db2cc6c7fd9c6d6497872a672db22c4ae1910be22d4f4b2068a"}, + {file = "regex-2021.11.10-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ca49e1ab99593438b204e00f3970e7a5f70d045267051dfa6b5f4304fcfa1dbf"}, + {file = "regex-2021.11.10-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:96fc32c16ea6d60d3ca7f63397bff5c75c5a562f7db6dec7d412f7c4d2e78ec0"}, + {file = "regex-2021.11.10-cp38-cp38-win32.whl", hash = "sha256:0617383e2fe465732af4509e61648b77cbe3aee68b6ac8c0b6fe934db90be5cc"}, + {file = "regex-2021.11.10-cp38-cp38-win_amd64.whl", hash = "sha256:a3feefd5e95871872673b08636f96b61ebef62971eab044f5124fb4dea39919d"}, + {file = "regex-2021.11.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f7f325be2804246a75a4f45c72d4ce80d2443ab815063cdf70ee8fb2ca59ee1b"}, + {file = "regex-2021.11.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:537ca6a3586931b16a85ac38c08cc48f10fc870a5b25e51794c74df843e9966d"}, + {file = "regex-2021.11.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eef2afb0fd1747f33f1ee3e209bce1ed582d1896b240ccc5e2697e3275f037c7"}, + {file = "regex-2021.11.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:432bd15d40ed835a51617521d60d0125867f7b88acf653e4ed994a1f8e4995dc"}, + {file = "regex-2021.11.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b43c2b8a330a490daaef5a47ab114935002b13b3f9dc5da56d5322ff218eeadb"}, + {file = "regex-2021.11.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:962b9a917dd7ceacbe5cd424556914cb0d636001e393b43dc886ba31d2a1e449"}, + {file = 
"regex-2021.11.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa8c626d6441e2d04b6ee703ef2d1e17608ad44c7cb75258c09dd42bacdfc64b"}, + {file = "regex-2021.11.10-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3c5fb32cc6077abad3bbf0323067636d93307c9fa93e072771cf9a64d1c0f3ef"}, + {file = "regex-2021.11.10-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:cd410a1cbb2d297c67d8521759ab2ee3f1d66206d2e4328502a487589a2cb21b"}, + {file = "regex-2021.11.10-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e6096b0688e6e14af6a1b10eaad86b4ff17935c49aa774eac7c95a57a4e8c296"}, + {file = "regex-2021.11.10-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:529801a0d58809b60b3531ee804d3e3be4b412c94b5d267daa3de7fadef00f49"}, + {file = "regex-2021.11.10-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0f594b96fe2e0821d026365f72ac7b4f0b487487fb3d4aaf10dd9d97d88a9737"}, + {file = "regex-2021.11.10-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2409b5c9cef7054dde93a9803156b411b677affc84fca69e908b1cb2c540025d"}, + {file = "regex-2021.11.10-cp39-cp39-win32.whl", hash = "sha256:3b5df18db1fccd66de15aa59c41e4f853b5df7550723d26aa6cb7f40e5d9da5a"}, + {file = "regex-2021.11.10-cp39-cp39-win_amd64.whl", hash = "sha256:83ee89483672b11f8952b158640d0c0ff02dc43d9cb1b70c1564b49abe92ce29"}, + {file = "regex-2021.11.10.tar.gz", hash = "sha256:f341ee2df0999bfdf7a95e448075effe0db212a59387de1a70690e4acb03d4c6"}, ] requests = [ - {file = "requests-2.25.1-py2.py3-none-any.whl", hash = "sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e"}, - {file = "requests-2.25.1.tar.gz", hash = "sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804"}, + {file = "requests-2.26.0-py2.py3-none-any.whl", hash = "sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24"}, + {file = "requests-2.26.0.tar.gz", hash = "sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7"}, ] responses = [ - {file = "responses-0.13.3-py2.py3-none-any.whl", hash = "sha256:b54067596f331786f5ed094ff21e8d79e6a1c68ef625180a7d34808d6f36c11b"}, - {file = "responses-0.13.3.tar.gz", hash = "sha256:18a5b88eb24143adbf2b4100f328a2f5bfa72fbdacf12d97d41f07c26c45553d"}, + {file = "responses-0.13.4-py2.py3-none-any.whl", hash = "sha256:d8d0f655710c46fd3513b9202a7f0dcedd02ca0f8cf4976f27fa8ab5b81e656d"}, + {file = "responses-0.13.4.tar.gz", hash = "sha256:9476775d856d3c24ae660bbebe29fb6d789d4ad16acd723efbfb6ee20990b899"}, ] six = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] snowballstemmer = [ - {file = "snowballstemmer-2.1.0-py2.py3-none-any.whl", hash = "sha256:b51b447bea85f9968c13b650126a888aabd4cb4463fca868ec596826325dedc2"}, - {file = "snowballstemmer-2.1.0.tar.gz", hash = "sha256:e997baa4f2e9139951b6f4c631bad912dfd3c792467e2f03d7239464af90e914"}, + {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, + {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, ] sphinx = [ - {file = "Sphinx-4.0.3-py3-none-any.whl", hash = "sha256:5747f3c855028076fcff1e4df5e75e07c836f0ac11f7df886747231092cfe4ad"}, - {file = "Sphinx-4.0.3.tar.gz", hash = 
"sha256:dff357e6a208eb7edb2002714733ac21a9fe597e73609ff417ab8cf0c6b4fbb8"}, + {file = "Sphinx-4.3.1-py3-none-any.whl", hash = "sha256:048dac56039a5713f47a554589dc98a442b39226a2b9ed7f82797fcb2fe9253f"}, + {file = "Sphinx-4.3.1.tar.gz", hash = "sha256:32a5b3e9a1b176cc25ed048557d4d3d01af635e6b76c5bc7a43b0a34447fbd45"}, ] sphinx-autobuild = [ {file = "sphinx-autobuild-2021.3.14.tar.gz", hash = "sha256:de1ca3b66e271d2b5b5140c35034c89e47f263f2cd5db302c9217065f7443f05"}, @@ -1699,12 +1791,12 @@ sphinxcontrib-serializinghtml = [ {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"}, ] sqlparse = [ - {file = "sqlparse-0.4.1-py3-none-any.whl", hash = "sha256:017cde379adbd6a1f15a61873f43e8274179378e95ef3fede90b5aa64d304ed0"}, - {file = "sqlparse-0.4.1.tar.gz", hash = "sha256:0f91fd2e829c44362cbcfab3e9ae12e22badaa8a29ad5ff599f9ec109f0454e8"}, + {file = "sqlparse-0.4.2-py3-none-any.whl", hash = "sha256:48719e356bb8b42991bdbb1e8b83223757b93789c00910a616a071910ca4a64d"}, + {file = "sqlparse-0.4.2.tar.gz", hash = "sha256:0c00730c74263a94e5a9919ade150dfc3b19c574389985446148402998287dae"}, ] structlog = [ - {file = "structlog-21.1.0-py2.py3-none-any.whl", hash = "sha256:62f06fc0ee32fb8580f0715eea66cb87271eb7efb0eaf9af6b639cba8981de47"}, - {file = "structlog-21.1.0.tar.gz", hash = "sha256:d9d2d890532e8db83c6977a2a676fb1889922ff0c26ad4dc0ecac26f9fafbc57"}, + {file = "structlog-21.4.0-py3-none-any.whl", hash = "sha256:6ed8fadb27cf8362be0e606f5e79ccdd3b1e879aac65f9dc0ac3033fd013a7be"}, + {file = "structlog-21.4.0.tar.gz", hash = "sha256:305a66201f9605a2e8a2595271a446f258175901c09c01e4c2c2a8ac5b68edf1"}, ] tblib = [ {file = "tblib-1.7.0-py2.py3-none-any.whl", hash = "sha256:289fa7359e580950e7d9743eab36b0691f0310fce64dee7d9c31065b8f723e23"}, @@ -1758,49 +1850,37 @@ tornado = [ {file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"}, ] traitlets = [ - {file = "traitlets-5.0.5-py3-none-any.whl", hash = "sha256:69ff3f9d5351f31a7ad80443c2674b7099df13cc41fc5fa6e2f6d3b0330b0426"}, - {file = "traitlets-5.0.5.tar.gz", hash = "sha256:178f4ce988f69189f7e523337a3e11d91c786ded9360174a3d9ca83e79bc5396"}, + {file = "traitlets-5.1.1-py3-none-any.whl", hash = "sha256:2d313cc50a42cd6c277e7d7dc8d4d7fedd06a2c215f78766ae7b1a66277e0033"}, + {file = "traitlets-5.1.1.tar.gz", hash = "sha256:059f456c5a7c1c82b98c2e8c799f39c9b8128f6d0d46941ee118daace9eb70c7"}, ] typed-ast = [ - {file = "typed_ast-1.4.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6"}, - {file = "typed_ast-1.4.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075"}, - {file = "typed_ast-1.4.3-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528"}, - {file = "typed_ast-1.4.3-cp35-cp35m-win32.whl", hash = "sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428"}, - {file = "typed_ast-1.4.3-cp35-cp35m-win_amd64.whl", hash = "sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3"}, - {file = "typed_ast-1.4.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f"}, - {file = "typed_ast-1.4.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341"}, - {file = 
"typed_ast-1.4.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace"}, - {file = "typed_ast-1.4.3-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f"}, - {file = "typed_ast-1.4.3-cp36-cp36m-win32.whl", hash = "sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363"}, - {file = "typed_ast-1.4.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7"}, - {file = "typed_ast-1.4.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266"}, - {file = "typed_ast-1.4.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e"}, - {file = "typed_ast-1.4.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04"}, - {file = "typed_ast-1.4.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899"}, - {file = "typed_ast-1.4.3-cp37-cp37m-win32.whl", hash = "sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c"}, - {file = "typed_ast-1.4.3-cp37-cp37m-win_amd64.whl", hash = "sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805"}, - {file = "typed_ast-1.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a"}, - {file = "typed_ast-1.4.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff"}, - {file = "typed_ast-1.4.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41"}, - {file = "typed_ast-1.4.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39"}, - {file = "typed_ast-1.4.3-cp38-cp38-win32.whl", hash = "sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927"}, - {file = "typed_ast-1.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40"}, - {file = "typed_ast-1.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3"}, - {file = "typed_ast-1.4.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4"}, - {file = "typed_ast-1.4.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0"}, - {file = "typed_ast-1.4.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3"}, - {file = "typed_ast-1.4.3-cp39-cp39-win32.whl", hash = "sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808"}, - {file = "typed_ast-1.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c"}, - {file = "typed_ast-1.4.3.tar.gz", hash = "sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65"}, + {file = "typed_ast-1.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d8314c92414ce7481eee7ad42b353943679cf6f30237b5ecbf7d835519e1212"}, + {file = "typed_ast-1.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b53ae5de5500529c76225d18eeb060efbcec90ad5e030713fe8dab0fb4531631"}, + {file = 
"typed_ast-1.5.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:24058827d8f5d633f97223f5148a7d22628099a3d2efe06654ce872f46f07cdb"}, + {file = "typed_ast-1.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:a6d495c1ef572519a7bac9534dbf6d94c40e5b6a608ef41136133377bba4aa08"}, + {file = "typed_ast-1.5.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:de4ecae89c7d8b56169473e08f6bfd2df7f95015591f43126e4ea7865928677e"}, + {file = "typed_ast-1.5.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:256115a5bc7ea9e665c6314ed6671ee2c08ca380f9d5f130bd4d2c1f5848d695"}, + {file = "typed_ast-1.5.1-cp36-cp36m-win_amd64.whl", hash = "sha256:7c42707ab981b6cf4b73490c16e9d17fcd5227039720ca14abe415d39a173a30"}, + {file = "typed_ast-1.5.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:71dcda943a471d826ea930dd449ac7e76db7be778fcd722deb63642bab32ea3f"}, + {file = "typed_ast-1.5.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4f30a2bcd8e68adbb791ce1567fdb897357506f7ea6716f6bbdd3053ac4d9471"}, + {file = "typed_ast-1.5.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ca9e8300d8ba0b66d140820cf463438c8e7b4cdc6fd710c059bfcfb1531d03fb"}, + {file = "typed_ast-1.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9caaf2b440efb39ecbc45e2fabde809cbe56272719131a6318fd9bf08b58e2cb"}, + {file = "typed_ast-1.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c9bcad65d66d594bffab8575f39420fe0ee96f66e23c4d927ebb4e24354ec1af"}, + {file = "typed_ast-1.5.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:591bc04e507595887160ed7aa8d6785867fb86c5793911be79ccede61ae96f4d"}, + {file = "typed_ast-1.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:a80d84f535642420dd17e16ae25bb46c7f4c16ee231105e7f3eb43976a89670a"}, + {file = "typed_ast-1.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:38cf5c642fa808300bae1281460d4f9b7617cf864d4e383054a5ef336e344d32"}, + {file = "typed_ast-1.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5b6ab14c56bc9c7e3c30228a0a0b54b915b1579613f6e463ba6f4eb1382e7fd4"}, + {file = "typed_ast-1.5.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a2b8d7007f6280e36fa42652df47087ac7b0a7d7f09f9468f07792ba646aac2d"}, + {file = "typed_ast-1.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:b6d17f37f6edd879141e64a5db17b67488cfeffeedad8c5cec0392305e9bc775"}, + {file = "typed_ast-1.5.1.tar.gz", hash = "sha256:484137cab8ecf47e137260daa20bafbba5f4e3ec7fda1c1e69ab299b75fa81c5"}, ] typing-extensions = [ - {file = "typing_extensions-3.10.0.0-py2-none-any.whl", hash = "sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497"}, - {file = "typing_extensions-3.10.0.0-py3-none-any.whl", hash = "sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84"}, - {file = "typing_extensions-3.10.0.0.tar.gz", hash = "sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342"}, + {file = "typing_extensions-4.0.1-py3-none-any.whl", hash = "sha256:7f001e5ac290a0c0401508864c7ec868be4e701886d5b573a9528ed3973d9d3b"}, + {file = "typing_extensions-4.0.1.tar.gz", hash = "sha256:4ca091dea149f945ec56afb48dae714f21e8692ef22a395223bcd328961b6a0e"}, ] urllib3 = [ - {file = "urllib3-1.26.6-py2.py3-none-any.whl", hash = "sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4"}, - 
{file = "urllib3-1.26.6.tar.gz", hash = "sha256:f57b4c16c62fa2760b7e3d97c35b255512fb6b59a259730f36ba32ce9f8e342f"}, + {file = "urllib3-1.26.7-py2.py3-none-any.whl", hash = "sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844"}, + {file = "urllib3-1.26.7.tar.gz", hash = "sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece"}, ] wcwidth = [ {file = "wcwidth-0.2.5-py2.py3-none-any.whl", hash = "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784"}, @@ -1811,6 +1891,6 @@ xmltodict = [ {file = "xmltodict-0.12.0.tar.gz", hash = "sha256:50d8c638ed7ecb88d90561beedbf720c9b4e851a9fa6c47ebd64e99d166d8a21"}, ] zipp = [ - {file = "zipp-3.5.0-py3-none-any.whl", hash = "sha256:957cfda87797e389580cb8b9e3870841ca991e2125350677b2ca83a0e99390a3"}, - {file = "zipp-3.5.0.tar.gz", hash = "sha256:f5812b1e007e48cff63449a5e9f4e7ebea716b4111f9c4f9a645f91d579bf0c4"}, + {file = "zipp-3.6.0-py3-none-any.whl", hash = "sha256:9fe5ea21568a0a70e50f273397638d39b03353731e6cbbb3fd8502a33fec40bc"}, + {file = "zipp-3.6.0.tar.gz", hash = "sha256:71c644c5369f4a6e07636f0aa966270449561fcea2e3d6747b8d23efaa9d7832"}, ] diff --git a/requirements.txt b/requirements.txt index f7f3a93c..05d49efb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,88 +8,87 @@ autosemver==0.5.5 babel==2.9.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6" backcall==0.2.0; python_version >= "3.7" black==20.8b1; python_version >= "3.6" -certifi==2021.5.30; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version < "4" -chardet==4.0.0; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" -click==8.0.1; python_version >= "3.6" +certifi==2021.10.8; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version < "4" and python_version >= "3.6" +charset-normalizer==2.0.9; python_full_version >= "3.6.0" and python_version >= "3.6" +click==8.0.3; python_version >= "3.6" colorama==0.4.4; python_version >= "3.7" and python_full_version < "3.0.0" and sys_platform == "win32" and platform_system == "Windows" or sys_platform == "win32" and python_version >= "3.7" and python_full_version >= "3.5.0" and platform_system == "Windows" coverage==5.5; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0" and python_version < "4") datacite==1.1.2 -decorator==5.0.9; python_version >= "3.7" -django-debug-toolbar==3.2.1; python_version >= "3.6" +decorator==5.1.0; python_version >= "3.7" +django-debug-toolbar==3.2.2; python_version >= "3.6" django-environ==0.4.5 django-rainbowtests==0.6.0 -django-split-settings==1.0.1; python_version >= "3.6" and python_version < "4.0" +django-split-settings==1.1.0; python_version >= "3.6" and python_version < "4.0" django-watchman==1.2.0 -django==3.1.13; python_version >= "3.6" +django==3.1.14; python_version >= "3.6" djangorestframework==3.12.4; python_version >= "3.5" docutils==0.16; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" dulwich==0.19.16 -elasticsearch==7.13.3; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0" and python_version < "4") -executing==0.7.0 +elasticsearch==7.16.0; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0" and python_version < "4") 
+executing==0.8.2 gunicorn==20.1.0; python_version >= "3.5" icecream==2.1.1 -idna==2.10; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" -idutils==1.1.8 -imagesize==1.2.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6" -importlib-metadata==4.6.1; python_version >= "3.6" and python_version < "3.8" +idna==3.3; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +idutils==1.1.9 +imagesize==1.3.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6" +importlib-metadata==4.8.2; python_version >= "3.6" and python_version < "3.8" ipdb==0.13.9; python_version >= "2.7" -ipython-genutils==0.2.0; python_version >= "3.7" -ipython==7.25.0; python_version >= "3.7" +ipython==7.30.1; python_version >= "3.7" isbnid-fork==0.5.2 isodate==0.6.0 -isort==5.9.1; python_full_version >= "3.6.1" and python_version < "4.0" -jedi==0.18.0; python_version >= "3.7" -jinja2==3.0.1; python_version >= "3.6" +isort==5.10.1; python_full_version >= "3.6.1" and python_version < "4.0" +jedi==0.18.1; python_version >= "3.7" +jinja2==3.0.3; python_version >= "3.6" jsonschema==3.2.0 livereload==2.6.3; python_version >= "3.6" -lxml==4.6.3; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") +lxml==4.6.4; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") markupsafe==2.0.1; python_version >= "3.6" -matplotlib-inline==0.1.2; python_version >= "3.7" +matplotlib-inline==0.1.3; python_version >= "3.7" mypy-extensions==0.4.3; python_version >= "3.6" -packaging==21.0; python_version >= "3.6" -parso==0.8.2; python_version >= "3.7" -pathspec==0.8.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" +packaging==21.3; python_version >= "3.6" +parso==0.8.3; python_version >= "3.7" +pathspec==0.9.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" pexpect==4.8.0; sys_platform != "win32" and python_version >= "3.7" pickleshare==0.7.5; python_version >= "3.7" pika==1.2.0 -prompt-toolkit==3.0.19; python_full_version >= "3.6.1" and python_version >= "3.7" -psycopg2-binary==2.9.1; python_version >= "3.6" +prompt-toolkit==3.0.24; python_full_version >= "3.6.2" and python_version >= "3.7" +psycopg2-binary==2.9.2; python_version >= "3.6" ptyprocess==0.7.0; sys_platform != "win32" and python_version >= "3.7" -pygments==2.9.0; python_version >= "3.7" -pyjwt==2.1.0; python_version >= "3.6" +pygments==2.10.0; python_version >= "3.7" +pyjwt==2.3.0; python_version >= "3.6" pyoai @ git+https://github.com/infrae/pyoai@5f6eba12 -pyparsing==2.4.7; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" and python_version >= "3.6" +pyparsing==3.0.6; python_version >= "3.6" pyrsistent==0.18.0; python_version >= "3.6" -python-box==5.3.0; python_version >= "3.6" -python-dateutil==2.8.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.3.0") +python-box==5.4.1; python_version >= "3.6" +python-dateutil==2.8.2; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.3.0") python-simplexquery==1.0.5.3 -pytz==2021.1; python_version >= "3.6" and python_full_version < "3.0.0" or 
python_full_version >= "3.4.0" and python_version >= "3.6" +pytz==2021.3; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6" pyyaml==5.4.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.6.0") rdflib==5.0.0 redis==3.5.3; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") -regex==2021.7.6; python_version >= "3.6" -requests==2.25.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" -responses==0.13.3; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") +regex==2021.11.10; python_version >= "3.6" +requests==2.26.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +responses==0.13.4; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") six==1.16.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" -snowballstemmer==2.1.0; python_version >= "3.6" +snowballstemmer==2.2.0; python_version >= "3.6" sphinx-autobuild==2021.3.14; python_version >= "3.6" sphinx-rtd-theme==0.5.2 -sphinx==4.0.3; python_version >= "3.6" +sphinx==4.3.1; python_version >= "3.6" sphinxcontrib-applehelp==1.0.2; python_version >= "3.6" sphinxcontrib-devhelp==1.0.2; python_version >= "3.6" sphinxcontrib-htmlhelp==2.0.0; python_version >= "3.6" sphinxcontrib-jsmath==1.0.1; python_version >= "3.6" sphinxcontrib-qthelp==1.0.3; python_version >= "3.6" sphinxcontrib-serializinghtml==1.1.5; python_version >= "3.6" -sqlparse==0.4.1; python_version >= "3.6" -structlog==21.1.0; python_version >= "3.6" +sqlparse==0.4.2; python_version >= "3.6" +structlog==21.4.0; python_version >= "3.6" tblib==1.7.0; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") toml==0.10.2; python_version > "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version > "3.6" and python_version < "4" tornado==6.1; python_version >= "3.6" -traitlets==5.0.5; python_version >= "3.7" -typed-ast==1.4.3; python_version >= "3.6" -typing-extensions==3.10.0.0; python_version < "3.8" and python_version >= "3.6" -urllib3==1.26.6; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version < "4" -wcwidth==0.2.5; python_full_version >= "3.6.1" and python_version >= "3.7" +traitlets==5.1.1; python_version >= "3.7" +typed-ast==1.5.1; python_version >= "3.6" +typing-extensions==4.0.1; python_version < "3.8" and python_version >= "3.6" +urllib3==1.26.7; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version < "4" and python_version >= "3.6" +wcwidth==0.2.5; python_full_version >= "3.6.2" and python_version >= "3.7" xmltodict==0.12.0; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0") -zipp==3.5.0; python_version >= "3.6" and python_version < "3.8" +zipp==3.6.0; python_version >= "3.6" and python_version < "3.8" From a631ad595c36d35090c01fbe41350dd067516abb Mon Sep 17 00:00:00 2001 From: tonurmi Date: Fri, 10 Dec 2021 10:51:20 +0200 Subject: [PATCH 117/160] upgrade to Django 3.2, remove asserts that use old header api. 
make stream response use new header api --- poetry.lock | 12 ++++++------ pyproject.toml | 2 +- requirements.txt | 2 +- src/metax_api/middleware/stream_http_response.py | 5 +++-- .../tests/api/rest/base/views/datasets/read.py | 8 ++------ .../tests/api/rest/base/views/schemas/read.py | 2 +- 6 files changed, 14 insertions(+), 17 deletions(-) diff --git a/poetry.lock b/poetry.lock index a6678110..8364db95 100644 --- a/poetry.lock +++ b/poetry.lock @@ -198,19 +198,19 @@ python-versions = ">=3.5" [[package]] name = "django" -version = "3.1.14" +version = "3.2.10" description = "A high-level Python Web framework that encourages rapid development and clean, pragmatic design." category = "main" optional = false python-versions = ">=3.6" [package.dependencies] -asgiref = ">=3.2.10,<4" +asgiref = ">=3.3.2,<4" pytz = "*" sqlparse = ">=0.2.2" [package.extras] -argon2 = ["argon2-cffi (>=16.1.0)"] +argon2 = ["argon2-cffi (>=19.1.0)"] bcrypt = ["bcrypt"] [[package]] @@ -1133,7 +1133,7 @@ swagger = ["PyYAML"] [metadata] lock-version = "1.1" python-versions = "^3.7" -content-hash = "f902a1619b9a3a0cdc7233bee55b16b6b3167d7ce087716503df3c9bd39fe484" +content-hash = "75eef2a72ed0c8c07d248ceb0597c043bb417ddc559e60c665660101ed217acd" [metadata.files] alabaster = [ @@ -1253,8 +1253,8 @@ decorator = [ {file = "decorator-5.1.0.tar.gz", hash = "sha256:e59913af105b9860aa2c8d3272d9de5a56a4e608db9a2f167a8480b323d529a7"}, ] django = [ - {file = "Django-3.1.14-py3-none-any.whl", hash = "sha256:0fabc786489af16ad87a8c170ba9d42bfd23f7b699bd5ef05675864e8d012859"}, - {file = "Django-3.1.14.tar.gz", hash = "sha256:72a4a5a136a214c39cf016ccdd6b69e2aa08c7479c66d93f3a9b5e4bb9d8a347"}, + {file = "Django-3.2.10-py3-none-any.whl", hash = "sha256:df6f5eb3c797b27c096d61494507b7634526d4ce8d7c8ca1e57a4fb19c0738a3"}, + {file = "Django-3.2.10.tar.gz", hash = "sha256:074e8818b4b40acdc2369e67dcd6555d558329785408dcd25340ee98f1f1d5c4"}, ] django-debug-toolbar = [ {file = "django-debug-toolbar-3.2.2.tar.gz", hash = "sha256:8c5b13795d4040008ee69ba82dcdd259c49db346cf7d0de6e561a49d191f0860"}, diff --git a/pyproject.toml b/pyproject.toml index 8742b747..3677a95e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ authors = ["Your Name "] [tool.poetry.dependencies] python = "^3.7" -Django = "<3.2" +Django = "<3.3" datacite = "^1.0.1" djangorestframework = "^3.12.4" gunicorn = "^20.1.0" diff --git a/requirements.txt b/requirements.txt index 05d49efb..114ef027 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,7 +20,7 @@ django-environ==0.4.5 django-rainbowtests==0.6.0 django-split-settings==1.1.0; python_version >= "3.6" and python_version < "4.0" django-watchman==1.2.0 -django==3.1.14; python_version >= "3.6" +django==3.2.10; python_version >= "3.6" djangorestframework==3.12.4; python_version >= "3.5" docutils==0.16; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" dulwich==0.19.16 diff --git a/src/metax_api/middleware/stream_http_response.py b/src/metax_api/middleware/stream_http_response.py index 5ca379a9..5731743c 100755 --- a/src/metax_api/middleware/stream_http_response.py +++ b/src/metax_api/middleware/stream_http_response.py @@ -38,8 +38,9 @@ def __call__(self, request): and self._check_query_param(request, "stream") ): resp = StreamingHttpResponse(self._stream_response(response)) - resp._headers["content-type"] = ("Content-Type", "application/json") - resp._headers["x-count"] = ("X-Count", str(len(response.data))) + resp.headers["content-type"] = 
("Content-Type", "application/json") + + resp.headers["x-count"] = ("X-Count", str(len(response.data))) return resp return response diff --git a/src/metax_api/tests/api/rest/base/views/datasets/read.py b/src/metax_api/tests/api/rest/base/views/datasets/read.py index 98b255bc..ce84e9dd 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/read.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/read.py @@ -1068,12 +1068,8 @@ def test_read_dataset_format_dummy_datacite_doi(self): def _check_dataset_xml_format_response(self, response, element_name): self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual("content-type" in response._headers, True, response._headers) - self.assertEqual( - "application/xml" in response._headers["content-type"][1], - True, - response._headers, - ) + + self.assertEqual("= 0) + # self.assertTrue(response.headers["content-type"][1].find("json") >= 0) def test_read_schema_retrieve_existing(self): list_response = self.client.get("/rest/schemas") From 44c08729739fe3104c472fe150db728b3e8f5514 Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Mon, 13 Dec 2021 09:13:49 +0200 Subject: [PATCH 118/160] Added FMI catalog and eudat user --- src/metax_api/initialdata/datacatalogs.json | 75 +++++++++++++++++++ .../settings/components/access_control.py | 3 + src/metax_api/settings/components/common.py | 5 +- src/metax_api/settings/environments/stable.py | 6 +- 4 files changed, 85 insertions(+), 4 deletions(-) diff --git a/src/metax_api/initialdata/datacatalogs.json b/src/metax_api/initialdata/datacatalogs.json index a0e1cdd0..aa48b0fe 100755 --- a/src/metax_api/initialdata/datacatalogs.json +++ b/src/metax_api/initialdata/datacatalogs.json @@ -519,4 +519,79 @@ "catalog_record_services_edit": "metax,aalto", "catalog_record_services_create": "metax,aalto", "catalog_record_services_read": "metax,aalto" +}, +{ + "catalog_json": { + "title": { + "en": "Finnish Meteorological Institute catalog", + "fi": "Ilmatieteen laitoksen katalogi", + "sv": "Meteorologiska Institutet register" + }, + "language": [ + { + "title": { + "en": "Finnish language", + "fi": "Suomen kieli", + "sv": "finska", + "und": "Suomen kieli" + }, + "identifier": "http://lexvo.org/id/iso639-3/fin" + }, + { + "title": { + "en": "English language", + "fi": "Englannin kieli", + "sv": "engelska", + "und": "Englannin kieli" + }, + "identifier": "http://lexvo.org/id/iso639-3/eng" + } + ], + "harvested": true, + "publisher": { + "name": { + "en": "Finnish Meteorological Institute", + "fi": "Ilmatieteen laitos", + "sv": "Meteorologiska Institutet" + }, + "homepage": [ + { + "title": { + "en": "Home - Finnish Meteorological Institute", + "fi": "Etusivu - Ilmatieteen laitos", + "sv": "Startsida - Meteorologiska institutet" + }, + "identifier": "https://www.ilmatieteenlaitos.fi/" + } + ], + "identifier": "https://isni.org/isni/0000000122538678" + }, + "identifier": "urn:nbn:fi:att:data-catalog-fmi", + "access_rights": { + "license": [ + { + "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-4.0" + } + ], + "access_type": [ + { + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } + } + ], + "description": { + "en": "Contains datasets of Finnish Meteorological Institute", + "fi": "Ilmatieteen laitoksen aineistot" + } + }, + "dataset_versioning": false, + "research_dataset_schema": "att" + }, + "catalog_record_services_edit": "metax,eudat", + "catalog_record_services_create": 
"metax,eudat", + "catalog_record_services_read": "metax,eudat" }] \ No newline at end of file diff --git a/src/metax_api/settings/components/access_control.py b/src/metax_api/settings/components/access_control.py index 79a14ad5..4948560e 100755 --- a/src/metax_api/settings/components/access_control.py +++ b/src/metax_api/settings/components/access_control.py @@ -38,6 +38,7 @@ class Role(Enum): JYU = "jyu" REPOTRONIC = "repotronic" AALTO = "aalto" + EUDAT = "eudat" def __ge__(self, other): if self.__class__ is other.__class__: @@ -76,6 +77,8 @@ def __lt__(self, other): Role.TPAS, Role.QVAIN, Role.ETSIN, + Role.EUDAT, + Role.JYU, ] api_permissions.rest.datasets.read = [Role.ALL] api_permissions.rest.datasets["update"] = [ diff --git a/src/metax_api/settings/components/common.py b/src/metax_api/settings/components/common.py index 020d7fda..b408fb67 100755 --- a/src/metax_api/settings/components/common.py +++ b/src/metax_api/settings/components/common.py @@ -14,6 +14,7 @@ DFT_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-dft" REPOTRONIC_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-repotronic" AALTO_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-acris" +FMI_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-fmi" END_USER_ALLOWED_DATA_CATALOGS = [ IDA_DATA_CATALOG_IDENTIFIER, @@ -189,7 +190,7 @@ if env("ENABLE_V2_ENDPOINTS"): API_VERSIONS_ENABLED.append("v2") -# Variables related to api credentials +# API credentials in development environment API_USERS = [ {"password": "test-metax", "username": "metax"}, {"password": "test-qvain", "username": "qvain"}, @@ -198,6 +199,8 @@ {"password": "test-etsin", "username": "etsin"}, {"password": "test-fds", "username": "fds"}, {"password": "test-download", "username": "download"}, + {"password": "test-eudat", "username": "eudat"}, + {"password": "test-jyu", "username": "jyu"}, ] SWAGGER_YAML_PATH = env('SWAGGER_YAML_PATH') diff --git a/src/metax_api/settings/environments/stable.py b/src/metax_api/settings/environments/stable.py index d08fed47..f5b563b1 100644 --- a/src/metax_api/settings/environments/stable.py +++ b/src/metax_api/settings/environments/stable.py @@ -1,9 +1,9 @@ from metax_api.settings.components.access_control import Role, api_permissions, prepare_perm_values from metax_api.settings.environments.staging import API_USERS # noqa: F401 -api_permissions.rest.datasets.create += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO] -api_permissions.rest.datasets["update"] += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO] -api_permissions.rest.datasets.delete += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO] +api_permissions.rest.datasets.create += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO, Role.EUDAT] +api_permissions.rest.datasets["update"] += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO, Role.EUDAT] +api_permissions.rest.datasets.delete += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO, Role.EUDAT] api_permissions.rest.directories.read += [Role.IDA, Role.QVAIN_LIGHT] From 79589f99c2a76224891906b6f515fb9a033a9c0f Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Tue, 14 Dec 2021 09:23:38 +0200 Subject: [PATCH 119/160] Removed Aalto from legacy catalogs --- src/metax_api/settings/components/common.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/metax_api/settings/components/common.py b/src/metax_api/settings/components/common.py index 020d7fda..7ba34401 100755 --- 
a/src/metax_api/settings/components/common.py +++ b/src/metax_api/settings/components/common.py @@ -27,7 +27,6 @@ LEGACY_CATALOGS = [ LEGACY_DATA_CATALOG_IDENTIFIER, REPOTRONIC_DATA_CATALOG_IDENTIFIER, - AALTO_DATA_CATALOG_IDENTIFIER ] VALIDATE_TOKEN_URL = env("VALIDATE_TOKEN_URL") From f8a06ebdbf93bac3e2a544ead633315d95972d9c Mon Sep 17 00:00:00 2001 From: Jori Niemi <3295718+tahme@users.noreply.github.com> Date: Wed, 15 Dec 2021 12:06:57 +0200 Subject: [PATCH 120/160] CSCFAIRMETA-1259: Allow tpas service to access catalogs in test data --- .../data_catalog_test_data_template.json | 6 +-- src/metax_api/tests/testdata/test_data.json | 48 +++++++++---------- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/src/metax_api/tests/testdata/data_catalog_test_data_template.json b/src/metax_api/tests/testdata/data_catalog_test_data_template.json index decbf52a..34cc8604 100755 --- a/src/metax_api/tests/testdata/data_catalog_test_data_template.json +++ b/src/metax_api/tests/testdata/data_catalog_test_data_template.json @@ -117,7 +117,7 @@ "dataset_versioning": true }, "service_created": "metax", - "catalog_record_services_create": "testuser,api_auth_user,metax", - "catalog_record_services_edit": "testuser,api_auth_user,metax", - "catalog_record_services_read": "testuser,api_auth_user,metax" + "catalog_record_services_create": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_edit": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_read": "testuser,api_auth_user,metax,tpas" } \ No newline at end of file diff --git a/src/metax_api/tests/testdata/test_data.json b/src/metax_api/tests/testdata/test_data.json index c3802a10..8f513dd9 100755 --- a/src/metax_api/tests/testdata/test_data.json +++ b/src/metax_api/tests/testdata/test_data.json @@ -4537,9 +4537,9 @@ "fi": "Testidatakatalogin nimi" } }, - "catalog_record_services_create": "testuser,api_auth_user,metax", - "catalog_record_services_edit": "testuser,api_auth_user,metax", - "catalog_record_services_read": "testuser,api_auth_user,metax", + "catalog_record_services_create": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_edit": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_read": "testuser,api_auth_user,metax,tpas", "date_created": "2017-05-15T10:07:22Z", "date_modified": "2017-06-15T10:07:22Z", "service_created": "metax" @@ -4667,9 +4667,9 @@ "fi": "Testidatakatalogin nimi" } }, - "catalog_record_services_create": "testuser,api_auth_user,metax", - "catalog_record_services_edit": "testuser,api_auth_user,metax", - "catalog_record_services_read": "testuser,api_auth_user,metax", + "catalog_record_services_create": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_edit": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_read": "testuser,api_auth_user,metax,tpas", "date_created": "2017-05-15T10:07:22Z", "date_modified": "2017-06-15T10:07:22Z", "service_created": "metax" @@ -4797,9 +4797,9 @@ "fi": "Testidatakatalogin nimi" } }, - "catalog_record_services_create": "testuser,api_auth_user,metax", - "catalog_record_services_edit": "testuser,api_auth_user,metax", - "catalog_record_services_read": "testuser,api_auth_user,metax", + "catalog_record_services_create": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_edit": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_read": "testuser,api_auth_user,metax,tpas", "date_created": "2017-05-15T10:07:22Z", "date_modified": "2017-06-15T10:07:22Z", "service_created": "metax" @@ -4927,9 
+4927,9 @@ "fi": "Testidatakatalogin nimi" } }, - "catalog_record_services_create": "testuser,api_auth_user,metax", - "catalog_record_services_edit": "testuser,api_auth_user,metax", - "catalog_record_services_read": "testuser,api_auth_user,metax", + "catalog_record_services_create": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_edit": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_read": "testuser,api_auth_user,metax,tpas", "date_created": "2017-05-15T10:07:22Z", "date_modified": "2017-06-15T10:07:22Z", "service_created": "metax" @@ -5057,9 +5057,9 @@ "fi": "Testidatakatalogin nimi" } }, - "catalog_record_services_create": "testuser,api_auth_user,metax", - "catalog_record_services_edit": "testuser,api_auth_user,metax", - "catalog_record_services_read": "testuser,api_auth_user,metax", + "catalog_record_services_create": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_edit": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_read": "testuser,api_auth_user,metax,tpas", "date_created": "2017-05-15T10:07:22Z", "date_modified": "2017-06-15T10:07:22Z", "service_created": "metax" @@ -5187,9 +5187,9 @@ "fi": "Testidatakatalogin nimi" } }, - "catalog_record_services_create": "testuser,api_auth_user,metax", - "catalog_record_services_edit": "testuser,api_auth_user,metax", - "catalog_record_services_read": "testuser,api_auth_user,metax", + "catalog_record_services_create": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_edit": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_read": "testuser,api_auth_user,metax,tpas", "date_created": "2017-05-15T10:07:22Z", "date_modified": "2017-06-15T10:07:22Z", "service_created": "metax" @@ -5317,9 +5317,9 @@ "fi": "Testidatakatalogin nimi" } }, - "catalog_record_services_create": "testuser,api_auth_user,metax", - "catalog_record_services_edit": "testuser,api_auth_user,metax", - "catalog_record_services_read": "testuser,api_auth_user,metax", + "catalog_record_services_create": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_edit": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_read": "testuser,api_auth_user,metax,tpas", "date_created": "2017-05-15T10:07:22Z", "date_modified": "2017-06-15T10:07:22Z", "service_created": "metax" @@ -5447,9 +5447,9 @@ "fi": "Testidatakatalogin nimi" } }, - "catalog_record_services_create": "testuser,api_auth_user,metax", - "catalog_record_services_edit": "testuser,api_auth_user,metax", - "catalog_record_services_read": "testuser,api_auth_user,metax", + "catalog_record_services_create": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_edit": "testuser,api_auth_user,metax,tpas", + "catalog_record_services_read": "testuser,api_auth_user,metax,tpas", "date_created": "2017-05-15T10:07:22Z", "date_modified": "2017-06-15T10:07:22Z", "service_created": "metax" From b9d8cb51458a2a3b9ffaf574b5c8758597d419fc Mon Sep 17 00:00:00 2001 From: tonurmi Date: Wed, 15 Dec 2021 12:30:34 +0200 Subject: [PATCH 121/160] poetry update & export --- poetry.lock | 149 ++++++++++++++++++++++++----------------------- requirements.txt | 10 ++-- 2 files changed, 80 insertions(+), 79 deletions(-) diff --git a/poetry.lock b/poetry.lock index 8364db95..049711b0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -215,7 +215,7 @@ bcrypt = ["bcrypt"] [[package]] name = "django-debug-toolbar" -version = "3.2.2" +version = "3.2.3" description = "A configurable set of panels that display various debug information about the current 
request/response." category = "dev" optional = false @@ -265,14 +265,15 @@ django = ">=2.0" [[package]] name = "djangorestframework" -version = "3.12.4" +version = "3.13.0" description = "Web APIs for Django, made easy." category = "main" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" [package.dependencies] django = ">=2.2" +pytz = "*" [[package]] name = "docutils" @@ -301,7 +302,7 @@ https = ["urllib3[secure] (>=1.24.1)"] [[package]] name = "elasticsearch" -version = "7.16.0" +version = "7.16.1" description = "Python client for Elasticsearch" category = "main" optional = false @@ -464,7 +465,7 @@ tests = ["pytest-pep8 (>=1.0.6)", "pytest (>=3.0.4)"] [[package]] name = "isodate" -version = "0.6.0" +version = "0.6.1" description = "An ISO 8601 date/time/duration parser and formatter" category = "main" optional = false @@ -548,7 +549,7 @@ tornado = {version = "*", markers = "python_version > \"2.7\""} [[package]] name = "lxml" -version = "4.6.4" +version = "4.7.1" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." category = "main" optional = false @@ -1257,8 +1258,8 @@ django = [ {file = "Django-3.2.10.tar.gz", hash = "sha256:074e8818b4b40acdc2369e67dcd6555d558329785408dcd25340ee98f1f1d5c4"}, ] django-debug-toolbar = [ - {file = "django-debug-toolbar-3.2.2.tar.gz", hash = "sha256:8c5b13795d4040008ee69ba82dcdd259c49db346cf7d0de6e561a49d191f0860"}, - {file = "django_debug_toolbar-3.2.2-py3-none-any.whl", hash = "sha256:d7bab7573fab35b0fd029163371b7182f5826c13da69734beb675c761d06a4d3"}, + {file = "django-debug-toolbar-3.2.3.tar.gz", hash = "sha256:95880677ea846ba1077d02305fd5e2b25e1da096e1d4a735b665e3340fa2ae79"}, + {file = "django_debug_toolbar-3.2.3-py3-none-any.whl", hash = "sha256:516702e1d71302bbc06059fa3c41efd1a3bd9cbb5580fc793343118d95b309e0"}, ] django-environ = [ {file = "django-environ-0.4.5.tar.gz", hash = "sha256:6c9d87660142608f63ec7d5ce5564c49b603ea8ff25da595fd6098f6dc82afde"}, @@ -1276,8 +1277,8 @@ django-watchman = [ {file = "django_watchman-1.2.0-py2.py3-none-any.whl", hash = "sha256:6c0b8889456ed644fcfe2f7c3294cb4ef8568a85772b7f95842e2d8c9b4bbff5"}, ] djangorestframework = [ - {file = "djangorestframework-3.12.4-py3-none-any.whl", hash = "sha256:6d1d59f623a5ad0509fe0d6bfe93cbdfe17b8116ebc8eda86d45f6e16e819aaf"}, - {file = "djangorestframework-3.12.4.tar.gz", hash = "sha256:f747949a8ddac876e879190df194b925c177cdeb725a099db1460872f7c0a7f2"}, + {file = "djangorestframework-3.13.0-py3-none-any.whl", hash = "sha256:48e64f08244fa0df9e2b8fbd405edec263d8e1251112a06d0073b546b7c86b9c"}, + {file = "djangorestframework-3.13.0.tar.gz", hash = "sha256:8b987d5683f5b3553dd946d4972048d3117fc526cb0bc01a3f021e81af53f39e"}, ] docutils = [ {file = "docutils-0.16-py2.py3-none-any.whl", hash = "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af"}, @@ -1293,8 +1294,8 @@ dulwich = [ {file = "dulwich-0.19.16.tar.gz", hash = "sha256:f74561c448bfb6f04c07de731c1181ae4280017f759b0bb04fa5770aa84ca850"}, ] elasticsearch = [ - {file = "elasticsearch-7.16.0-py2.py3-none-any.whl", hash = "sha256:9a5a2fd53a4fce28f15f358ab13fbcfb06f47fb2c7400ea89c10d6fd3f236ecd"}, - {file = "elasticsearch-7.16.0.tar.gz", hash = "sha256:d7f8665715ad80e3e99e42388bcc49c1b06162f72acfa1f8febe2baf5570b0ed"}, + {file = "elasticsearch-7.16.1-py2.py3-none-any.whl", hash = "sha256:97e9d62db71a571c540d6dfa2a51bc2263b5c0d7b9f9896eb7116ba02373c69b"}, + {file = "elasticsearch-7.16.1.tar.gz", hash = 
"sha256:c024ee2e7e2509c842c4e3c5e2b99a92ceecfde06d6dac2d32a19bf566c3e175"}, ] executing = [ {file = "executing-0.8.2-py2.py3-none-any.whl", hash = "sha256:32fc6077b103bd19e6494a72682d66d5763cf20a106d5aa7c5ccbea4e47b0df7"}, @@ -1335,8 +1336,8 @@ isbnid-fork = [ {file = "isbnid_fork-0.5.2.tar.gz", hash = "sha256:8d878866aa0e7f06e700a37fce586c7398ce4837da8bca39683db7028a9c3837"}, ] isodate = [ - {file = "isodate-0.6.0-py2.py3-none-any.whl", hash = "sha256:aa4d33c06640f5352aca96e4b81afd8ab3b47337cc12089822d6f322ac772c81"}, - {file = "isodate-0.6.0.tar.gz", hash = "sha256:2e364a3d5759479cdb2d37cce6b9376ea504db2ff90252a2e5b7cc89cc9ff2d8"}, + {file = "isodate-0.6.1-py2.py3-none-any.whl", hash = "sha256:0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96"}, + {file = "isodate-0.6.1.tar.gz", hash = "sha256:48c5881de7e8b0a0d648cb024c8062dc84e7b840ed81e864c7614fd3c127bde9"}, ] isort = [ {file = "isort-5.10.1-py3-none-any.whl", hash = "sha256:6f62d78e2f89b4500b080fe3a81690850cd254227f27f75c3a0c491a1f351ba7"}, @@ -1358,66 +1359,66 @@ livereload = [ {file = "livereload-2.6.3.tar.gz", hash = "sha256:776f2f865e59fde56490a56bcc6773b6917366bce0c267c60ee8aaf1a0959869"}, ] lxml = [ - {file = "lxml-4.6.4-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:bbf2dc330bd44bfc0254ab37677ec60f7c7ecea55ad8ba1b8b2ea7bf20c265f5"}, - {file = "lxml-4.6.4-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b667c51682fe9b9788c69465956baa8b6999531876ccedcafc895c74ad716cd8"}, - {file = "lxml-4.6.4-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:72e730d33fe2e302fd07285f14624fca5e5e2fb2bb4fb2c3941e318c41c443d1"}, - {file = "lxml-4.6.4-cp27-cp27m-win32.whl", hash = "sha256:433df8c7dde0f9e41cbf4f36b0829d50a378116ef5e962ba3881f2f5f025c7be"}, - {file = "lxml-4.6.4-cp27-cp27m-win_amd64.whl", hash = "sha256:35752ee40f7bbf6adc9ff4e1f4b84794a3593736dcce80db32e3c2aa85e294ac"}, - {file = "lxml-4.6.4-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ff5bb2a198ea67403bb6818705e9a4f90e0313f2215428ec51001ce56d939fb"}, - {file = "lxml-4.6.4-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9b87727561c1150c0cc91c5d9d389448b37a7d15f0ba939ed3d1acb2f11bf6c5"}, - {file = "lxml-4.6.4-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:45fdb2899c755138722797161547a40b3e2a06feda620cc41195ee7e97806d81"}, - {file = "lxml-4.6.4-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:38b9de0de3aa689fe9fb9877ae1be1e83b8cf9621f7e62049d0436b9ecf4ad64"}, - {file = "lxml-4.6.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:662523cd2a0246740225c7e32531f2e766544122e58bee70e700a024cfc0cf81"}, - {file = "lxml-4.6.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:4aa349c5567651f34d4eaae7de6ed5b523f6d70a288f9c6fbac22d13a0784e04"}, - {file = "lxml-4.6.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:08eb9200d88b376a8ed5e50f1dc1d1a45b49305169674002a3b5929943390591"}, - {file = "lxml-4.6.4-cp310-cp310-win32.whl", hash = "sha256:bdc224f216ead849e902151112efef6e96c41ee1322e15d4e5f7c8a826929aee"}, - {file = "lxml-4.6.4-cp310-cp310-win_amd64.whl", hash = "sha256:ab6db93a2b6b66cbf62b4e4a7135f476e708e8c5c990d186584142c77d7f975a"}, - {file = "lxml-4.6.4-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:50790313df028aa05cf22be9a8da033b86c42fa32523e4fd944827b482b17bf0"}, - {file = 
"lxml-4.6.4-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6764998345552b1dfc9326a932d2bad6367c6b37a176bb73ada6b9486bf602f7"}, - {file = "lxml-4.6.4-cp35-cp35m-win32.whl", hash = "sha256:543b239b191bb3b6d9bef5f09f1fb2be5b7eb09ab4d386aa655e4d53fbe9ff47"}, - {file = "lxml-4.6.4-cp35-cp35m-win_amd64.whl", hash = "sha256:a75c1ad05eedb1a3ff2a34a52a4f0836cfaa892e12796ba39a7732c82701eff4"}, - {file = "lxml-4.6.4-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:47e955112ce64241fdb357acf0216081f9f3255b3ac9c502ca4b3323ec1ca558"}, - {file = "lxml-4.6.4-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:20d7c8d90d449c6a353b15ee0459abae8395dbe59ad01e406ccbf30cd81c6f98"}, - {file = "lxml-4.6.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:240db6f3228d26e3c6f4fad914b9ddaaf8707254e8b3efd564dc680c8ec3c264"}, - {file = "lxml-4.6.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:351482da8dd028834028537f08724b1de22d40dcf3bb723b469446564f409074"}, - {file = "lxml-4.6.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e678a643177c0e5ec947b645fa7bc84260dfb9b6bf8fb1fdd83008dfc2ca5928"}, - {file = "lxml-4.6.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:15d0381feb56f08f78c5cc4fc385ddfe0bde1456e37f54a9322833371aec4060"}, - {file = "lxml-4.6.4-cp36-cp36m-win32.whl", hash = "sha256:4ba74afe5ee5cb5e28d83b513a6e8f0875fda1dc1a9aea42cc0065f029160d2a"}, - {file = "lxml-4.6.4-cp36-cp36m-win_amd64.whl", hash = "sha256:9c91a73971a922c13070fd8fa5a114c858251791ba2122a941e6aa781c713e44"}, - {file = "lxml-4.6.4-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:6020c70ff695106bf80651953a23e37718ef1fee9abd060dcad8e32ab2dc13f3"}, - {file = "lxml-4.6.4-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f5dd358536b8a964bf6bd48de038754c1609e72e5f17f5d21efe2dda17594dbf"}, - {file = "lxml-4.6.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:7ae7089d81fc502df4b217ad77f03c54039fe90dac0acbe70448d7e53bfbc57e"}, - {file = "lxml-4.6.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:80d10d53d3184837445ff8562021bdd37f57c4cadacbf9d8726cc16220a00d54"}, - {file = "lxml-4.6.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e95da348d57eb448d226a44b868ff2ca5786fbcbe417ac99ff62d0a7d724b9c7"}, - {file = "lxml-4.6.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:ffd65cfa33fed01735c82aca640fde4cc63f0414775cba11e06f84fae2085a6e"}, - {file = "lxml-4.6.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:877666418598f6cb289546c77ff87590cfd212f903b522b0afa0b9fb73b3ccfb"}, - {file = "lxml-4.6.4-cp37-cp37m-win32.whl", hash = "sha256:e91d24623e747eeb2d8121f4a94c6a7ad27dc48e747e2dc95bfe88632bd028a2"}, - {file = "lxml-4.6.4-cp37-cp37m-win_amd64.whl", hash = "sha256:4ec9a80dd5704ecfde54319b6964368daf02848c8954d3bacb9b64d1c7659159"}, - {file = "lxml-4.6.4-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:2901625f4a878a055d275beedc20ba9cb359cefc4386a967222fee29eb236038"}, - {file = "lxml-4.6.4-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b567178a74a2261345890eac66fbf394692a6e002709d329f28a673ca6042473"}, - {file = "lxml-4.6.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = 
"sha256:4717123f7c11c81e0da69989e5a64079c3f402b0efeb4c6241db6c369d657bd8"}, - {file = "lxml-4.6.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:cf201bf5594d1aab139fe53e3fca457e4f8204a5bbd65d48ab3b82a16f517868"}, - {file = "lxml-4.6.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a77a3470ba37e11872c75ca95baf9b3312133a3d5a5dc720803b23098c653976"}, - {file = "lxml-4.6.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:619c6d2b552bba00491e96c0518aad94002651c108a0f7364ff2d7798812c00e"}, - {file = "lxml-4.6.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:601f0ab75538b280aaf1e720eb9d68d4fa104ac274e1e9e6971df488f4dcdb0f"}, - {file = "lxml-4.6.4-cp38-cp38-win32.whl", hash = "sha256:75d3c5bbc0ddbad03bb68b9be638599f67e4b98ed3dcd0fec9f6f39e41ee96cb"}, - {file = "lxml-4.6.4-cp38-cp38-win_amd64.whl", hash = "sha256:4341d135f5660db10184963d9c3418c3e28d7f868aaf8b11a323ebf85813f7f4"}, - {file = "lxml-4.6.4-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:9db24803fa71e3305fe4a7812782b708da21a0b774b130dd1860cf40a6d7a3ee"}, - {file = "lxml-4.6.4-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:afd60230ad9d8bcba005945ec3a343722f09e0b7f8ae804246e5d2cfc6bd71a6"}, - {file = "lxml-4.6.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:0c15e1cd55055956e77b0732270f1c6005850696bc3ef3e03d01e78af84eaa42"}, - {file = "lxml-4.6.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:6d422b3c729737d8a39279a25fa156c983a56458f8b2f97661ee6fb22b80b1d6"}, - {file = "lxml-4.6.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2eb90f6ec3c236ef2f1bb38aee7c0d23e77d423d395af6326e7cca637519a4cb"}, - {file = "lxml-4.6.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:51a0e5d243687596f46e24e464121d4b232ad772e2d1785b2a2c0eb413c285d4"}, - {file = "lxml-4.6.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d43bd68714049c84e297c005456a15ecdec818f7b5aa5868c8b0a865cfb78a44"}, - {file = "lxml-4.6.4-cp39-cp39-win32.whl", hash = "sha256:ee9e4b07b0eba4b6a521509e9e1877476729c1243246b6959de697ebea739643"}, - {file = "lxml-4.6.4-cp39-cp39-win_amd64.whl", hash = "sha256:48eaac2991b3036175b42ee8d3c23f4cca13f2be8426bf29401a690ab58c88f4"}, - {file = "lxml-4.6.4-pp37-pypy37_pp73-macosx_10_14_x86_64.whl", hash = "sha256:2b06a91cf7b8acea7793006e4ae50646cef0fe35ce5acd4f5cb1c77eb228e4a1"}, - {file = "lxml-4.6.4-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:523f195948a1ba4f9f5b7294d83c6cd876547dc741820750a7e5e893a24bbe38"}, - {file = "lxml-4.6.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:b0ca0ada9d3bc18bd6f611bd001a28abdd49ab9698bd6d717f7f5394c8e94628"}, - {file = "lxml-4.6.4-pp38-pypy38_pp73-macosx_10_14_x86_64.whl", hash = "sha256:197b7cb7a753cf553a45115739afd8458464a28913da00f5c525063f94cd3f48"}, - {file = "lxml-4.6.4-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:6298f5b42a26581206ef63fffa97c754245d329414108707c525512a5197f2ba"}, - {file = "lxml-4.6.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0b12c95542f04d10cba46b3ff28ea52ea56995b78cf918f0b11b05e75812bb79"}, - {file = "lxml-4.6.4.tar.gz", hash = 
"sha256:daf9bd1fee31f1c7a5928b3e1059e09a8d683ea58fb3ffc773b6c88cb8d1399c"}, + {file = "lxml-4.7.1-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:d546431636edb1d6a608b348dd58cc9841b81f4116745857b6cb9f8dadb2725f"}, + {file = "lxml-4.7.1-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6308062534323f0d3edb4e702a0e26a76ca9e0e23ff99be5d82750772df32a9e"}, + {file = "lxml-4.7.1-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:f76dbe44e31abf516114f6347a46fa4e7c2e8bceaa4b6f7ee3a0a03c8eba3c17"}, + {file = "lxml-4.7.1-cp27-cp27m-win32.whl", hash = "sha256:d5618d49de6ba63fe4510bdada62d06a8acfca0b4b5c904956c777d28382b419"}, + {file = "lxml-4.7.1-cp27-cp27m-win_amd64.whl", hash = "sha256:9393a05b126a7e187f3e38758255e0edf948a65b22c377414002d488221fdaa2"}, + {file = "lxml-4.7.1-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:50d3dba341f1e583265c1a808e897b4159208d814ab07530202b6036a4d86da5"}, + {file = "lxml-4.7.1-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:44f552e0da3c8ee3c28e2eb82b0b784200631687fc6a71277ea8ab0828780e7d"}, + {file = "lxml-4.7.1-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:e662c6266e3a275bdcb6bb049edc7cd77d0b0f7e119a53101d367c841afc66dc"}, + {file = "lxml-4.7.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4c093c571bc3da9ebcd484e001ba18b8452903cd428c0bc926d9b0141bcb710e"}, + {file = "lxml-4.7.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:3e26ad9bc48d610bf6cc76c506b9e5ad9360ed7a945d9be3b5b2c8535a0145e3"}, + {file = "lxml-4.7.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:a5f623aeaa24f71fce3177d7fee875371345eb9102b355b882243e33e04b7175"}, + {file = "lxml-4.7.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7b5e2acefd33c259c4a2e157119c4373c8773cf6793e225006a1649672ab47a6"}, + {file = "lxml-4.7.1-cp310-cp310-win32.whl", hash = "sha256:67fa5f028e8a01e1d7944a9fb616d1d0510d5d38b0c41708310bd1bc45ae89f6"}, + {file = "lxml-4.7.1-cp310-cp310-win_amd64.whl", hash = "sha256:b1d381f58fcc3e63fcc0ea4f0a38335163883267f77e4c6e22d7a30877218a0e"}, + {file = "lxml-4.7.1-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:38d9759733aa04fb1697d717bfabbedb21398046bd07734be7cccc3d19ea8675"}, + {file = "lxml-4.7.1-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:dfd0d464f3d86a1460683cd742306d1138b4e99b79094f4e07e1ca85ee267fe7"}, + {file = "lxml-4.7.1-cp35-cp35m-win32.whl", hash = "sha256:534e946bce61fd162af02bad7bfd2daec1521b71d27238869c23a672146c34a5"}, + {file = "lxml-4.7.1-cp35-cp35m-win_amd64.whl", hash = "sha256:6ec829058785d028f467be70cd195cd0aaf1a763e4d09822584ede8c9eaa4b03"}, + {file = "lxml-4.7.1-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:ade74f5e3a0fd17df5782896ddca7ddb998845a5f7cd4b0be771e1ffc3b9aa5b"}, + {file = "lxml-4.7.1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:41358bfd24425c1673f184d7c26c6ae91943fe51dfecc3603b5e08187b4bcc55"}, + {file = "lxml-4.7.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:6e56521538f19c4a6690f439fefed551f0b296bd785adc67c1777c348beb943d"}, + {file = "lxml-4.7.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5b0f782f0e03555c55e37d93d7a57454efe7495dab33ba0ccd2dbe25fc50f05d"}, + {file = "lxml-4.7.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash 
= "sha256:490712b91c65988012e866c411a40cc65b595929ececf75eeb4c79fcc3bc80a6"}, + {file = "lxml-4.7.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:34c22eb8c819d59cec4444d9eebe2e38b95d3dcdafe08965853f8799fd71161d"}, + {file = "lxml-4.7.1-cp36-cp36m-win32.whl", hash = "sha256:2a906c3890da6a63224d551c2967413b8790a6357a80bf6b257c9a7978c2c42d"}, + {file = "lxml-4.7.1-cp36-cp36m-win_amd64.whl", hash = "sha256:36b16fecb10246e599f178dd74f313cbdc9f41c56e77d52100d1361eed24f51a"}, + {file = "lxml-4.7.1-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:a5edc58d631170de90e50adc2cc0248083541affef82f8cd93bea458e4d96db8"}, + {file = "lxml-4.7.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:87c1b0496e8c87ec9db5383e30042357b4839b46c2d556abd49ec770ce2ad868"}, + {file = "lxml-4.7.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:0a5f0e4747f31cff87d1eb32a6000bde1e603107f632ef4666be0dc065889c7a"}, + {file = "lxml-4.7.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:bf6005708fc2e2c89a083f258b97709559a95f9a7a03e59f805dd23c93bc3986"}, + {file = "lxml-4.7.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fc15874816b9320581133ddc2096b644582ab870cf6a6ed63684433e7af4b0d3"}, + {file = "lxml-4.7.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:0b5e96e25e70917b28a5391c2ed3ffc6156513d3db0e1476c5253fcd50f7a944"}, + {file = "lxml-4.7.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ec9027d0beb785a35aa9951d14e06d48cfbf876d8ff67519403a2522b181943b"}, + {file = "lxml-4.7.1-cp37-cp37m-win32.whl", hash = "sha256:9fbc0dee7ff5f15c4428775e6fa3ed20003140560ffa22b88326669d53b3c0f4"}, + {file = "lxml-4.7.1-cp37-cp37m-win_amd64.whl", hash = "sha256:1104a8d47967a414a436007c52f533e933e5d52574cab407b1e49a4e9b5ddbd1"}, + {file = "lxml-4.7.1-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:fc9fb11b65e7bc49f7f75aaba1b700f7181d95d4e151cf2f24d51bfd14410b77"}, + {file = "lxml-4.7.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:317bd63870b4d875af3c1be1b19202de34c32623609ec803b81c99193a788c1e"}, + {file = "lxml-4.7.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:610807cea990fd545b1559466971649e69302c8a9472cefe1d6d48a1dee97440"}, + {file = "lxml-4.7.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:09b738360af8cb2da275998a8bf79517a71225b0de41ab47339c2beebfff025f"}, + {file = "lxml-4.7.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6a2ab9d089324d77bb81745b01f4aeffe4094306d939e92ba5e71e9a6b99b71e"}, + {file = "lxml-4.7.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:eed394099a7792834f0cb4a8f615319152b9d801444c1c9e1b1a2c36d2239f9e"}, + {file = "lxml-4.7.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:735e3b4ce9c0616e85f302f109bdc6e425ba1670a73f962c9f6b98a6d51b77c9"}, + {file = "lxml-4.7.1-cp38-cp38-win32.whl", hash = "sha256:772057fba283c095db8c8ecde4634717a35c47061d24f889468dc67190327bcd"}, + {file = "lxml-4.7.1-cp38-cp38-win_amd64.whl", hash = "sha256:13dbb5c7e8f3b6a2cf6e10b0948cacb2f4c9eb05029fe31c60592d08ac63180d"}, + {file = "lxml-4.7.1-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:718d7208b9c2d86aaf0294d9381a6acb0158b5ff0f3515902751404e318e02c9"}, + {file = "lxml-4.7.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", 
hash = "sha256:5bee1b0cbfdb87686a7fb0e46f1d8bd34d52d6932c0723a86de1cc532b1aa489"}, + {file = "lxml-4.7.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:e410cf3a2272d0a85526d700782a2fa92c1e304fdcc519ba74ac80b8297adf36"}, + {file = "lxml-4.7.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:585ea241ee4961dc18a95e2f5581dbc26285fcf330e007459688096f76be8c42"}, + {file = "lxml-4.7.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a555e06566c6dc167fbcd0ad507ff05fd9328502aefc963cb0a0547cfe7f00db"}, + {file = "lxml-4.7.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:adaab25be351fff0d8a691c4f09153647804d09a87a4e4ea2c3f9fe9e8651851"}, + {file = "lxml-4.7.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:82d16a64236970cb93c8d63ad18c5b9f138a704331e4b916b2737ddfad14e0c4"}, + {file = "lxml-4.7.1-cp39-cp39-win32.whl", hash = "sha256:59e7da839a1238807226f7143c68a479dee09244d1b3cf8c134f2fce777d12d0"}, + {file = "lxml-4.7.1-cp39-cp39-win_amd64.whl", hash = "sha256:a1bbc4efa99ed1310b5009ce7f3a1784698082ed2c1ef3895332f5df9b3b92c2"}, + {file = "lxml-4.7.1-pp37-pypy37_pp73-macosx_10_14_x86_64.whl", hash = "sha256:0607ff0988ad7e173e5ddf7bf55ee65534bd18a5461183c33e8e41a59e89edf4"}, + {file = "lxml-4.7.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:6c198bfc169419c09b85ab10cb0f572744e686f40d1e7f4ed09061284fc1303f"}, + {file = "lxml-4.7.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:a58d78653ae422df6837dd4ca0036610b8cb4962b5cfdbd337b7b24de9e5f98a"}, + {file = "lxml-4.7.1-pp38-pypy38_pp73-macosx_10_14_x86_64.whl", hash = "sha256:e18281a7d80d76b66a9f9e68a98cf7e1d153182772400d9a9ce855264d7d0ce7"}, + {file = "lxml-4.7.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8e54945dd2eeb50925500957c7c579df3cd07c29db7810b83cf30495d79af267"}, + {file = "lxml-4.7.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:447d5009d6b5447b2f237395d0018901dcc673f7d9f82ba26c1b9f9c3b444b60"}, + {file = "lxml-4.7.1.tar.gz", hash = "sha256:a1613838aa6b89af4ba10a0f3a972836128801ed008078f8c1244e65958f1b24"}, ] markupsafe = [ {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53"}, diff --git a/requirements.txt b/requirements.txt index 114ef027..82945848 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,16 +15,16 @@ colorama==0.4.4; python_version >= "3.7" and python_full_version < "3.0.0" and s coverage==5.5; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0" and python_version < "4") datacite==1.1.2 decorator==5.1.0; python_version >= "3.7" -django-debug-toolbar==3.2.2; python_version >= "3.6" +django-debug-toolbar==3.2.3; python_version >= "3.6" django-environ==0.4.5 django-rainbowtests==0.6.0 django-split-settings==1.1.0; python_version >= "3.6" and python_version < "4.0" django-watchman==1.2.0 django==3.2.10; python_version >= "3.6" -djangorestframework==3.12.4; python_version >= "3.5" +djangorestframework==3.13.0; python_version >= "3.6" docutils==0.16; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" dulwich==0.19.16 -elasticsearch==7.16.0; (python_version >= "2.7" and 
python_full_version < "3.0.0") or (python_full_version >= "3.4.0" and python_version < "4") +elasticsearch==7.16.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0" and python_version < "4") executing==0.8.2 gunicorn==20.1.0; python_version >= "3.5" icecream==2.1.1 @@ -35,13 +35,13 @@ importlib-metadata==4.8.2; python_version >= "3.6" and python_version < "3.8" ipdb==0.13.9; python_version >= "2.7" ipython==7.30.1; python_version >= "3.7" isbnid-fork==0.5.2 -isodate==0.6.0 +isodate==0.6.1 isort==5.10.1; python_full_version >= "3.6.1" and python_version < "4.0" jedi==0.18.1; python_version >= "3.7" jinja2==3.0.3; python_version >= "3.6" jsonschema==3.2.0 livereload==2.6.3; python_version >= "3.6" -lxml==4.6.4; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") +lxml==4.7.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") markupsafe==2.0.1; python_version >= "3.6" matplotlib-inline==0.1.3; python_version >= "3.7" mypy-extensions==0.4.3; python_version >= "3.6" From dee3fea1747be91e8f6a945a172e75ff920474d0 Mon Sep 17 00:00:00 2001 From: tonurmi Date: Wed, 15 Dec 2021 12:53:19 +0200 Subject: [PATCH 122/160] remove rainbowtests --- poetry.lock | 16 +--------------- pyproject.toml | 1 - src/metax_api/settings/components/common.py | 4 ---- 3 files changed, 1 insertion(+), 20 deletions(-) diff --git a/poetry.lock b/poetry.lock index 049711b0..8be0ea6c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -233,17 +233,6 @@ category = "main" optional = false python-versions = "*" -[[package]] -name = "django-rainbowtests" -version = "0.6.0" -description = "A colorful Django Test Runner." -category = "dev" -optional = false -python-versions = "*" - -[package.dependencies] -django = "*" - [[package]] name = "django-split-settings" version = "1.1.0" @@ -1134,7 +1123,7 @@ swagger = ["PyYAML"] [metadata] lock-version = "1.1" python-versions = "^3.7" -content-hash = "75eef2a72ed0c8c07d248ceb0597c043bb417ddc559e60c665660101ed217acd" +content-hash = "c0f6e79d018c633ee4c3b935d5253e27d9e155e0f2b5a33f0e9d0a67b62227a3" [metadata.files] alabaster = [ @@ -1265,9 +1254,6 @@ django-environ = [ {file = "django-environ-0.4.5.tar.gz", hash = "sha256:6c9d87660142608f63ec7d5ce5564c49b603ea8ff25da595fd6098f6dc82afde"}, {file = "django_environ-0.4.5-py2.py3-none-any.whl", hash = "sha256:c57b3c11ec1f319d9474e3e5a79134f40174b17c7cc024bbb2fad84646b120c4"}, ] -django-rainbowtests = [ - {file = "django-rainbowtests-0.6.0.tar.gz", hash = "sha256:0700ee1386935822dca296d323d67b0563cb2e5012b553ebca7c9391f2298cd9"}, -] django-split-settings = [ {file = "django-split-settings-1.1.0.tar.gz", hash = "sha256:6b3aed89667a95525152026eab93a9f038ff22df6883006318b8b4a3d0ca6888"}, {file = "django_split_settings-1.1.0-py3-none-any.whl", hash = "sha256:5d97ae64cf9ed14a831722d82ac725944667ac8c08307b7cfd22e91367b411d0"}, diff --git a/pyproject.toml b/pyproject.toml index 3677a95e..7a0af20b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,7 +37,6 @@ django-watchman = "^1.2.0" icecream = "^2.1.0" black = {version = "^20.8b1", allow-prereleases = true} tblib = "^1.7.0" -django-rainbowtests = "^0.6.0" django-debug-toolbar = "^3.2" PyJWT = "^2.0.1" ipdb = "^0.13.7" diff --git a/src/metax_api/settings/components/common.py b/src/metax_api/settings/components/common.py index 020d7fda..0e976d5a 100755 --- a/src/metax_api/settings/components/common.py +++ b/src/metax_api/settings/components/common.py @@ -145,10 +145,6 
@@ DATABASES["default"]["ENGINE"] = "django.db.backends.postgresql" DATABASES["default"]["ATOMIC_REQUESTS"] = True -# Colorize automated test console output -RAINBOWTESTS_HIGHLIGHT_PATH = str(BASE_DIR) -TEST_RUNNER = "rainbowtests.test.runner.RainbowDiscoverRunner" - # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ From 4340111bf805aa28973e3dac206ef3fd5ebd648d Mon Sep 17 00:00:00 2001 From: tonurmi Date: Wed, 15 Dec 2021 12:54:37 +0200 Subject: [PATCH 123/160] remove rainbowtests from requirements.txt --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 82945848..9c5f41ea 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,7 +17,6 @@ datacite==1.1.2 decorator==5.1.0; python_version >= "3.7" django-debug-toolbar==3.2.3; python_version >= "3.6" django-environ==0.4.5 -django-rainbowtests==0.6.0 django-split-settings==1.1.0; python_version >= "3.6" and python_version < "4.0" django-watchman==1.2.0 django==3.2.10; python_version >= "3.6" From 4993e0d52220e484f13197065ed67a9650c18238 Mon Sep 17 00:00:00 2001 From: Ismo Torvinen Date: Thu, 16 Dec 2021 08:07:07 +0000 Subject: [PATCH 124/160] CSCFAIRMETA-1208: Convert editor permissions id to uuid --- src/metax_api/api/rest/base/router.py | 6 + .../api/rest/base/serializers/__init__.py | 1 + .../editor_permissions_serializer.py | 20 + src/metax_api/api/rest/base/views/__init__.py | 1 + .../api/rest/base/views/common_view.py | 4 +- .../api/rest/base/views/dataset_view.py | 1 + .../base/views/editor_permissions_view.py | 121 +++++ src/metax_api/api/rest/v2/router.py | 6 + .../migrations/0041_add_editorpermissions.py | 195 ++++++++ .../migrations/0042_auto_20211108_1256.py | 21 + ...43_remove_editoruserpermission_verified.py | 17 + src/metax_api/models/__init__.py | 2 +- src/metax_api/models/catalog_record.py | 107 ++++- src/metax_api/models/catalog_record_v2.py | 6 + .../services/catalog_record_service.py | 20 + .../settings/components/access_control.py | 24 + src/metax_api/swagger/v1/swagger.yaml | 170 ++++++- src/metax_api/swagger/v2/swagger.yaml | 164 ++++++- .../api/rest/base/views/datasets/read.py | 13 + .../api/rest/base/views/datasets/write.py | 17 + .../base/views/editorpermissions/__init__.py | 2 + .../rest/base/views/editorpermissions/read.py | 90 ++++ .../base/views/editorpermissions/write.py | 182 ++++++++ .../api/rest/v2/views/datasets/drafts.py | 16 + .../tests/api/rpc/v2/views/dataset_rpc.py | 15 + .../tests/testdata/generate_test_data.py | 88 ++-- src/metax_api/tests/testdata/test_data.json | 418 ++++++++++++++++++ 27 files changed, 1679 insertions(+), 48 deletions(-) create mode 100644 src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py create mode 100644 src/metax_api/api/rest/base/views/editor_permissions_view.py create mode 100644 src/metax_api/migrations/0041_add_editorpermissions.py create mode 100644 src/metax_api/migrations/0042_auto_20211108_1256.py create mode 100644 src/metax_api/migrations/0043_remove_editoruserpermission_verified.py create mode 100644 src/metax_api/tests/api/rest/base/views/editorpermissions/__init__.py create mode 100644 src/metax_api/tests/api/rest/base/views/editorpermissions/read.py create mode 100644 src/metax_api/tests/api/rest/base/views/editorpermissions/write.py diff --git a/src/metax_api/api/rest/base/router.py b/src/metax_api/api/rest/base/router.py index 742103a3..ac745be9 100755 --- a/src/metax_api/api/rest/base/router.py +++ b/src/metax_api/api/rest/base/router.py @@ -27,6 
+27,7 @@ DataCatalogViewSet, DatasetViewSet, DirectoryViewSet, + EditorPermissionViewSet, FileStorageViewSet, FileViewSet, SchemaViewSet, @@ -83,4 +84,9 @@ def get_default_basename(self, viewset): DatasetViewSet, ) +router.register( + "datasets/(?P.+)/editor_permissions/users", + EditorPermissionViewSet, +) + api_urlpatterns = router.urls diff --git a/src/metax_api/api/rest/base/serializers/__init__.py b/src/metax_api/api/rest/base/serializers/__init__.py index 1c243720..84d9c24b 100755 --- a/src/metax_api/api/rest/base/serializers/__init__.py +++ b/src/metax_api/api/rest/base/serializers/__init__.py @@ -13,3 +13,4 @@ from .file_storage_serializer import FileStorageSerializer from .serializer_utils import validate_json from .xml_metadata_serializer import XmlMetadataSerializer +from .editor_permissions_serializer import EditorPermissionsSerializer diff --git a/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py b/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py new file mode 100644 index 00000000..b306b416 --- /dev/null +++ b/src/metax_api/api/rest/base/serializers/editor_permissions_serializer.py @@ -0,0 +1,20 @@ +from django.core.validators import EMPTY_VALUES + +from rest_framework.serializers import ValidationError, ModelSerializer + +from metax_api.models import EditorUserPermission + +from .common_serializer import CommonSerializer + + +class EditorPermissionsSerializer(ModelSerializer): + class Meta: + model = EditorUserPermission + fields = "__all__" + + extra_kwargs = CommonSerializer.Meta.extra_kwargs + + def validate(self, attrs): + data = ModelSerializer.validate(self, attrs) + + return data diff --git a/src/metax_api/api/rest/base/views/__init__.py b/src/metax_api/api/rest/base/views/__init__.py index d5c62dd9..027ae76f 100755 --- a/src/metax_api/api/rest/base/views/__init__.py +++ b/src/metax_api/api/rest/base/views/__init__.py @@ -10,6 +10,7 @@ from .data_catalog_view import DataCatalogViewSet from .dataset_view import DatasetViewSet from .directory_view import DirectoryViewSet +from .editor_permissions_view import EditorPermissionViewSet from .file_storage_view import FileStorageViewSet from .file_view import FileViewSet from .schema_view import SchemaViewSet diff --git a/src/metax_api/api/rest/base/views/common_view.py b/src/metax_api/api/rest/base/views/common_view.py index f5f0be75..cb28087a 100755 --- a/src/metax_api/api/rest/base/views/common_view.py +++ b/src/metax_api/api/rest/base/views/common_view.py @@ -377,7 +377,9 @@ def _check_and_store_bulk_error(self, request, response): """ if "failed" in response.data and len(response.data["failed"]): try: - error_json = ApiErrorSerializerV2.request_to_json(self.request, response, other={"bulk_request": True}) + error_json = ApiErrorSerializerV2.request_to_json( + self.request, response, other={"bulk_request": True} + ) response.data["error_identifier"] = error_json["identifier"] if settings.ENABLE_API_ERROR_OBJECTS: rabbitmq.publish(error_json, exchange="apierrors") diff --git a/src/metax_api/api/rest/base/views/dataset_view.py b/src/metax_api/api/rest/base/views/dataset_view.py index bd416afa..1aeaab0b 100755 --- a/src/metax_api/api/rest/base/views/dataset_view.py +++ b/src/metax_api/api/rest/base/views/dataset_view.py @@ -10,6 +10,7 @@ from django.conf import settings from django.http import Http404 + from rest_framework import status from rest_framework.decorators import action from rest_framework.response import Response diff --git 
a/src/metax_api/api/rest/base/views/editor_permissions_view.py b/src/metax_api/api/rest/base/views/editor_permissions_view.py new file mode 100644 index 00000000..2ff433c1 --- /dev/null +++ b/src/metax_api/api/rest/base/views/editor_permissions_view.py @@ -0,0 +1,121 @@ +# This file is part of the Metax API service +# +# Copyright 2017-2018 Ministry of Education and Culture, Finland +# +# :author: CSC - IT Center for Science Ltd., Espoo Finland +# :license: MIT +import datetime +import logging + +from django.core.validators import EMPTY_VALUES + +from django.shortcuts import get_object_or_404 +from rest_framework import status + +from rest_framework.response import Response + +from metax_api.models import CatalogRecord +from metax_api.models.catalog_record import PermissionRole, EditorUserPermission +from metax_api.permissions import ServicePermissions +from metax_api.services import CommonService + +from ..serializers import EditorPermissionsSerializer +from .common_view import CommonViewSet + +_logger = logging.getLogger(__name__) + + +class EditorPermissionViewSet(CommonViewSet): + lookup_field = "user_id" + permission_classes = [ServicePermissions,] + serializer_class = EditorPermissionsSerializer + + def __init__(self, *args, **kwargs): + super(EditorPermissionViewSet, self).__init__(*args, **kwargs) + + def get_queryset(self): + if CommonService.is_primary_key(self.kwargs['cr_identifier']): + cr = get_object_or_404(CatalogRecord, pk=int(self.kwargs['cr_identifier'])) + else: + cr = get_object_or_404(CatalogRecord, identifier=self.kwargs['cr_identifier']) + return cr.editor_permissions.users + + def list(self, request, *args, **kwargs): + users = self.get_queryset() + editorserializer = EditorPermissionsSerializer(users.all(), many=True) + return Response(editorserializer.data) + + def create(self, request, *args, **kwargs): + data = request.data + + perms_id = self.get_queryset().instance.id + if "user_id" not in data: + return Response({"user_id": "Missing user_id"}, status=status.HTTP_400_BAD_REQUEST) + + editorserializer = None + try: + removed_user = EditorUserPermission.objects_unfiltered.get( + user_id=data.get("user_id"), editor_permissions_id=perms_id + ) + except EditorUserPermission.DoesNotExist: + removed_user = None + + if removed_user not in EMPTY_VALUES and removed_user.removed is True: + data['date_modified'] = datetime.datetime.now() + data['date_removed'] = None + data['removed'] = False + editorserializer = EditorPermissionsSerializer(removed_user, data=data, partial=True) + elif removed_user not in EMPTY_VALUES and removed_user.removed is False: + return Response( + {'user_id': "User_id already exists"}, status=status.HTTP_400_BAD_REQUEST + ) + else: + data['editor_permissions'] = perms_id + data['date_created'] = datetime.datetime.now() + editorserializer = EditorPermissionsSerializer(data=data) + if editorserializer.is_valid(): + editorserializer.save() + return Response(editorserializer.data, status=status.HTTP_201_CREATED) + else: + return Response(editorserializer.errors, status=status.HTTP_400_BAD_REQUEST) + + def destroy(self, request, *args, **kwargs): + user = self.get_object() + users = self.get_queryset() + + creators = users.filter(role=PermissionRole.CREATOR, removed=False).count() + if user.role == PermissionRole.CREATOR and creators < 2: + return Response( + {"error": "Can't delete last creator"}, status=status.HTTP_400_BAD_REQUEST + ) + else: + user.remove() + return Response(status=status.HTTP_200_OK) + + def partial_update(self, request, 
**kwargs): + data = request.data + user = self.get_object() + users = self.get_queryset() + + if 'role' in data and user.role == PermissionRole.CREATOR: + creators = users.filter(role=PermissionRole.CREATOR, removed=False).count() + if creators < 2 and data.get('role') != PermissionRole.CREATOR: + return Response({"error": "Can't change last creator"}, status=status.HTTP_400_BAD_REQUEST) + + data['date_modified'] = datetime.datetime.now() + serializer = EditorPermissionsSerializer(user, data=data, partial=True) + if serializer.is_valid(): + serializer.save() + return Response(serializer.data) + else: + return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + + def update_bulk(self, request, *args, **kwargs): + return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED) + + def partial_update_bulk(self, request, *args, **kwargs): + return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED) + + def destroy_bulk(self, request, *args, **kwargs): + return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED) + diff --git a/src/metax_api/api/rest/v2/router.py b/src/metax_api/api/rest/v2/router.py index 46e703d5..10bcd0c9 100755 --- a/src/metax_api/api/rest/v2/router.py +++ b/src/metax_api/api/rest/v2/router.py @@ -29,6 +29,7 @@ FileStorageViewSet, FileViewSet, SchemaViewSet, + EditorPermissionViewSet, ) from metax_api.api.rest.v2.views import ApiErrorViewSetV2 @@ -110,4 +111,9 @@ def __init__(self, *args, **kwargs): DatasetViewSet, ) +router_v2.register( + "datasets/(?P.+)/editor_permissions/users", + EditorPermissionViewSet, +) + api_urlpatterns = router_v1.urls + router_v2.urls diff --git a/src/metax_api/migrations/0041_add_editorpermissions.py b/src/metax_api/migrations/0041_add_editorpermissions.py new file mode 100644 index 00000000..96a38c29 --- /dev/null +++ b/src/metax_api/migrations/0041_add_editorpermissions.py @@ -0,0 +1,195 @@ +# Generated by Django 3.1.13 on 2021-08-26 11:14 + +from django.db import migrations, models +import django.db.models.deletion +from django.utils import timezone +import logging +import uuid +from metax_api.models import catalog_record + +logger = logging.getLogger(__name__) + + +def add_permissions(apps, schema_editor): + """ + Add EditorPermissions for each version set and related next_draft CatalogRecords. + + Here CatalogRecords not belonging to a DatasetVersionSet are considered a version set with size 1. 
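+
+    Each version set gets a single shared EditorPermissions object, and every
+    metadata_provider_user found in the set is attached to it as a verified
+    "creator" user.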
+ """ + CatalogRecord = apps.get_model("metax_api", "CatalogRecordV2") + EditorUserPermission = apps.get_model("metax_api", "EditorUserPermission") + EditorPermissions = apps.get_model("metax_api", "EditorPermissions") + + num_perms = 0 + num_datasets = 0 + prev_version_set_id = None + version_set_users = [] + version_set_crs = [] + + def flush_version_set(): + """Create single EditorPermissions object for each version set, add creator user""" + nonlocal num_perms, num_datasets, version_set_crs, version_set_users + if len(version_set_crs) > 0: + permissions = EditorPermissions.objects.create() + permissions.catalog_records.add(*version_set_crs) + num_perms += 1 + num_datasets += len(version_set_crs) + + for user in version_set_users: + now = timezone.now().replace(microsecond=0) + EditorUserPermission.objects.create( + editor_permissions=permissions, + user_id=user, + verified=True, + role="creator", + date_created=now, + ) + version_set_users = [] + version_set_crs = [] + + # group datasets by version_sets and include their next_draft datasets + for cr in CatalogRecord.objects.filter(draft_of__isnull=True).order_by( + "dataset_version_set_id", "id" + ): + if cr.dataset_version_set_id is None or cr.dataset_version_set_id != prev_version_set_id: + flush_version_set() + + version_set_crs.append(cr) + if cr.next_draft: + version_set_crs.append(cr.next_draft) + + # DatasetVersionSet shouldn't have multiple metadata_provider_users but this supports them just in case + if cr.metadata_provider_user and cr.metadata_provider_user not in version_set_users: + version_set_users.append(cr.metadata_provider_user) + + prev_version_set_id = cr.dataset_version_set_id + flush_version_set() + + logger.info(f"Added {num_perms} EditorPermissions to {num_datasets} datasets") + + +def revert(apps, schema_editor): + pass + + +class Migration(migrations.Migration): + + dependencies = [ + ("metax_api", "0040_auto_20211006_1116"), + ] + + operations = [ + migrations.CreateModel( + name="EditorPermissions", + fields=[ + ( + "id", + models.UUIDField( + default=uuid.uuid4, + editable=False, + primary_key=True, + serialize=False, + ), + ), + ], + ), + migrations.CreateModel( + name="EditorUserPermission", + fields=[ + ( + "id", + models.UUIDField( + default=uuid.uuid4, + editable=False, + primary_key=True, + serialize=False, + ), + ), + ("active", models.BooleanField(default=True)), + ("removed", models.BooleanField(default=False)), + ("date_modified", models.DateTimeField(null=True)), + ("user_modified", models.CharField(max_length=200, null=True)), + ("date_created", models.DateTimeField()), + ("user_created", models.CharField(max_length=200, null=True)), + ( + "service_modified", + models.CharField( + help_text="Name of the service who last modified the record", + max_length=200, + null=True, + ), + ), + ( + "service_created", + models.CharField( + help_text="Name of the service who created the record", + max_length=200, + null=True, + ), + ), + ("date_removed", models.DateTimeField(null=True)), + ("user_id", models.CharField(max_length=200)), + ( + "role", + models.CharField( + choices=[("creator", "Creator"), ("editor", "Editor")], + max_length=16, + ), + ), + ("verified", models.BooleanField(default=False)), + ("verification_token", models.CharField(max_length=32, null=True)), + ("verification_token_expires", models.DateTimeField(null=True)), + ( + "editor_permissions", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="users", + to="metax_api.editorpermissions", + ), + ), + ], + 
), + migrations.AddField( + model_name="catalogrecord", + name="editor_permissions", + field=models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.PROTECT, + related_name="catalog_records", + to="metax_api.editorpermissions", + ), + ), + migrations.AddIndex( + model_name="editoruserpermission", + index=models.Index(fields=["user_id"], name="metax_api_e_user_id_0b47cc_idx"), + ), + migrations.AddConstraint( + model_name="editoruserpermission", + constraint=models.UniqueConstraint( + fields=("editor_permissions", "user_id"), + name="unique_dataset_user_permission", + ), + ), + migrations.AddConstraint( + model_name="editoruserpermission", + constraint=models.CheckConstraint( + check=models.Q(_negated=True, user_id=""), name="require_user_id" + ), + ), + migrations.AddConstraint( + model_name="editoruserpermission", + constraint=models.CheckConstraint( + check=models.Q(role__in=["creator", "editor"]), name="require_role" + ), + ), + migrations.RunPython(add_permissions, revert), + migrations.AlterField( + model_name="catalogrecord", + name="editor_permissions", + field=models.ForeignKey( + on_delete=django.db.models.deletion.PROTECT, + related_name="catalog_records", + to="metax_api.editorpermissions", + ), + ), + ] diff --git a/src/metax_api/migrations/0042_auto_20211108_1256.py b/src/metax_api/migrations/0042_auto_20211108_1256.py new file mode 100644 index 00000000..51e1afcb --- /dev/null +++ b/src/metax_api/migrations/0042_auto_20211108_1256.py @@ -0,0 +1,21 @@ +# Generated by Django 3.1.13 on 2021-11-08 10:56 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('metax_api', '0041_add_editorpermissions'), + ] + + operations = [ + migrations.RemoveField( + model_name='editoruserpermission', + name='verification_token', + ), + migrations.RemoveField( + model_name='editoruserpermission', + name='verification_token_expires', + ), + ] diff --git a/src/metax_api/migrations/0043_remove_editoruserpermission_verified.py b/src/metax_api/migrations/0043_remove_editoruserpermission_verified.py new file mode 100644 index 00000000..220acfab --- /dev/null +++ b/src/metax_api/migrations/0043_remove_editoruserpermission_verified.py @@ -0,0 +1,17 @@ +# Generated by Django 3.1.13 on 2021-11-10 09:30 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('metax_api', '0042_auto_20211108_1256'), + ] + + operations = [ + migrations.RemoveField( + model_name='editoruserpermission', + name='verified', + ), + ] diff --git a/src/metax_api/models/__init__.py b/src/metax_api/models/__init__.py index ec08fb44..643cac39 100755 --- a/src/metax_api/models/__init__.py +++ b/src/metax_api/models/__init__.py @@ -6,7 +6,7 @@ # :license: MIT from .api_error import ApiError -from .catalog_record import AlternateRecordSet, CatalogRecord +from .catalog_record import AlternateRecordSet, CatalogRecord, EditorPermissions, EditorUserPermission from .catalog_record_v2 import CatalogRecordV2 from .common import Common from .contract import Contract diff --git a/src/metax_api/models/catalog_record.py b/src/metax_api/models/catalog_record.py index d052ee22..ddfa8f95 100755 --- a/src/metax_api/models/catalog_record.py +++ b/src/metax_api/models/catalog_record.py @@ -6,17 +6,21 @@ # :license: MIT import logging +import uuid from collections import defaultdict from copy import deepcopy +from datetime import datetime, timedelta from django.conf import settings from django.contrib.postgres.fields import ArrayField from 
django.db import connection, models, transaction from django.db.models import JSONField, Q, Sum from django.http import Http404 +from django.utils.crypto import get_random_string from rest_framework.serializers import ValidationError from metax_api.exceptions import Http400, Http403, Http503 +from metax_api.tasks.refdata.refdata_indexer import service from metax_api.utils import ( DelayedLog, IdentifierType, @@ -58,6 +62,61 @@ DFT_CATALOG = settings.DFT_DATA_CATALOG_IDENTIFIER +class EditorPermissions(models.Model): + """ + Shared permissions between linked copies of same dataset. + + Attaches a set of EditorUserPermission objects to a set of CatalogRecords. + """ + + id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) + + +class PermissionRole(models.TextChoices): + """Permission role for EditorPermission.""" + + CREATOR = "creator" + EDITOR = "editor" + + +class EditorUserPermission(Common): + """Table for attaching user roles to an EditorPermissions object.""" + + # Override inherited integer based id with uuid + id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) + + # MODEL FIELD DEFINITIONS # + editor_permissions = models.ForeignKey( + EditorPermissions, related_name="users", on_delete=models.CASCADE + ) + user_id = models.CharField(max_length=200) + role = models.CharField(max_length=16, choices=PermissionRole.choices) + + class Meta: + indexes = [ + models.Index( + fields=[ + "user_id", + ] + ), + ] + constraints = [ + models.UniqueConstraint( + fields=["editor_permissions", "user_id"], name="unique_dataset_user_permission" + ), + models.CheckConstraint(check=~models.Q(user_id=""), name="require_user_id"), + models.CheckConstraint( + check=models.Q(role__in=PermissionRole.values), name="require_role" + ), + ] + + def __repr__(self): + return f"" + + def delete(self, *args, **kwargs): + super().remove(*args, **kwargs) + + class DiscardRecord(Exception): pass @@ -445,6 +504,10 @@ class CatalogRecord(Common): help_text="Saves api related info about the dataset. E.g. api version", ) + editor_permissions = models.ForeignKey( + EditorPermissions, related_name="catalog_records", null=False, on_delete=models.PROTECT + ) + # END OF MODEL FIELD DEFINITIONS # """ @@ -1395,14 +1458,18 @@ def _pre_create_operations(self, pid_type=None): # Repotronic catalog does not need to validate unique identifiers # Raise validation error when not repotronic catalog - if self.data_catalog.catalog_json["identifier"] != settings.REPOTRONIC_DATA_CATALOG_IDENTIFIER: + if ( + self.data_catalog.catalog_json["identifier"] + != settings.REPOTRONIC_DATA_CATALOG_IDENTIFIER + ): raise ValidationError( { "detail": [ "Selected catalog %s is a legacy catalog. Preferred identifiers are not " "automatically generated for datasets stored in legacy catalogs, nor is " "their uniqueness enforced. Please provide a value for dataset field " - "preferred_identifier." % self.data_catalog.catalog_json["identifier"] + "preferred_identifier." 
+ % self.data_catalog.catalog_json["identifier"] ] } ) @@ -1446,7 +1513,10 @@ def _pre_create_operations(self, pid_type=None): if "remote_resources" in self.research_dataset: self._calculate_total_remote_resources_byte_size() - if not ("files" in self.research_dataset or "directories" in self.research_dataset) and "total_files_byte_size" in self.research_dataset: + if ( + not ("files" in self.research_dataset or "directories" in self.research_dataset) + and "total_files_byte_size" in self.research_dataset + ): self.research_dataset.pop("total_files_byte_size") if self.cumulative_state == self.CUMULATIVE_STATE_CLOSED: @@ -1462,6 +1532,12 @@ def _pre_create_operations(self, pid_type=None): self._set_api_version() + # only new datasets need new EditorPermissions, copies already have one + if not self.editor_permissions_id: + self._add_editor_permissions() + if self.metadata_provider_user: + self._add_creator_editor_user_permission() + def _post_create_operations(self): if "files" in self.research_dataset or "directories" in self.research_dataset: # files must be added after the record itself has been created, to be able @@ -3037,6 +3113,27 @@ def _copy_undeleted_files_from_old_version(self): if DEBUG: _logger.debug("Added %d files to dataset %s" % (n_files_copied, self._new_version.id)) + def _add_editor_permissions(self): + permissions = EditorPermissions.objects.create() + self.editor_permissions = permissions + + def _add_creator_editor_user_permission(self): + """ + Add creator permission to a newly created CatalogRecord. + """ + perm = EditorUserPermission( + editor_permissions=self.editor_permissions, + user_id=self.metadata_provider_user, + role=PermissionRole.CREATOR, + date_created=self.date_created, + date_modified=self.date_modified, + user_created=self.user_created, + user_modified=self.user_modified, + service_created=self.service_created, + service_modified=self.service_modified, + ) + perm.save() + class RabbitMQPublishRecord: @@ -3080,7 +3177,9 @@ def __call__(self): do_publish = False if do_publish: - rabbitmq.publish(cr_json, routing_key=self.routing_key, exchange=exchange["NAME"]) + rabbitmq.publish( + cr_json, routing_key=self.routing_key, exchange=exchange["NAME"] + ) except: # note: if we'd like to let the request be a success even if this operation fails, # we could simply not raise an exception here. 
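For orientation, the permission wiring in the hunks above reduces to a few ORM calls. The following is a minimal sketch (not part of the patch), assuming the models are exported from metax_api.models as in the __init__.py hunk earlier, and glossing over the audit fields inherited from Common:

    from metax_api.models import CatalogRecord, EditorPermissions, EditorUserPermission
    from metax_api.models.catalog_record import PermissionRole

    # A brand new dataset gets a fresh shared permission set plus one creator row,
    # mirroring _add_editor_permissions / _add_creator_editor_user_permission above.
    perms = EditorPermissions.objects.create()
    EditorUserPermission(
        editor_permissions=perms,
        user_id="abc-user-123",  # would be the record's metadata_provider_user
        role=PermissionRole.CREATOR,
    ).save()

    # Datasets editable by a user then resolve through the reverse relations,
    # excluding soft-removed permission rows ("removed" comes from Common).
    CatalogRecord.objects.filter(
        editor_permissions__users__user_id="abc-user-123",
        editor_permissions__users__removed=False,
    )

Note that user deletion goes through EditorUserPermission.delete(), which delegates to remove() and therefore only soft-deletes the row; the unique constraint on (editor_permissions, user_id) still applies.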
diff --git a/src/metax_api/models/catalog_record_v2.py b/src/metax_api/models/catalog_record_v2.py index b9ee5895..583a75c2 100755 --- a/src/metax_api/models/catalog_record_v2.py +++ b/src/metax_api/models/catalog_record_v2.py @@ -117,6 +117,12 @@ def _pre_create_operations(self): self._set_api_version() + # only new datasets need new EditorPermissions, copies already have one + if not self.editor_permissions_id: + self._add_editor_permissions() + if self.metadata_provider_user: + self._add_creator_editor_user_permission() + def _post_create_operations(self, pid_type=None): if "files" in self.research_dataset or "directories" in self.research_dataset: diff --git a/src/metax_api/services/catalog_record_service.py b/src/metax_api/services/catalog_record_service.py index 3f144272..73277998 100755 --- a/src/metax_api/services/catalog_record_service.py +++ b/src/metax_api/services/catalog_record_service.py @@ -130,8 +130,28 @@ def get_queryset_search_params(cls, request): queryset_search_params["api_meta__contains"] = {"version": value} + if request.query_params.get("editor_permissions_user"): + cls.filter_by_editor_permissions_user(request, queryset_search_params) + return queryset_search_params + @staticmethod + def filter_by_editor_permissions_user(request, queryset_search_params): + """ + Add filter for querying datasets where user has editor user permissions. + """ + user_id = request.query_params["editor_permissions_user"] + + # non-service users can only query their own datasets + if not request.user.is_service: + if request.user.username == '': + raise Http403({"detail": ["Query by editor_permissions_user is only supported for authenticated users"]}) + if request.user.username != user_id: + raise Http403({"detail": ["Provided editor_permissions_user does not match current user"]}) + + queryset_search_params["editor_permissions__users__user_id"] = user_id + queryset_search_params["editor_permissions__users__removed"] = False + @staticmethod def filter_by_state(request, queryset_search_params): """ diff --git a/src/metax_api/settings/components/access_control.py b/src/metax_api/settings/components/access_control.py index 4948560e..156a5b4f 100755 --- a/src/metax_api/settings/components/access_control.py +++ b/src/metax_api/settings/components/access_control.py @@ -11,6 +11,7 @@ "datacatalogs": {}, "datasets": {}, "directories": {}, + "editorpermissions": {}, "files": {}, "filestorages": {}, "schemas": {}, @@ -96,6 +97,29 @@ def __lt__(self, other): Role.ETSIN, ] +api_permissions.rest.editorpermissions.create = [ + Role.METAX, + Role.END_USERS, + Role.TPAS, + Role.QVAIN, + Role.ETSIN, +] +api_permissions.rest.editorpermissions.read = [Role.ALL] +api_permissions.rest.editorpermissions["update"] = [ + Role.METAX, + Role.END_USERS, + Role.TPAS, + Role.QVAIN, + Role.ETSIN, +] +api_permissions.rest.editorpermissions.delete = [ + Role.METAX, + Role.END_USERS, + Role.TPAS, + Role.QVAIN, + Role.ETSIN, +] + api_permissions.rest.directories.read = [ Role.METAX, Role.QVAIN, diff --git a/src/metax_api/swagger/v1/swagger.yaml b/src/metax_api/swagger/v1/swagger.yaml index 9c76ecc8..fdd6bdfc 100755 --- a/src/metax_api/swagger/v1/swagger.yaml +++ b/src/metax_api/swagger/v1/swagger.yaml @@ -640,7 +640,7 @@ paths: in: query description: Sets paging on with default limit of 10 required: false - type: bolean + type: boolean - name: offset in: query description: Offset for paging @@ -1414,8 +1414,133 @@ paths: description: Resource not found. 
tags: - Dataset API - - + /rest/datasets/{CRID}/editor_permissions/users: + get: + summary: List all editor permissions of a record + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + responses: + "200": + description: Successful operation, return a list of editor rights. May return an empty list. + schema: + type: array + items: + $ref: '#/definitions/EditorUserPermission' + "400": + description: Bad request. + "404": + description: Resource not found. + tags: + - Dataset API + post: + summary: Create a new editor permission of a record + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + - name: body + in: body + schema: + type: object + properties: + user_id: + type: string + role: + type: string + responses: + "201": + description: Successful operation, return created editor rights. + schema: + $ref: '#/definitions/EditorUserPermission' + "400": + description: Bad request. + "404": + description: Resource not found. + tags: + - Dataset API + /rest/datasets/{CRID}/editor_permissions/users/{USER_ID}: + get: + summary: Get the editor permissions of a single user of a record + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + - name: USER_ID + in: path + description: User ID + required: true + type: string + responses: + "200": + description: Successful operation, return the editor rights of the user. + schema: + $ref: '#/definitions/EditorUserPermission' + "400": + description: Bad request. + "404": + description: Resource not found. + tags: + - Dataset API + patch: + summary: Update role or enable verified + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + - name: USER_ID + in: path + description: User ID + required: true + type: string + - name: body + in: body + schema: + $ref: '#/definitions/EditorUserPermission' + responses: + "200": + description: Successful operation, return changed editor rights. + schema: + $ref: '#/definitions/EditorUserPermission' + "400": + description: Bad request. + "404": + description: Resource not found.
+ tags: + - Dataset API + delete: + summary: Mark an editor permission of a record as removed + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + - name: USER_ID + in: path + description: User ID + required: true + type: string + responses: + '200': + description: Successful operation + '400': + description: Bad request + '403': + description: Forbidden + '404': + description: Not found + tags: + - Dataset API # Contract API /rest/contracts: get: @@ -2043,8 +2168,43 @@ definitions: count_cumulative: type: number format: float - - + EditorUserPermission: + type: object + properties: + id: + type: string + readOnly: true + active: + type: boolean + removed: + type: boolean + date_modified: + type: string + format: date-time + readOnly: true + user_modified: + type: string + date_created: + type: string + format: date-time + readOnly: true + user_created: + type: string + service_modified: + type: string + service_created: + type: string + date_removed: + type: string + format: date-time + user_id: + type: string + role: + type: string + verified: + type: boolean + editor_permission_id: + type: string examples: count_datasets: type: object diff --git a/src/metax_api/swagger/v2/swagger.yaml b/src/metax_api/swagger/v2/swagger.yaml index 682ffdf8..f7c29ee7 100755 --- a/src/metax_api/swagger/v2/swagger.yaml +++ b/src/metax_api/swagger/v2/swagger.yaml @@ -1598,7 +1598,133 @@ paths: description: Resource not found. tags: - Dataset API - + /rest/datasets/{CRID}/editor_permissions/users: + get: + summary: List all editor permissions of a record + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + responses: + "200": + description: Successful operation, return a list of editor rights. May return an empty list. + schema: + type: array + items: + $ref: '#/definitions/EditorUserPermission' + "400": + description: Bad request. + "404": + description: Resource not found. + tags: + - Dataset API + post: + summary: Create a new editor permission of a record + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + - name: body + in: body + schema: + type: object + properties: + user_id: + type: string + role: + type: string + responses: + "201": + description: Successful operation, return created editor rights. + schema: + $ref: '#/definitions/EditorUserPermission' + "400": + description: Bad request. + "404": + description: Resource not found. + tags: + - Dataset API + /rest/datasets/{CRID}/editor_permissions/users/{USER_ID}: + get: + summary: Get the editor permissions of a single user of a record + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + - name: USER_ID + in: path + description: User ID + required: true + type: string + responses: + "200": + description: Successful operation, return the editor rights of the user. + schema: + $ref: '#/definitions/EditorUserPermission' + "400": + description: Bad request. + "404": + description: Resource not found. + tags: + - Dataset API + patch: + summary: Update role + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + - name: USER_ID + in: path + description: User ID + required: true + type: string + - name: body + in: body + schema: + $ref: '#/definitions/EditorUserPermission' + responses: + "200": + description: Successful operation, return changed editor rights.
+ schema: + $ref: '#/definitions/EditorUserPermission' + "400": + description: Bad request. + "404": + description: Resource not found. + tags: + - Dataset API + delete: + summary: Mark an editor permission of a record as removed + parameters: + - name: CRID + in: path + description: Catalog record ID + required: true + type: string + - name: USER_ID + in: path + description: User ID + required: true + type: string + responses: + '200': + description: Successful operation + '400': + description: Bad request + '403': + description: Forbidden + '404': + description: Not found + tags: + - Dataset API # Contract API /rest/v2/contracts: @@ -2389,7 +2515,41 @@ definitions: count_cumulative: type: number format: float - + EditorUserPermission: + type: object + properties: + id: + type: string + readOnly: true + active: + type: boolean + removed: + type: boolean + date_modified: + type: string + format: date-time + readOnly: true + user_modified: + type: string + date_created: + type: string + format: date-time + readOnly: true + user_created: + type: string + service_modified: + type: string + service_created: + type: string + date_removed: + type: string + format: date-time + user_id: + type: string + role: + type: string + editor_permission_id: + type: string examples: count_datasets: diff --git a/src/metax_api/tests/api/rest/base/views/datasets/read.py b/src/metax_api/tests/api/rest/base/views/datasets/read.py index ce84e9dd..688e99f2 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/read.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/read.py @@ -965,6 +965,19 @@ def test_filter_by_legacy(self): self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual(count_all, response.data["count"], response.data) + def test_filter_by_editor_permissions_user_ok(self): + cr = CatalogRecord.objects.get(pk=1) + cr.editor_permissions.users.update(user_id='test_user_x') + response = self.client.get(f"/rest/datasets?editor_permissions_user=test_user_x") + self.assertEqual(response.data["count"], 1) + + def test_filter_by_editor_permissions_user_removed(self): + cr = CatalogRecord.objects.get(pk=1) + cr.editor_permissions.users.update(user_id='test_user_x') + cr.editor_permissions.users.first().delete() + response = self.client.get(f"/rest/datasets?editor_permissions_user=test_user_x") + self.assertEqual(response.data["count"], 0) + class CatalogRecordApiReadXMLTransformationTests(CatalogRecordApiReadCommon): diff --git a/src/metax_api/tests/api/rest/base/views/datasets/write.py b/src/metax_api/tests/api/rest/base/views/datasets/write.py index ce3ec89f..d9a938f1 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/write.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/write.py @@ -556,6 +556,23 @@ def test_create_catalog_record_using_pid_type(self): response.data["research_dataset"]["preferred_identifier"].startswith("urn:") ) + def test_create_catalog_record_adds_creator_permission(self): + response = self.client.post( + "/rest/datasets", + self.cr_test_data, + format="json", + ) + cr = CatalogRecord.objects.get(id=response.data["id"]) + self.assertEqual( + list(cr.editor_permissions.users.values("user_id", "role")), + [ + { + "user_id": self.cr_test_data["metadata_provider_user"], + "role": "creator", + } + ], + ) + class CatalogRecordApiWriteIdentifierUniqueness(CatalogRecordApiWriteCommon): """ diff --git a/src/metax_api/tests/api/rest/base/views/editorpermissions/__init__.py b/src/metax_api/tests/api/rest/base/views/editorpermissions/__init__.py
new file mode 100644 index 00000000..1707325d --- /dev/null +++ b/src/metax_api/tests/api/rest/base/views/editorpermissions/read.py @@ -0,0 +1,90 @@ +# This file is part of the Metax API service +# +# Copyright 2017-2018 Ministry of Education and Culture, Finland +# +# :author: CSC - IT Center for Science Ltd., Espoo Finland +# :license: MIT + +from json import load as json_load +import uuid + +from django.core.management import call_command + +from rest_framework import status +from rest_framework.test import APITestCase + +from metax_api.tests.utils import TestClassUtils, test_data_file_path + + +class EditorUserPermissionApiReadCommon(APITestCase, TestClassUtils): + @classmethod + def setUpClass(cls): + """Loaded only once for test cases inside this class.""" + call_command("loaddata", test_data_file_path, verbosity=0) + super(EditorUserPermissionApiReadCommon, cls).setUpClass() + + def setUp(self): + self.cr_from_test_data = self._get_whole_object_from_test_data( + "catalogrecord", requested_pk=1 + ) + self.crid = self.cr_from_test_data["pk"] + self.identifier = "cr955e904-e3dd-4d7e-99f1-3fed446f96d1" + self.permissionid = self.cr_from_test_data["fields"]["editor_permissions_id"] + self.editor_user_permission = self._get_whole_object_from_test_data( + "editoruserpermission", requested_pk=str(uuid.UUID(int=1)) + ) + self.userid = self.editor_user_permission["fields"]["user_id"] + self._use_http_authorization() + + def _get_whole_object_from_test_data(self, model_name, requested_pk=0): + + with open(test_data_file_path) as test_data_file: + test_data = json_load(test_data_file) + + model = "metax_api.%s" % model_name + + for row in test_data: + if row["model"] == model: + if row["pk"] == requested_pk: + obj = { + "id": row["pk"], + } + obj.update(row) + return obj + + raise Exception( + "Could not find model %s from test data with pk == %s. " + "Are you certain you generated rows for model %s in generate_test_data.py?"
+ % (model_name, requested_pk, model_name) + ) + + +class EditorUserPermissionApiReadBasicTests(EditorUserPermissionApiReadCommon): + + """Basic read operations.""" + + def test_read_editor_permission_list_with_pk(self): + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + self.assertEqual(response.status_code, status.HTTP_200_OK) + + def test_read_editor_permission_list_with_uuid(self): + response = self.client.get("/rest/datasets/%s/editor_permissions/users" % self.identifier) + self.assertEqual(response.status_code, status.HTTP_200_OK) + + def test_read_editor_permission_list_invalid(self): + response = self.client.get("/rest/datasets/99999/editor_permissions/users") + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) + + def test_read_editor_permission_details_by_pk(self): + response = self.client.get( + "/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, self.userid) + ) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(str(response.data["editor_permissions"]), self.permissionid) + self.assertEqual(response.data["user_id"], self.userid) + + def test_read_editor_permission_details_by_pk_invalid(self): + response = self.client.get( + "/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, "invalid") + ) + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) diff --git a/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py b/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py new file mode 100644 index 00000000..45a39a88 --- /dev/null +++ b/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py @@ -0,0 +1,182 @@ +# This file is part of the Metax API service +# +# Copyright 2017-2018 Ministry of Education and Culture, Finland +# +# :author: CSC - IT Center for Science Ltd., Espoo Finland +# :license: MIT + + +from json import load as json_load +import uuid + +from django.core.management import call_command + +from rest_framework import status +from rest_framework.test import APITestCase + +from metax_api.models import EditorUserPermission +from metax_api.tests.utils import TestClassUtils, test_data_file_path + + +class EditorUserPermissionApiWriteCommon(APITestCase, TestClassUtils): + @classmethod + def setUpClass(cls): + """ + Loaded only once for test cases inside this class. + """ + call_command("loaddata", test_data_file_path, verbosity=0) + super(EditorUserPermissionApiWriteCommon, cls).setUpClass() + + def setUp(self): + self.cr_from_test_data = self._get_whole_object_from_test_data( + "catalogrecord", requested_pk=1 + ) + self.crid = self.cr_from_test_data["pk"] + self.permissionid = self.cr_from_test_data["fields"]["editor_permissions_id"] + self.editor_user_permission = self._get_whole_object_from_test_data( + "editoruserpermission", requested_pk=str(uuid.UUID(int=1)) + ) + self.userid = self.editor_user_permission["fields"]["user_id"] + self._use_http_authorization() + + def _get_whole_object_from_test_data(self, model_name, requested_pk=0): + + with open(test_data_file_path) as test_data_file: + test_data = json_load(test_data_file) + + model = "metax_api.%s" % model_name + + for row in test_data: + if row["model"] == model: + if row["pk"] == requested_pk: + obj = { + "id": row["pk"], + } + obj.update(row) + return obj + + raise Exception( + "Could not find model %s from test data with pk == %s. " + "Are you certain you generated rows for model %s in generate_test_data.py?"
% (model_name, requested_pk, model_name) + ) + + +class EditorUserPermissionApiWriteBasicTests(EditorUserPermissionApiWriteCommon): + + """ + Basic write operations + """ + + def test_write_editor_permission(self): + self._set_http_authorization("service") + data = {"role": "editor", "user_id": "test_editor"} + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + user_count = len(response.data) + response = self.client.post( + "/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json" + ) + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) + self.assertEqual(str(response.data["editor_permissions"]), self.permissionid) + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data), user_count + 1) + + def test_write_editor_permission_invalid_data(self): + self._set_http_authorization("service") + data = {"role": "editor"} + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + user_count = len(response.data) + response = self.client.post( + "/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json" + ) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data), user_count) + + def test_write_editor_permission_existing_userid(self): + self._set_http_authorization("service") + data = {"role": "editor", "user_id": "double_editor"} + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + user_count = len(response.data) + response = self.client.post( + "/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json" + ) + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) + response = self.client.post( + "/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json" + ) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(len(response.data), user_count + 1) + + def test_write_editor_permission_change_values(self): + self._set_http_authorization("service") + data = {"role": "creator", "user_id": "change_editor"} + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + response = self.client.post( + "/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json" + ) + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) + new_data = {"role": "editor"} + response = self.client.patch( + "/rest/datasets/%d/editor_permissions/users/%s" + % (self.crid, response.data.get("user_id")), + new_data, + format="json", + ) + self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) + self.assertEqual(response.data.get("role"), "editor") + + def test_write_editor_permission_remove_users(self): + self._set_http_authorization("service") + data = {"role": "creator", "user_id": "new_creator"} + response = self.client.post( + "/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json" + ) + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) +
response = self.client.delete( + "/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, data.get("user_id")) + ) + self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) + + response = self.client.get("/rest/datasets/%d/editor_permissions/users" % self.crid) + for user in response.data: + if user.get("role") == "creator": + response = self.client.delete( + "/rest/datasets/%d/editor_permissions/users/%s" + % (self.crid, user.get("user_id")) + ) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data) + else: + response = self.client.delete( + "/rest/datasets/%d/editor_permissions/users/%s" + % (self.crid, user.get("user_id")) + ) + self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) + + def test_write_editor_permission_add_removed_user(self): + self._set_http_authorization("service") + data = {"role": "editor", "user_id": "new_editor"} + # add + response = self.client.post( + "/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json" + ) + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) + + # remove + response = self.client.delete( + "/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, data.get("user_id")) + ) + self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) + removed_user = EditorUserPermission.objects_unfiltered.get( + user_id="new_editor", editor_permissions_id=self.permissionid + ) + self.assertEqual(removed_user.removed, True) + response = self.client.post( + "/rest/datasets/%d/editor_permissions/users" % self.crid, data, format="json" + ) + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) + self.assertEqual(response.data.get("removed"), False) diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py b/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py index 710ecfef..398e661c 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/drafts.py @@ -612,6 +612,22 @@ def test_create_and_merge_draft(self): ) self.assertEqual("next_draft" in response.data, False, "next_draft link should be gone") + def test_create_and_merge_draft_keeps_permissions(self): + """ + Ensure creating and merging drafts keeps the same EditorPermission object. 
+ """ + cr = self._create_dataset() + original_editor_permissions_id = CatalogRecordV2.objects.get(id=cr['id']).editor_permissions_id + + draft_cr = self._create_draft(cr["id"]) + draft_editor_permissions_id = CatalogRecordV2.objects.get(id=draft_cr['id']).editor_permissions_id + self.assertEqual(draft_editor_permissions_id, original_editor_permissions_id) + + self._merge_draft_changes(draft_cr["id"]) + merged_editor_permissions_id = CatalogRecordV2.objects.get(id=cr['id']).editor_permissions_id + self.assertEqual(merged_editor_permissions_id, original_editor_permissions_id) + + def test_missing_issued_date_is_generated_when_draft_is_merged(self): """ Testing a case where user removes 'issued_date' from draft before merging diff --git a/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py b/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py index d89f0c29..d61fce67 100755 --- a/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py +++ b/src/metax_api/tests/api/rpc/v2/views/dataset_rpc.py @@ -121,6 +121,21 @@ def test_create_new_version(self): response.data["identifier"], ) + def test_create_new_version_shares_permissions(self): + """ + Ensure new version shares EditorPermissions with the original. + """ + response = self.client.post( + "/rpc/v2/datasets/create_new_version?identifier=5", format="json" + ) + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) + + next_version_identifier = response.data.get("identifier") + cr = CR.objects.get(id=5) + next_version_cr = CR.objects.get(identifier=next_version_identifier) + self.assertEqual(cr.editor_permissions_id, next_version_cr.editor_permissions_id) + + def test_delete_new_version_draft(self): """ Ensure a new version that is created into draft state can be deleted, and is permanently deleted. 
diff --git a/src/metax_api/tests/testdata/generate_test_data.py b/src/metax_api/tests/testdata/generate_test_data.py index 6729396a..85dcaf78 100755 --- a/src/metax_api/tests/testdata/generate_test_data.py +++ b/src/metax_api/tests/testdata/generate_test_data.py @@ -7,6 +7,7 @@ import os import sys +import uuid from copy import deepcopy from json import dump as json_dump, load as json_load @@ -268,6 +269,7 @@ def save_test_data( contract_list, catalog_record_list, dataset_version_sets, + editor_permissions, ): with open("test_data.json", "w") as f: print("dumping test data as json to metax_api/tests/test_data.json...") @@ -278,6 +280,7 @@ def save_test_data( + data_catalogs_list + contract_list + dataset_version_sets + + editor_permissions + catalog_record_list, f, indent=4, @@ -368,6 +371,30 @@ def generate_contracts(contract_max_rows, validate_json): return test_contract_list +def add_editor_permissions(editor_permissions, dataset): + # add EditorPermissions + pk = len(editor_permissions) + rights_pk = str(uuid.UUID(int=pk)) + editor_perms = { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": rights_pk, + } + editor_permissions.append(editor_perms) + editor_user_perms = { + "fields": { + "user_id": dataset["fields"]["metadata_provider_user"], + "date_created": dataset["fields"]["date_created"], + "editor_permissions_id": rights_pk, + "role": "creator", + }, + "model": "metax_api.editoruserpermission", + "pk": str(uuid.UUID(int=pk + 1)), + } + editor_permissions.append(editor_user_perms) + dataset["fields"]["editor_permissions_id"] = rights_pk + + def generate_catalog_records( basic_catalog_record_max_rows, data_catalogs_list, @@ -377,6 +404,7 @@ def generate_catalog_records( type, test_data_list=[], dataset_version_sets=[], + editor_permissions=[], ): print("generating %s catalog records..." 
% type) @@ -426,6 +454,8 @@ def generate_catalog_records( new["fields"]["date_created"] = "2017-05-23T10:07:22Z" new["fields"]["files"] = [] + add_editor_permissions(editor_permissions, new) + # add files if type == "ida": @@ -454,62 +484,46 @@ def generate_catalog_records( # first fifth of files dataset_files[-1]["file_type"] = { "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/text", - "pref_label": { - "fi": "Teksti", - "en": "Text", - "und": "Teksti" - }, - "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type" + "pref_label": {"fi": "Teksti", "en": "Text", "und": "Teksti"}, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", } dataset_files[-1]["use_category"] = { "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/source", "pref_label": { "fi": "LƤhdeaineisto", "en": "Source material", - "und": "LƤhdeaineisto" + "und": "LƤhdeaineisto", }, - "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category" + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", } elif file_divider <= j < (file_divider * 2): # second fifth of files dataset_files[-1]["file_type"] = { "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/video", - "pref_label": { - "fi": "Video", - "en": "Video", - "und": "Video" - }, - "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type" + "pref_label": {"fi": "Video", "en": "Video", "und": "Video"}, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", } dataset_files[-1]["use_category"] = { "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/outcome", "pref_label": { "fi": "Tulosaineisto", "en": "Outcome material", - "und": "Tulosaineisto" + "und": "Tulosaineisto", }, - "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category" + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", } elif (file_divider * 2) <= j < (file_divider * 3): # third fifth of files dataset_files[-1]["file_type"] = { "identifier": "http://uri.suomi.fi/codelist/fairdata/file_type/code/image", - "pref_label": { - "fi": "Kuva", - "en": "Image", - "und": "Kuva" - }, - "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type" + "pref_label": {"fi": "Kuva", "en": "Image", "und": "Kuva"}, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", } dataset_files[-1]["use_category"] = { "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/publication", - "pref_label": { - "fi": "Julkaisu", - "en": "Publication", - "und": "Julkaisu" - }, - "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category" + "pref_label": {"fi": "Julkaisu", "en": "Publication", "und": "Julkaisu"}, + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", } elif (file_divider * 3) <= j < (file_divider * 4): # fourth fifth of files @@ -518,18 +532,18 @@ def generate_catalog_records( "pref_label": { "fi": "LƤhdekoodi", "en": "Source code", - "und": "LƤhdekoodi" + "und": "LƤhdekoodi", }, - "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type" + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/file_type", } dataset_files[-1]["use_category"] = { "identifier": "http://uri.suomi.fi/codelist/fairdata/use_category/code/documentation", "pref_label": { "fi": "Dokumentaatio", "en": "Documentation", - "und": "Dokumentaatio" + "und": "Dokumentaatio", }, - "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category" + "in_scheme": "http://uri.suomi.fi/codelist/fairdata/use_category", } else: # the rest of files @@ -652,6 +666,8 @@ def 
generate_catalog_records( new["fields"]["date_modified"] = "2017-09-23T10:07:22Z" new["fields"]["date_created"] = "2017-05-23T10:07:22Z" + add_editor_permissions(editor_permissions, new) + new["fields"]["research_dataset"]["metadata_version_identifier"] = generate_test_identifier( cr_type, len(test_data_list) + 1, urn=False ) @@ -761,7 +777,7 @@ def generate_catalog_records( json_validate(new["fields"]["research_dataset"], json_schema) test_data_list.append(new) - return test_data_list, dataset_version_sets + return test_data_list, dataset_version_sets, editor_permissions def generate_alt_catalog_records(test_data_list): @@ -844,7 +860,7 @@ def set_qvain_info_to_records(catalog_record_list): ida_data_catalog_max_rows + 1, att_data_catalog_max_rows, validate_json, "att" ) - catalog_record_list, dataset_version_sets = generate_catalog_records( + catalog_record_list, dataset_version_sets, editor_permissions = generate_catalog_records( ida_catalog_record_max_rows, ida_data_catalogs_list, contract_list, @@ -853,7 +869,7 @@ def set_qvain_info_to_records(catalog_record_list): "ida", ) - catalog_record_list, dataset_version_sets = generate_catalog_records( + catalog_record_list, dataset_version_sets, editor_permissions = generate_catalog_records( att_catalog_record_max_rows, att_data_catalogs_list, contract_list, @@ -862,6 +878,7 @@ def set_qvain_info_to_records(catalog_record_list): "att", catalog_record_list, dataset_version_sets, + editor_permissions, ) catalog_record_list = generate_alt_catalog_records(catalog_record_list) @@ -876,6 +893,7 @@ def set_qvain_info_to_records(catalog_record_list): contract_list, catalog_record_list, dataset_version_sets, + editor_permissions, ) print("done") diff --git a/src/metax_api/tests/testdata/test_data.json b/src/metax_api/tests/testdata/test_data.json index 8f513dd9..025b175e 100755 --- a/src/metax_api/tests/testdata/test_data.json +++ b/src/metax_api/tests/testdata/test_data.json @@ -5712,6 +5712,396 @@ "model": "metax_api.datasetversionset", "pk": 13 }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000000" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000000", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-000000000001" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000002" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000002", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-000000000003" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000004" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000004", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-000000000005" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000006" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000006", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 
"00000000-0000-0000-0000-000000000007" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000008" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000008", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-000000000009" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-00000000000a" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000000a", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-00000000000b" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-00000000000c" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000000c", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-00000000000d" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-00000000000e" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000000e", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-00000000000f" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000010" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000010", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-000000000011" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000012" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000012", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-000000000013" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000014" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000014", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-000000000015" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000016" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000016", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-000000000017" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000018" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000018", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 
"00000000-0000-0000-0000-000000000019" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-00000000001a" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000001a", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-00000000001b" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-00000000001c" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000001c", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-00000000001d" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-00000000001e" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000001e", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-00000000001f" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000020" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000020", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-000000000021" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000022" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000022", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-000000000023" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000024" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000024", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-000000000025" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000026" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000026", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-000000000027" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000028" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000028", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-000000000029" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-00000000002a" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000002a", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": 
"00000000-0000-0000-0000-00000000002b" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-00000000002c" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000002c", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-00000000002d" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-00000000002e" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000002e", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-00000000002f" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000030" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000030", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-000000000031" + }, + { + "fields": {}, + "model": "metax_api.editorpermissions", + "pk": "00000000-0000-0000-0000-000000000032" + }, + { + "fields": { + "date_created": "2017-05-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000032", + "role": "creator", + "user_id": "abc-user-123" + }, + "model": "metax_api.editoruserpermission", + "pk": "00000000-0000-0000-0000-000000000033" + }, { "fields": {}, "model": "metax_api.alternaterecordset", @@ -5727,6 +6117,7 @@ "dataset_version_set": 1, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000000", "files": [ 1, 2 @@ -5882,6 +6273,7 @@ "dataset_version_set": 2, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000002", "files": [ 3, 4 @@ -6037,6 +6429,7 @@ "dataset_version_set": 3, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000004", "files": [ 5, 6 @@ -6192,6 +6585,7 @@ "dataset_version_set": 4, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000006", "files": [ 7, 8 @@ -6347,6 +6741,7 @@ "dataset_version_set": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000008", "files": [ 9, 10 @@ -6502,6 +6897,7 @@ "dataset_version_set": 6, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000000a", "files": [ 11, 12 @@ -6657,6 +7053,7 @@ "dataset_version_set": 7, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000000c", "files": [ 13, 14 @@ -6812,6 +7209,7 @@ "dataset_version_set": 8, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000000e", "files": [ 15, 16 @@ -6952,6 +7350,7 @@ "dataset_version_set": 9, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": 
"00000000-0000-0000-0000-000000000010", "files": [ 17, 18 @@ -7078,6 +7477,7 @@ "dataset_version_set": 10, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000012", "files": [ 19, 20 @@ -7201,6 +7601,7 @@ "dataset_version_set": 11, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000014", "files": [ 1, 2, @@ -8152,6 +8553,7 @@ "dataset_version_set": 12, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000016", "files": [ 1, 2, @@ -9103,6 +9505,7 @@ "dataset_version_set": 13, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000018", "files": [ 22, 23, @@ -10146,6 +10549,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000001a", "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9614", "metadata_owner_org": "abc-org-123", @@ -10285,6 +10689,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000001c", "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9615", "metadata_owner_org": "abc-org-123", @@ -10424,6 +10829,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000001e", "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9616", "metadata_owner_org": "abc-org-123", @@ -10563,6 +10969,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000020", "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9617", "metadata_owner_org": "abc-org-123", @@ -10702,6 +11109,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000022", "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9618", "metadata_owner_org": "abc-org-123", @@ -10841,6 +11249,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000024", "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9619", "metadata_owner_org": "abc-org-123", @@ -10980,6 +11389,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000026", "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9620", "metadata_owner_org": "abc-org-123", @@ -11110,6 +11520,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000028", "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9621", "metadata_owner_org": "abc-org-123", @@ 
-11240,6 +11651,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000002a", "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9622", "metadata_owner_org": "abc-org-123", @@ -11370,6 +11782,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000002c", "files": [], "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9623", "metadata_owner_org": "abc-org-123", @@ -11499,6 +11912,7 @@ "data_catalog": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-00000000002e", "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9624", "metadata_owner_org": "abc-org-123", "metadata_provider_org": "abc-org-123", @@ -12433,6 +12847,7 @@ "data_catalog": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000030", "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9625", "metadata_owner_org": "abc-org-123", "metadata_provider_org": "abc-org-123", @@ -13367,6 +13782,7 @@ "data_catalog": 5, "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-09-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000032", "identifier": "cr955e904-e3dd-4d7e-99f1-3fed446f9626", "metadata_owner_org": "abc-org-123", "metadata_provider_org": "abc-org-123", @@ -14304,6 +14720,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000012", "files": [ 19, 20 @@ -14429,6 +14846,7 @@ "dataset_group_edit": "default-dataset-edit-group", "date_created": "2017-05-23T10:07:22Z", "date_modified": "2017-06-23T10:07:22Z", + "editor_permissions_id": "00000000-0000-0000-0000-000000000012", "files": [ 19, 20 From 12294a0856e881fe833b5242872025d71e12f368 Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Thu, 16 Dec 2021 09:58:17 +0200 Subject: [PATCH 125/160] Set Aalto Acris catalog as harvested --- src/metax_api/initialdata/datacatalogs.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/metax_api/initialdata/datacatalogs.json b/src/metax_api/initialdata/datacatalogs.json index aa48b0fe..72abdeba 100755 --- a/src/metax_api/initialdata/datacatalogs.json +++ b/src/metax_api/initialdata/datacatalogs.json @@ -477,7 +477,7 @@ "identifier": "http://lexvo.org/id/iso639-3/fin" } ], - "harvested": false, + "harvested": true, "publisher": { "name": { "en": "Aalto ACRIS", From f7bac506358b156b12066b4dd6ed91705ed3bd20 Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Thu, 16 Dec 2021 16:23:00 +0200 Subject: [PATCH 126/160] Added a migration --- .../0044_change_aalto_catalog_to_harvested.py | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 src/metax_api/migrations/0044_change_aalto_catalog_to_harvested.py diff --git a/src/metax_api/migrations/0044_change_aalto_catalog_to_harvested.py b/src/metax_api/migrations/0044_change_aalto_catalog_to_harvested.py new file mode 100644 index 00000000..f98e7fa8 --- /dev/null +++ b/src/metax_api/migrations/0044_change_aalto_catalog_to_harvested.py @@ -0,0 +1,29 @@ +# Generated by Django 3.1.13 on 2021-12-16 13:39 + +from django.db import migrations + 
+import logging + +logger = logging.getLogger(__name__) + +def change_aalto_catalog_harvested_value(apps, schema_editor, harvested_value = True): + logger.info(f"Change Aalto catalog harvested value to: {harvested_value}") + DataCatalog = apps.get_model('metax_api', 'DataCatalog') + aalto_catalog = DataCatalog.objects.get(catalog_json__identifier = "urn:nbn:fi:att:data-catalog-acris") + aalto_catalog.catalog_json["harvested"] = harvested_value + aalto_catalog.save() + +def revert(apps, schema_editor): + change_aalto_catalog_harvested_value(apps, schema_editor, harvested_value = False) + + + +class Migration(migrations.Migration): + + dependencies = [ + ('metax_api', '0043_remove_editoruserpermission_verified'), + ] + + operations = [ + migrations.RunPython(change_aalto_catalog_harvested_value, revert), + ] From c346b3ccb3dd6f62f6aaf11047da11c78cd075b0 Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Thu, 16 Dec 2021 17:02:59 +0200 Subject: [PATCH 127/160] Check that Aalto catalog exists in migration --- .../0044_change_aalto_catalog_to_harvested.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) mode change 100644 => 100755 src/metax_api/migrations/0044_change_aalto_catalog_to_harvested.py diff --git a/src/metax_api/migrations/0044_change_aalto_catalog_to_harvested.py b/src/metax_api/migrations/0044_change_aalto_catalog_to_harvested.py old mode 100644 new mode 100755 index f98e7fa8..54ca0604 --- a/src/metax_api/migrations/0044_change_aalto_catalog_to_harvested.py +++ b/src/metax_api/migrations/0044_change_aalto_catalog_to_harvested.py @@ -1,5 +1,6 @@ # Generated by Django 3.1.13 on 2021-12-16 13:39 +from django.core.exceptions import ObjectDoesNotExist from django.db import migrations import logging @@ -8,10 +9,14 @@ def change_aalto_catalog_harvested_value(apps, schema_editor, harvested_value = True): logger.info(f"Change Aalto catalog harvested value to: {harvested_value}") - DataCatalog = apps.get_model('metax_api', 'DataCatalog') - aalto_catalog = DataCatalog.objects.get(catalog_json__identifier = "urn:nbn:fi:att:data-catalog-acris") - aalto_catalog.catalog_json["harvested"] = harvested_value - aalto_catalog.save() + try: + DataCatalog = apps.get_model('metax_api', 'DataCatalog') + aalto_catalog = DataCatalog.objects.get(catalog_json__identifier = "urn:nbn:fi:att:data-catalog-acris") + aalto_catalog.catalog_json["harvested"] = harvested_value + aalto_catalog.save() + except DataCatalog.DoesNotExist: + logger.info("Aalto catalog does not exist. 
Passing") + pass def revert(apps, schema_editor): change_aalto_catalog_harvested_value(apps, schema_editor, harvested_value = False) From ddbea0b197f9375dd9ac80badef133d71c3e1e5f Mon Sep 17 00:00:00 2001 From: Jori Niemi <3295718+tahme@users.noreply.github.com> Date: Thu, 16 Dec 2021 15:31:46 +0200 Subject: [PATCH 128/160] CSCFAIRMETA-1271: Allow only services and owner access editor permissions --- .../base/views/editor_permissions_view.py | 35 ++++++++++++++--- .../rest/base/views/editorpermissions/read.py | 20 ++++++++++ .../base/views/editorpermissions/write.py | 39 ++++++++++++++++++- 3 files changed, 87 insertions(+), 7 deletions(-) diff --git a/src/metax_api/api/rest/base/views/editor_permissions_view.py b/src/metax_api/api/rest/base/views/editor_permissions_view.py index 2ff433c1..2c87c001 100644 --- a/src/metax_api/api/rest/base/views/editor_permissions_view.py +++ b/src/metax_api/api/rest/base/views/editor_permissions_view.py @@ -11,12 +11,12 @@ from django.shortcuts import get_object_or_404 from rest_framework import status - from rest_framework.response import Response +from metax_api.exceptions import Http403 from metax_api.models import CatalogRecord from metax_api.models.catalog_record import PermissionRole, EditorUserPermission -from metax_api.permissions import ServicePermissions +from metax_api.permissions import ServicePermissions, EndUserPermissions from metax_api.services import CommonService from ..serializers import EditorPermissionsSerializer @@ -27,17 +27,40 @@ class EditorPermissionViewSet(CommonViewSet): lookup_field = "user_id" - permission_classes = [ServicePermissions,] + permission_classes = [ + ServicePermissions, + EndUserPermissions, + ] serializer_class = EditorPermissionsSerializer def __init__(self, *args, **kwargs): super(EditorPermissionViewSet, self).__init__(*args, **kwargs) + def _user_has_access_to_permissions(self, cr): + """ + Allow only services and dataset owner access permissions. 
+ """ + if self.request.user.is_service: + if self.request.method == "GET": + return True + # for updating perms, require edit access to catalog records + return cr._check_catalog_permissions( + cr.data_catalog.catalog_record_group_edit, + cr.data_catalog.catalog_record_services_edit, + self.request, + ) + if cr.user_is_owner(self.request): + return True + # unknown user + return False + def get_queryset(self): - if CommonService.is_primary_key(self.kwargs['cr_identifier']): - cr = get_object_or_404(CatalogRecord, pk=int(self.kwargs['cr_identifier'])) + if CommonService.is_primary_key(self.kwargs["cr_identifier"]): + cr = get_object_or_404(CatalogRecord, pk=int(self.kwargs["cr_identifier"])) else: - cr = get_object_or_404(CatalogRecord, identifier=self.kwargs['cr_identifier']) + cr = get_object_or_404(CatalogRecord, identifier=self.kwargs["cr_identifier"]) + if not self._user_has_access_to_permissions(cr): + raise Http403({"detail": ["You do not have access to permissions of this dataset."]}) return cr.editor_permissions.users def list(self, request, *args, **kwargs): diff --git a/src/metax_api/tests/api/rest/base/views/editorpermissions/read.py b/src/metax_api/tests/api/rest/base/views/editorpermissions/read.py index 1707325d..8cf73f33 100644 --- a/src/metax_api/tests/api/rest/base/views/editorpermissions/read.py +++ b/src/metax_api/tests/api/rest/base/views/editorpermissions/read.py @@ -7,6 +7,7 @@ from json import load as json_load import uuid +import responses from django.core.management import call_command @@ -30,6 +31,7 @@ def setUp(self): self.crid = self.cr_from_test_data["pk"] self.identifier = "cr955e904-e3dd-4d7e-99f1-3fed446f96d1" self.permissionid = self.cr_from_test_data["fields"]["editor_permissions_id"] + self.metadata_provider_user = self.cr_from_test_data["fields"]["metadata_provider_user"] self.editor_user_permission = self._get_whole_object_from_test_data( "editoruserpermission", requested_pk=str(uuid.UUID(int=1)) ) @@ -88,3 +90,21 @@ def test_read_editor_permission_details_by_pk_invalid(self): "/rest/datasets/%d/editor_permissions/users/%s" % (self.crid, "invalid") ) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) + + @responses.activate + def test_read_editor_permission_list_by_wrong_user(self): + self._mock_token_validation_succeeds() + self._use_http_authorization( + method="bearer", token={"group_names": [], "CSCUserName": "not_dataset_creator"} + ) + response = self.client.get(f"/rest/datasets/{self.crid}/editor_permissions/users") + self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) + + @responses.activate + def test_read_editor_permission_list_by_provider_user(self): + self._mock_token_validation_succeeds() + self._use_http_authorization( + method="bearer", token={"group_names": [], "CSCUserName": self.metadata_provider_user} + ) + response = self.client.get(f"/rest/datasets/{self.crid}/editor_permissions/users") + self.assertEqual(response.status_code, status.HTTP_200_OK) diff --git a/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py b/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py index 45a39a88..99ccc858 100644 --- a/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py +++ b/src/metax_api/tests/api/rest/base/views/editorpermissions/write.py @@ -9,12 +9,13 @@ from json import load as json_load import uuid +import responses from django.core.management import call_command from rest_framework import status from rest_framework.test import APITestCase -from metax_api.models import 
EditorUserPermission
+from metax_api.models import CatalogRecord, EditorUserPermission
 from metax_api.tests.utils import TestClassUtils, test_data_file_path
 
 
@@ -36,6 +37,7 @@ def setUp(self):
         self.editor_user_permission = self._get_whole_object_from_test_data(
             "editoruserpermission", requested_pk=str(uuid.UUID(int=1))
         )
+        self.metadata_provider_user = self.cr_from_test_data["fields"]["metadata_provider_user"]
         self.userid = self.editor_user_permission["fields"]["user_id"]
 
         self._use_http_authorization()
@@ -180,3 +182,38 @@ def test_write_editor_permission_add_removed_user(self):
         )
         self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
         self.assertEqual(response.data.get("removed"), False)
+
+    @responses.activate
+    def test_write_editor_permission_not_dataset_creator(self):
+        self._mock_token_validation_succeeds()
+        self._use_http_authorization(
+            method="bearer", token={"group_names": [], "CSCUserName": "not_dataset_creator"}
+        )
+        data = {"role": "editor", "user_id": "test_editor"}
+        response = self.client.post(
+            f"/rest/datasets/{self.crid}/editor_permissions/users", data, format="json"
+        )
+        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
+
+    @responses.activate
+    def test_write_editor_permission_provider_user(self):
+        self._mock_token_validation_succeeds()
+        self._use_http_authorization(
+            method="bearer", token={"group_names": [], "CSCUserName": self.metadata_provider_user}
+        )
+        data = {"role": "editor", "user_id": "test_editor"}
+        response = self.client.post(
+            f"/rest/datasets/{self.crid}/editor_permissions/users", data, format="json"
+        )
+        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
+
+    def test_write_editor_permission_list_service_with_no_write_access(self):
+        cr = CatalogRecord.objects.get(pk=self.crid)
+        cr.data_catalog.catalog_record_services_edit = ""
+        cr.data_catalog.save()
+        self._set_http_authorization("service")
+        data = {"role": "editor", "user_id": "test_editor"}
+        response = self.client.post(
+            f"/rest/datasets/{self.crid}/editor_permissions/users", data, format="json"
+        )
+        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

From bda109218ce34c2efa4dd57a919dce5c9e0107af Mon Sep 17 00:00:00 2001
From: Jori Niemi <3295718+tahme@users.noreply.github.com>
Date: Fri, 17 Dec 2021 14:46:05 +0200
Subject: [PATCH 129/160] CSCFAIRMETA-1277: Allow qvain-light to create and
 edit editor permissions

---
 src/metax_api/settings/components/access_control.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/metax_api/settings/components/access_control.py b/src/metax_api/settings/components/access_control.py
index 156a5b4f..c380e85e 100755
--- a/src/metax_api/settings/components/access_control.py
+++ b/src/metax_api/settings/components/access_control.py
@@ -102,6 +102,7 @@ def __lt__(self, other):
     Role.END_USERS,
     Role.TPAS,
     Role.QVAIN,
+    Role.QVAIN_LIGHT,
     Role.ETSIN,
 ]
 api_permissions.rest.editorpermissions.read = [Role.ALL]
@@ -110,6 +111,7 @@ def __lt__(self, other):
     Role.END_USERS,
     Role.TPAS,
     Role.QVAIN,
+    Role.QVAIN_LIGHT,
     Role.ETSIN,
 ]
 api_permissions.rest.editorpermissions.delete = [
@@ -117,6 +119,7 @@ def __lt__(self, other):
     Role.END_USERS,
     Role.TPAS,
     Role.QVAIN,
+    Role.QVAIN_LIGHT,
     Role.ETSIN,
 ]

From ad5dd2db9f346ae0646d94b8746c0ebb8c630757 Mon Sep 17 00:00:00 2001
From: Atro Himanen
Date: Fri, 17 Dec 2021 15:42:18 +0200
Subject: [PATCH 130/160] Added a check for empty strings in input prefixes

---
 src/metax_api/management/commands/mark_files_removed.py | 3 
+++ 1 file changed, 3 insertions(+) diff --git a/src/metax_api/management/commands/mark_files_removed.py b/src/metax_api/management/commands/mark_files_removed.py index 22c9ec86..b00bcfcc 100644 --- a/src/metax_api/management/commands/mark_files_removed.py +++ b/src/metax_api/management/commands/mark_files_removed.py @@ -32,6 +32,9 @@ def handle(self, *args, **options): removed_files_sum = 0 for prefix in path_prefixes: + if not prefix.strip(): + logger.info("Prefix is empty. Skipping.") + continue files = File.objects.filter(project_identifier = options["project_identifier"], file_path__startswith = prefix, removed = "f") logger.info(f"Found {len(files)} files to remove in project: {options['project_identifier']} with path prefix: {prefix}") for file in files: From c64f30df4a0f356c5d3de4a6e6ac7638d74de8be Mon Sep 17 00:00:00 2001 From: aptiaine Date: Mon, 20 Dec 2021 12:00:41 +0200 Subject: [PATCH 131/160] Added a missing date_removed timestamp to file removal --- src/metax_api/services/file_service.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/metax_api/services/file_service.py b/src/metax_api/services/file_service.py index ad2d9617..ad0c983e 100755 --- a/src/metax_api/services/file_service.py +++ b/src/metax_api/services/file_service.py @@ -208,7 +208,8 @@ def restore_files(cls, request, file_identifier_list): removed = false, file_deleted = NULL, date_modified = CURRENT_TIMESTAMP, - user_modified = NULL + user_modified = NULL, + date_removed = NULL from (values %s ) as results(id, service_modified, parent_directory_id) @@ -440,8 +441,11 @@ def _mark_files_as_deleted(file_ids): _logger.info("Marking files as removed...") sql_delete_files = """ - update metax_api_file - set removed = true, file_deleted = CURRENT_TIMESTAMP, date_modified = CURRENT_TIMESTAMP + update metax_api_file set + removed = true, + file_deleted = CURRENT_TIMESTAMP, + date_modified = CURRENT_TIMESTAMP, + date_removed = CURRENT_TIMESTAMP where active = true and removed = false and id in %s""" From 7e9b2d0cab81a8b1005e4a5ac2544dcd18997954 Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Tue, 21 Dec 2021 15:40:58 +0200 Subject: [PATCH 132/160] CSCFAIRMETA-1244: Added a catalog and service user for SD --- src/metax_api/initialdata/datacatalogs.json | 60 +++++++++++++++++++ .../settings/components/access_control.py | 1 + src/metax_api/settings/components/common.py | 2 + src/metax_api/settings/environments/stable.py | 6 +- 4 files changed, 66 insertions(+), 3 deletions(-) diff --git a/src/metax_api/initialdata/datacatalogs.json b/src/metax_api/initialdata/datacatalogs.json index 72abdeba..a12c5355 100755 --- a/src/metax_api/initialdata/datacatalogs.json +++ b/src/metax_api/initialdata/datacatalogs.json @@ -594,4 +594,64 @@ "catalog_record_services_edit": "metax,eudat", "catalog_record_services_create": "metax,eudat", "catalog_record_services_read": "metax,eudat" +}, +{ + "catalog_json": { + "title": { + "en": "CSC SD services datasets", + "fi": "CSC:n Arkaluonteisen datan palveluiden aineistokatalogi", + }, + "language": [ + { + "title": { + "en": "English language", + "fi": "Englannin kieli", + "sv": "engelska", + "und": "Englannin kieli" + }, + "identifier": "http://lexvo.org/id/iso639-3/eng" + } + ], + "harvested": true, + "publisher": { + "name": { + "en": "CSC Sensitive Data Services for Research", + }, + "homepage": [ + { + "title": { + "en": "Sensitive Data Services for Research - Services for Research - CSC Company Site", + }, + "identifier": 
"https://research.csc.fi/sensitive-data-services-for-research" + } + ] + }, + "identifier": "urn:nbn:fi:att:data-catalog-sd", + "access_rights": { + "license": [ + { + "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-4.0" + } + ], + "access_type": [ + { + "identifier": "http://uri.suomi.fi/codelist/fairdata/access_type/code/open", + "pref_label": { + "en": "Open", + "fi": "Avoin", + "und": "Avoin" + } + } + ], + "description": { + "en": "Datasets stored in CSC SD services", + "fi": "SisƤltƤƤ aineistoja CSC:n SD-palvelusta" + } + }, + "dataset_versioning": false, + "research_dataset_schema": "att" + }, + "catalog_record_services_edit": "metax,sd", + "catalog_record_services_create": "metax,sd", + "catalog_record_services_read": "metax,sd" }] \ No newline at end of file diff --git a/src/metax_api/settings/components/access_control.py b/src/metax_api/settings/components/access_control.py index c380e85e..c3740dcf 100755 --- a/src/metax_api/settings/components/access_control.py +++ b/src/metax_api/settings/components/access_control.py @@ -40,6 +40,7 @@ class Role(Enum): REPOTRONIC = "repotronic" AALTO = "aalto" EUDAT = "eudat" + SD = "sd" def __ge__(self, other): if self.__class__ is other.__class__: diff --git a/src/metax_api/settings/components/common.py b/src/metax_api/settings/components/common.py index 02288571..4b2c8b06 100755 --- a/src/metax_api/settings/components/common.py +++ b/src/metax_api/settings/components/common.py @@ -15,6 +15,7 @@ REPOTRONIC_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-repotronic" AALTO_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-acris" FMI_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-fmi" +SD_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-sd" END_USER_ALLOWED_DATA_CATALOGS = [ IDA_DATA_CATALOG_IDENTIFIER, @@ -196,6 +197,7 @@ {"password": "test-download", "username": "download"}, {"password": "test-eudat", "username": "eudat"}, {"password": "test-jyu", "username": "jyu"}, + {"password": "test-sd", "username": "sd"}, ] SWAGGER_YAML_PATH = env('SWAGGER_YAML_PATH') diff --git a/src/metax_api/settings/environments/stable.py b/src/metax_api/settings/environments/stable.py index f5b563b1..87d6590f 100644 --- a/src/metax_api/settings/environments/stable.py +++ b/src/metax_api/settings/environments/stable.py @@ -1,9 +1,9 @@ from metax_api.settings.components.access_control import Role, api_permissions, prepare_perm_values from metax_api.settings.environments.staging import API_USERS # noqa: F401 -api_permissions.rest.datasets.create += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO, Role.EUDAT] -api_permissions.rest.datasets["update"] += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO, Role.EUDAT] -api_permissions.rest.datasets.delete += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO, Role.EUDAT] +api_permissions.rest.datasets.create += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO, Role.EUDAT, Role.SD] +api_permissions.rest.datasets["update"] += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO, Role.EUDAT, Role.SD] +api_permissions.rest.datasets.delete += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO, Role.EUDAT, Role.SD] api_permissions.rest.directories.read += [Role.IDA, Role.QVAIN_LIGHT] From 058094f7f39e25d28f64768fefc0304aef9e65f2 Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Thu, 23 Dec 2021 11:06:35 +0200 Subject: [PATCH 133/160] Finetuned the FMI catalog details --- 
src/metax_api/initialdata/datacatalogs.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/metax_api/initialdata/datacatalogs.json b/src/metax_api/initialdata/datacatalogs.json index aa48b0fe..5e504978 100755 --- a/src/metax_api/initialdata/datacatalogs.json +++ b/src/metax_api/initialdata/datacatalogs.json @@ -523,9 +523,9 @@ { "catalog_json": { "title": { - "en": "Finnish Meteorological Institute catalog", - "fi": "Ilmatieteen laitoksen katalogi", - "sv": "Meteorologiska Institutet register" + "en": "Finnish Meteorological Institute EUDAT B2SHARE catalog", + "fi": "Ilmatieteen laitoksen EUDAT B2SHARE -katalogi", + "sv": "Meteorologiska Institutet EUDAT B2SHARE register" }, "language": [ { @@ -584,8 +584,8 @@ } ], "description": { - "en": "Contains datasets of Finnish Meteorological Institute", - "fi": "Ilmatieteen laitoksen aineistot" + "en": "Contains datasets of Finnish Meteorological Institute harvested from FMI's EUDAT B2SHARE instance", + "fi": "SisƤltƤƤ aineistoja, jotka on kerƤtty Ilmatieteen laitoksen EUDAT B2SHARE -instanssista" } }, "dataset_versioning": false, From 879726976497c76082dfda36121f6e869e52ad9e Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Thu, 23 Dec 2021 12:46:33 +0200 Subject: [PATCH 134/160] Updated Reportronic name --- src/metax_api/initialdata/datacatalogs.json | 20 +++++++++---------- src/metax_api/models/catalog_record.py | 6 +++--- .../settings/components/access_control.py | 2 +- src/metax_api/settings/components/common.py | 4 ++-- src/metax_api/settings/environments/stable.py | 6 +++--- .../api/rest/base/views/datasets/write.py | 6 +++--- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/metax_api/initialdata/datacatalogs.json b/src/metax_api/initialdata/datacatalogs.json index 72abdeba..115d846d 100755 --- a/src/metax_api/initialdata/datacatalogs.json +++ b/src/metax_api/initialdata/datacatalogs.json @@ -422,8 +422,8 @@ { "catalog_json": { "title": { - "en": "Repotronic catalog", - "fi": "Repotronic katalogi" + "en": "Reportronic catalog", + "fi": "Reportronic katalogi" }, "language": [ { @@ -433,11 +433,11 @@ "harvested": false, "publisher": { "name": { - "en": "Repotronic", - "fi": "Repotronic" + "en": "Reportronic", + "fi": "Reportronic" } }, - "identifier": "urn:nbn:fi:att:data-catalog-repotronic", + "identifier": "urn:nbn:fi:att:data-catalog-reportronic", "access_rights": { "license": [ { @@ -455,16 +455,16 @@ } ], "description": { - "en": "Contains datasets from Repotronic service", - "fi": "SisƤltƤƤ aineistoja Repotronic-palvelusta" + "en": "Contains datasets from Reportronic service", + "fi": "SisƤltƤƤ aineistoja Reportronic-palvelusta" } }, "dataset_versioning": false, "research_dataset_schema": "att" }, - "catalog_record_services_edit": "metax,repotronic", - "catalog_record_services_create": "metax,repotronic", - "catalog_record_services_read": "metax,repotronic" + "catalog_record_services_edit": "metax,reportronic", + "catalog_record_services_create": "metax,reportronic", + "catalog_record_services_read": "metax,reportronic" }, { "catalog_json": { diff --git a/src/metax_api/models/catalog_record.py b/src/metax_api/models/catalog_record.py index ddfa8f95..f7da8e55 100755 --- a/src/metax_api/models/catalog_record.py +++ b/src/metax_api/models/catalog_record.py @@ -1456,11 +1456,11 @@ def _pre_create_operations(self, pid_type=None): elif self.catalog_is_legacy(): if "preferred_identifier" not in self.research_dataset: - # Repotronic catalog does not need to validate unique identifiers - # 
Raise validation error when not repotronic catalog + # Reportronic catalog does not need to validate unique identifiers + # Raise validation error when not reportronic catalog if ( self.data_catalog.catalog_json["identifier"] - != settings.REPOTRONIC_DATA_CATALOG_IDENTIFIER + != settings.REPORTRONIC_DATA_CATALOG_IDENTIFIER ): raise ValidationError( { diff --git a/src/metax_api/settings/components/access_control.py b/src/metax_api/settings/components/access_control.py index c380e85e..5f5dac87 100755 --- a/src/metax_api/settings/components/access_control.py +++ b/src/metax_api/settings/components/access_control.py @@ -37,7 +37,7 @@ class Role(Enum): API_AUTH_USER = "api_auth_user" EXTERNAL = "external" JYU = "jyu" - REPOTRONIC = "repotronic" + REPORTRONIC = "reportronic" AALTO = "aalto" EUDAT = "eudat" diff --git a/src/metax_api/settings/components/common.py b/src/metax_api/settings/components/common.py index 02288571..cd3028d2 100755 --- a/src/metax_api/settings/components/common.py +++ b/src/metax_api/settings/components/common.py @@ -12,7 +12,7 @@ PAS_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-pas" LEGACY_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-legacy" DFT_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-dft" -REPOTRONIC_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-repotronic" +REPORTRONIC_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-reportronic" AALTO_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-acris" FMI_DATA_CATALOG_IDENTIFIER = "urn:nbn:fi:att:data-catalog-fmi" @@ -27,7 +27,7 @@ # catalogs where uniqueness of dataset pids is not enforced. LEGACY_CATALOGS = [ LEGACY_DATA_CATALOG_IDENTIFIER, - REPOTRONIC_DATA_CATALOG_IDENTIFIER, + REPORTRONIC_DATA_CATALOG_IDENTIFIER, ] VALIDATE_TOKEN_URL = env("VALIDATE_TOKEN_URL") diff --git a/src/metax_api/settings/environments/stable.py b/src/metax_api/settings/environments/stable.py index f5b563b1..62512f48 100644 --- a/src/metax_api/settings/environments/stable.py +++ b/src/metax_api/settings/environments/stable.py @@ -1,9 +1,9 @@ from metax_api.settings.components.access_control import Role, api_permissions, prepare_perm_values from metax_api.settings.environments.staging import API_USERS # noqa: F401 -api_permissions.rest.datasets.create += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO, Role.EUDAT] -api_permissions.rest.datasets["update"] += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO, Role.EUDAT] -api_permissions.rest.datasets.delete += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPOTRONIC, Role.AALTO, Role.EUDAT] +api_permissions.rest.datasets.create += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPORTRONIC, Role.AALTO, Role.EUDAT] +api_permissions.rest.datasets["update"] += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPORTRONIC, Role.AALTO, Role.EUDAT] +api_permissions.rest.datasets.delete += [Role.IDA, Role.QVAIN_LIGHT, Role.JYU, Role.REPORTRONIC, Role.AALTO, Role.EUDAT] api_permissions.rest.directories.read += [Role.IDA, Role.QVAIN_LIGHT] diff --git a/src/metax_api/tests/api/rest/base/views/datasets/write.py b/src/metax_api/tests/api/rest/base/views/datasets/write.py index d9a938f1..dd39c091 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/write.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/write.py @@ -1074,10 +1074,10 @@ def test_catalog_record_deprecation_updates_date_modified(self): self.assertTrue(cr_depr.deprecated) # self.assertEqual(cr_depr.date_modified, cr_depr.date_deprecated, 'date_modified 
should be updated') - def test_catalog_record_create_repotronic_dataset(self): + def test_catalog_record_create_reportronic_dataset(self): - # Create the repotronic catalog - dc_id = django_settings.REPOTRONIC_DATA_CATALOG_IDENTIFIER + # Create the reportronic catalog + dc_id = django_settings.REPORTRONIC_DATA_CATALOG_IDENTIFIER blueprint_dc = DataCatalog.objects.get(pk=1) catalog_json = blueprint_dc.catalog_json catalog_json["identifier"] = dc_id From 86b470e48d5043250ad7ae1854c59a22c4fba882 Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Tue, 28 Dec 2021 12:42:07 +0200 Subject: [PATCH 135/160] Fixed the JSON syntax --- src/metax_api/initialdata/datacatalogs.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/metax_api/initialdata/datacatalogs.json b/src/metax_api/initialdata/datacatalogs.json index 7e8e5901..502c8ff5 100755 --- a/src/metax_api/initialdata/datacatalogs.json +++ b/src/metax_api/initialdata/datacatalogs.json @@ -599,7 +599,7 @@ "catalog_json": { "title": { "en": "CSC SD services datasets", - "fi": "CSC:n Arkaluonteisen datan palveluiden aineistokatalogi", + "fi": "CSC:n Arkaluonteisen datan palveluiden aineistokatalogi" }, "language": [ { From 8cd5e5e298ac27126bb4d8d2b61cef2d9a66a4cd Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Tue, 28 Dec 2021 13:30:09 +0200 Subject: [PATCH 136/160] Yet more JSON syntax fixes in data catalog --- src/metax_api/initialdata/datacatalogs.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/metax_api/initialdata/datacatalogs.json b/src/metax_api/initialdata/datacatalogs.json index 502c8ff5..8fb2b693 100755 --- a/src/metax_api/initialdata/datacatalogs.json +++ b/src/metax_api/initialdata/datacatalogs.json @@ -615,12 +615,12 @@ "harvested": true, "publisher": { "name": { - "en": "CSC Sensitive Data Services for Research", + "en": "CSC Sensitive Data Services for Research" }, "homepage": [ { "title": { - "en": "Sensitive Data Services for Research - Services for Research - CSC Company Site", + "en": "Sensitive Data Services for Research - Services for Research - CSC Company Site" }, "identifier": "https://research.csc.fi/sensitive-data-services-for-research" } From 2c7539da364f60d6984834f542a7b58baa16a326 Mon Sep 17 00:00:00 2001 From: Jori Niemi <3295718+tahme@users.noreply.github.com> Date: Wed, 5 Jan 2022 09:59:50 +0200 Subject: [PATCH 137/160] CSCFAIRMETA-1290: Add organization to REMS calls * Add REMS organization id to configuration * Check that organization exists in REMS * Use REMS organization in REMS calls that require them * Specify REMS config for unittests environment --- ENV_VARS.md | 1 + src/metax_api/services/rems_service.py | 33 ++++++++++++--- src/metax_api/settings/components/rems.py | 1 + .../settings/environments/unittests.py | 16 ++++++++ .../api/rest/base/views/datasets/write.py | 41 +++++++++++++++++++ 5 files changed, 87 insertions(+), 5 deletions(-) diff --git a/ENV_VARS.md b/ENV_VARS.md index 7afadd56..ec31024b 100755 --- a/ENV_VARS.md +++ b/ENV_VARS.md @@ -56,6 +56,7 @@ copy .env.template to .env and fill the required values from below table. 
Requir | REMS_FORM_ID | no | | Required if REMS is enabled | | REMS_METAX_USER | no | | Required if REMS is enabled | | REMS_REPORTER_USER | no | | Required if REMS is enabled | +| REMS_ORGANIZATION | no | | Required if REMS is enabled | | SERVER_DOMAIN_NAME | no | metax.fd-dev.csc.fi | | ENABLE_V1_ENDPOINTS | no | True | ENABLE_V2_ENDPOINTS | no | True diff --git a/src/metax_api/services/rems_service.py b/src/metax_api/services/rems_service.py index ffcbeb9f..766d6f8b 100755 --- a/src/metax_api/services/rems_service.py +++ b/src/metax_api/services/rems_service.py @@ -39,6 +39,7 @@ def __init__(self): self.reporter_user = settings["REPORTER_USER"] self.auto_approver = settings["AUTO_APPROVER"] self.form_id = settings["FORM_ID"] + self.organization = settings["ORGANIZATION"] self.headers = { "x-rems-api-key": self.api_key, @@ -60,6 +61,9 @@ def create_rems_entity(self, cr, user_info): """ self.cr = cr + # raise error if configured organization does not exist in REMS + self._get_rems_organization() + # create user. Successful even if userid is already taken self._post_rems("user", user_info) @@ -112,6 +116,12 @@ def _get_catalogue_item(self, rems_id): return rems_ci + def _get_rems_organization(self): + """ + Get configured organization object from REMS. + """ + return self._get_rems("organization", id=self.organization) + def _close_applications(self, rems_id, reason): """ Get all applications that are related to dataset and close them. @@ -153,7 +163,7 @@ def _close_entity(self, entity, id): def _create_workflow(self, user_id): body = { - "organization": self.cr.metadata_owner_org, + "organization": {"organization/id": self.organization}, "title": self.cr.research_dataset["preferred_identifier"], "type": "workflow/default", "handlers": [user_id], @@ -176,7 +186,11 @@ def _create_license(self): if any([v["textcontent"] == license_url for v in lic["localizations"].values()]): return lic["id"] - body = {"licensetype": "link", "localizations": {}} + body = { + "licensetype": "link", + "localizations": {}, + "organization": {"organization/id": self.organization}, + } for lang in list(license["title"].keys()): body["localizations"].update( @@ -190,7 +204,7 @@ def _create_license(self): def _create_resource(self, license_id): body = { "resid": self.cr.rems_identifier, - "organization": self.cr.metadata_owner_org, + "organization": {"organization/id": self.organization}, "licenses": [license_id], } @@ -207,6 +221,7 @@ def _create_catalogue_item(self, res_id, wf_id): "wfid": wf_id, "localizations": {}, "enabled": True, + "organization": {"organization/id": self.organization}, } for lang in list(rd_title.keys()): @@ -273,9 +288,17 @@ def _put_rems(self, entity, action, body): return resp - def _get_rems(self, entity, params=""): + def _get_rems(self, entity, params="", id=None): + """Get list of REMS entities or single entity by id. """ + if id: + id_path = f"/{id}" + else: + id_path = "" + try: - response = requests.get(f"{self.base_url}/{entity}s?{params}", headers=self.headers) + response = requests.get( + f"{self.base_url}/{entity}s{id_path}?{params}", headers=self.headers + ) except Exception as e: raise Exception(f"Connection to REMS failed while getting {entity}. 
Error: {e}") diff --git a/src/metax_api/settings/components/rems.py b/src/metax_api/settings/components/rems.py index 810c1ec7..abbcd718 100644 --- a/src/metax_api/settings/components/rems.py +++ b/src/metax_api/settings/components/rems.py @@ -19,5 +19,6 @@ REMS["REPORTER_USER"] = env("REMS_REPORTER_USER") REMS["AUTO_APPROVER"] = env("REMS_AUTO_APPROVER") REMS["FORM_ID"] = int(env("REMS_FORM_ID")) + REMS["ORGANIZATION"] = env("REMS_ORGANIZATION") except ImproperlyConfigured as e: logger.warning(e) diff --git a/src/metax_api/settings/environments/unittests.py b/src/metax_api/settings/environments/unittests.py index 631d9182..e6950902 100755 --- a/src/metax_api/settings/environments/unittests.py +++ b/src/metax_api/settings/environments/unittests.py @@ -58,3 +58,19 @@ api_permissions.rpc.files.delete_project.use += [Role.TEST_USER] API_ACCESS = prepare_perm_values(api_permissions.to_dict()) + +from metax_api.settings.components.rems import REMS + +REMS.update( + { + "ENABLED": True, + "API_KEY": "key", + "BASE_URL": "https://mock-rems/api", + "ETSIN_URL_TEMPLATE": "https://etsin.fd-dev.csc.fi/dataset/%s", + "METAX_USER": "rems-metax@example.com", + "REPORTER_USER": "rems-reporter@example.com", + "AUTO_APPROVER": "not-used", + "FORM_ID": 1, + "ORGANIZATION": "rems-test-org", + } +) diff --git a/src/metax_api/tests/api/rest/base/views/datasets/write.py b/src/metax_api/tests/api/rest/base/views/datasets/write.py index dd39c091..6c16b1e7 100755 --- a/src/metax_api/tests/api/rest/base/views/datasets/write.py +++ b/src/metax_api/tests/api/rest/base/views/datasets/write.py @@ -4726,6 +4726,7 @@ def setUp(self): for entity in ["user", "workflow", "license", "resource", "catalogue-item"]: self._mock_rems_write_access_succeeds("POST", entity, "create") + self._mock_rems_read_access_organization_succeeds() self._mock_rems_read_access_succeeds("license") # mock successful rems access for deletion. 
Add fails later @@ -4777,7 +4778,44 @@ def _mock_rems_write_access_succeeds(self, method, entity, action): status=200, ) + def _mock_rems_read_access_organization_succeeds(self): + resp = { + "archived": False, + "organization/id": django_settings.REMS["ORGANIZATION"], + "organization/short-name": { + "fi": "Test org", + "en": "Test org", + "sv": "Test org" + }, + "organization/review-emails": [], + "enabled": True, + "organization/owners": [], + "organization/modifier": { + "userid": "RDowner@funet.fi", + "name": "RDowner REMSDEMO", + "email": "RDowner.test@test_example.org" + }, + "organization/last-modified": "2022-01-05T00:01:44.034Z", + "organization/name": { + "fi": "Test organization", + "en": "Test organization", + "sv": "Test organization" + } + } + responses.add( + responses.GET, + f"{django_settings.REMS['BASE_URL']}/organizations/{django_settings.REMS['ORGANIZATION']}", + json=resp, + status=200, + ) + def _mock_rems_read_access_succeeds(self, entity): + + organization = { + "organization/id": django_settings.REMS["ORGANIZATION"], + "organization/short-name": {"fi": "Test org", "en": "Test org", "sv": "Test org"}, + "organization/name": {"fi": "Test organization", "en": "Test organization", "sv": "Test organization"}, + } if entity == "license": resp = [ { @@ -4785,6 +4823,7 @@ def _mock_rems_read_access_succeeds(self, entity): "licensetype": "link", "enabled": True, "archived": False, + "organization": organization, "localizations": { "fi": { "title": self.rf["reference_data"]["license"][0]["label"]["fi"], @@ -4801,6 +4840,7 @@ def _mock_rems_read_access_succeeds(self, entity): "licensetype": "link", "enabled": True, "archived": False, + "organization": organization, "localizations": { "en": { "title": self.rf["reference_data"]["license"][1]["label"]["en"], @@ -4814,6 +4854,7 @@ def _mock_rems_read_access_succeeds(self, entity): resp = [ { "archived": False, + "organization": organization, "localizations": { "en": { "id": 18, From beade3567f65dab37041917356a02795c41be34e Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Tue, 28 Dec 2021 14:54:18 +0200 Subject: [PATCH 138/160] added api endpoint for flushing datasets created by a certain user and modified flush_project api endpoint. 
added tests and swagger documentation for both endpoints --- .../api/rpc/base/views/dataset_rpc.py | 42 ++++++++ src/metax_api/api/rpc/base/views/file_rpc.py | 5 +- .../settings/components/access_control.py | 1 + .../settings/environments/production.py | 2 + .../settings/environments/staging.py | 1 + src/metax_api/swagger/v1/swagger.yaml | 38 ++++++++ src/metax_api/swagger/v2/swagger.yaml | 38 ++++++++ .../tests/api/rpc/base/views/dataset_rpc.py | 95 +++++++++++++++++++ .../tests/api/rpc/base/views/file_rpc.py | 71 +++++++++++++- 9 files changed, 291 insertions(+), 2 deletions(-) diff --git a/src/metax_api/api/rpc/base/views/dataset_rpc.py b/src/metax_api/api/rpc/base/views/dataset_rpc.py index 9a8f2336..df35d13e 100755 --- a/src/metax_api/api/rpc/base/views/dataset_rpc.py +++ b/src/metax_api/api/rpc/base/views/dataset_rpc.py @@ -9,6 +9,7 @@ from json import load from django.conf import settings as django_settings +from django.db import connection from django.http import Http404 from rest_framework import status from rest_framework.decorators import action @@ -198,3 +199,44 @@ def _save_and_publish_dataset(self, cr, action): super(CatalogRecord, cr).save(update_fields=["preservation_identifier"]) DataciteDOIUpdate(cr, cr.preservation_identifier, action)() + + @action(detail=False, methods=["post"], url_path="flush_user_data") + def flush_user_data(self, request): + """ + Permanently delete datasets created by a certain user. + + WARNING! Not meant for active production use!! + """ + if django_settings.ENV == "production": + raise Http400({"detail": ["API currently allowed only in test environments"]}) + + if "metadata_provider_user" not in request.query_params: + raise Http400({"detail": ["metadata_provider_user is a required query parameter"]}) + + user = request.query_params["metadata_provider_user"] + + sql_delete_cr_files = """ + delete from metax_api_catalogrecord_files + where catalogrecord_id in ( + select id from metax_api_catalogrecord where metadata_provider_user = %s + ) + """ + + sql_delete_datasets = """ + delete from metax_api_catalogrecord where metadata_provider_user = %s + """ + + _logger.info( + "Flushing datasets for user %s on the request of user: %s" % (user, request.user.username) + ) + + with connection.cursor() as cr: + cr.execute(sql_delete_cr_files, [user]) + cr.execute(sql_delete_datasets, [user]) + if cr.rowcount == 0: + _logger.info("No datasets found for user %s" % user) + return Response(user, status=status.HTTP_404_NOT_FOUND) + + _logger.info("Permanently deleted all datasets created by user %s" % user) + + return Response(data=None, status=status.HTTP_204_NO_CONTENT) diff --git a/src/metax_api/api/rpc/base/views/file_rpc.py b/src/metax_api/api/rpc/base/views/file_rpc.py index eb8dd169..e4567b98 100755 --- a/src/metax_api/api/rpc/base/views/file_rpc.py +++ b/src/metax_api/api/rpc/base/views/file_rpc.py @@ -33,7 +33,7 @@ def delete_project(self, request): return FileService.delete_project(request.query_params["project_identifier"]) @action(detail=False, methods=["post"], url_path="flush_project") - def flush_project(self, request): # pragma: no cover + def flush_project(self, request): """ Permanently delete an entire project's files and directories. 
@@ -70,6 +70,9 @@ def flush_project(self, request): # pragma: no cover cr.execute(sql_delete_cr_files, [project]) cr.execute(sql_delete_files, [project]) cr.execute(sql_delete_directories, [project]) + if cr.rowcount == 0: + _logger.info("No files or directories found for project %s" % project) + return Response(project, status=status.HTTP_404_NOT_FOUND) _logger.info("Permanently deleted all files and directories from project %s" % project) diff --git a/src/metax_api/settings/components/access_control.py b/src/metax_api/settings/components/access_control.py index 49a4469e..ebd99fd4 100755 --- a/src/metax_api/settings/components/access_control.py +++ b/src/metax_api/settings/components/access_control.py @@ -165,6 +165,7 @@ def __lt__(self, other): api_permissions.rpc.datasets.create_draft.use = [Role.ALL] api_permissions.rpc.datasets.create_new_version.use = [Role.ALL] api_permissions.rpc.datasets.fix_deprecated.use = [Role.ALL] +api_permissions.rpc.datasets.flush_user_data.use = [Role.METAX, Role.IDA, Role.TPAS] api_permissions.rpc.datasets.get_minimal_dataset_template.use = [Role.ALL] api_permissions.rpc.datasets.merge_draft.use = [Role.ALL] api_permissions.rpc.datasets.publish_dataset.use = [Role.ALL] diff --git a/src/metax_api/settings/environments/production.py b/src/metax_api/settings/environments/production.py index cf5223cb..30c9b856 100644 --- a/src/metax_api/settings/environments/production.py +++ b/src/metax_api/settings/environments/production.py @@ -1,6 +1,8 @@ from metax_api.settings.environments.stable import api_permissions, prepare_perm_values from metax_api.settings.environments.staging import API_USERS # noqa: F401 +api_permissions.rpc.datasets.flush_user_data.use.clear() + api_permissions.rpc.files.flush_project.use.clear() API_ACCESS = prepare_perm_values(api_permissions.to_dict()) diff --git a/src/metax_api/settings/environments/staging.py b/src/metax_api/settings/environments/staging.py index e9c1675a..88ca063e 100644 --- a/src/metax_api/settings/environments/staging.py +++ b/src/metax_api/settings/environments/staging.py @@ -31,6 +31,7 @@ api_permissions.rest.filestorages.delete = [Role.ALL] api_permissions.rpc.datasets.set_preservation_identifier.use = [Role.ALL] +api_permissions.rpc.datasets.flush_user_data.use = [Role.ALL] api_permissions.rpc.files.delete_project.use = [Role.ALL] api_permissions.rpc.files.flush_project.use = [Role.ALL] diff --git a/src/metax_api/swagger/v1/swagger.yaml b/src/metax_api/swagger/v1/swagger.yaml index fdd6bdfc..8aa085c9 100755 --- a/src/metax_api/swagger/v1/swagger.yaml +++ b/src/metax_api/swagger/v1/swagger.yaml @@ -1820,6 +1820,25 @@ paths: description: Successful operation, returns information about the new dataset version. tags: - Dataset RPC + /rpc/datasets/flush_user_data: + post: + summary: Flushes user's datasets + description: Usable by ida, tpas and metax users. Permanently deletes datasets created by a certain user. Only for non-production use. + parameters: + - name: metadata_provider_user + in: query + description: Identifies the user whose datasets will be flushed. + required: true + type: string + responses: + '204': + description: Successful operation, returns no content. + '400': + description: Required parameter was missing or API was called in production environment. Response includes details. + '404': + description: No datasets were found for requested user. + tags: + - Dataset RPC /rpc/statistics/count_datasets: get: summary: Get the total number of datasets and file sizes. 
@@ -1992,6 +2011,25 @@ paths: description: Unsuccesful operation when project identifier is missing. tags: - File RPC + /rpc/files/flush_project: + post: + summary: Flushes given project from the database. + description: Usable by ida, tpas and metax users. Permanently deletes given project's files and directories. Only for non-production use. + parameters: + - name: project_identifier + in: query + description: Identifies the project whose files and directories will be flushed. + required: true + type: string + responses: + '204': + description: Successful operation, returns no content. + '400': + description: Required parameter was missing or API was called in production environment. Response includes details. + '404': + description: No files or directories were found for requested project. + tags: + - File RPC parameters: diff --git a/src/metax_api/swagger/v2/swagger.yaml b/src/metax_api/swagger/v2/swagger.yaml index f7c29ee7..f93683a6 100755 --- a/src/metax_api/swagger/v2/swagger.yaml +++ b/src/metax_api/swagger/v2/swagger.yaml @@ -2100,6 +2100,25 @@ paths: description: Not found tags: - Dataset RPC + /rpc/v2/datasets/flush_user_data: + post: + summary: Flushes user's datasets + description: Usable by ida, tpas and metax users. Permanently deletes datasets created by a certain user. Only for non-production use. + parameters: + - name: metadata_provider_user + in: query + description: Identifies the user whose datasets will be flushed. + required: true + type: string + responses: + '204': + description: Successful operation, returns no content. + '400': + description: Required parameter was missing or API was called in production environment. Response includes details. + '404': + description: No datasets were found for requested user. + tags: + - Dataset RPC /rpc/v2/statistics/count_datasets: get: summary: Get the total number of datasets and file sizes. @@ -2272,6 +2291,25 @@ paths: description: Unsuccesful operation when project identifier is missing. tags: - File RPC + /rpc/v2/files/flush_project: + post: + summary: Flushes given project from the database. + description: Usable by ida, tpas and metax users. Permanently deletes given project's files and directories. Only for non-production use. + parameters: + - name: project_identifier + in: query + description: Identifies the project whose files and directories will be flushed. + required: true + type: string + responses: + '204': + description: Successful operation, returns no content. + '400': + description: Required parameter was missing or API was called in production environment. Response includes details. + '404': + description: No files or directories were found for requested project. + tags: + - File RPC parameters: diff --git a/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py b/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py index ab033762..f2d1230c 100755 --- a/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py +++ b/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py @@ -660,3 +660,98 @@ def test_fix_deprecated_nested_directories_3(self): self.assertTrue( "/TestExperiment/Directory_2/Group_2/Group_2_deeper/file_12.txt" not in rd_filepaths ) + +class FlushUserDataTests(CatalogRecordApiWriteCommon): + """ + Tests that datasets created by a certain user can be flushed. 
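+    Flushing permanently deletes the catalog records and their file
+    associations, but the files themselves are left untouched.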
+ """ + + def test_existing_users_datasets_with_no_files_are_flushed(self): + self._use_http_authorization("metax") + + # create new dataset for user "abcd-1234" + username = "abcd-1234" + self.cr_test_data["metadata_provider_user"] = username + self.cr_test_data["research_dataset"].pop("files", None) + self.cr_test_data["research_dataset"].pop("directories", None) + response = self.client.post( + "/rest/v2/datasets?draft=false", self.cr_test_data, format="json" + ) + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) + + # make sure the dataset exists now + cr = response.data + response = self.client.get("/rest/datasets/{}".format(cr["identifier"]), format="json") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["metadata_provider_user"], username) + + # flush user data for user "abcd-1234" and check that this succeeds + response = self.client.post("/rpc/datasets/flush_user_data?metadata_provider_user=%s" % username) + self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) + + response = self.client.get("/rest/datasets/{}".format(cr["identifier"]), format="json") + self.assertEqual("not found" in response.json()["detail"].lower(), True) + + def test_existing_users_datasets_with_files_are_flushed(self): + self._use_http_authorization("metax") + + # create new dataset for user "abcde-12345" + username = "abcde-12345" + self.cr_test_data["metadata_provider_user"] = username + response = self.client.post( + "/rest/v2/datasets?draft=false", self.cr_test_data, format="json" + ) + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) + + # make sure the dataset exists now + cr = response.data + response = self.client.get("/rest/datasets/{}".format(cr["identifier"]), format="json") + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["metadata_provider_user"], username) + + # save file related to dataset to later make sure it still exists + response = self.client.get("/rest/datasets/{}/files".format(cr["identifier"]), format="json") + cr_file = response.data[0]["identifier"] + + # flush user data for user "abcde-12345" and check that this succeeds + response = self.client.post("/rpc/datasets/flush_user_data?metadata_provider_user=%s" % username) + self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data) + + response = self.client.get("/rest/datasets/{}".format(cr["identifier"]), format="json") + self.assertEqual("not found" in response.json()["detail"].lower(), True) + + # check that the file still exists (flushing datasets should not result to files being deleted) + response = self.client.get("/rest/files/{}".format(cr_file), format="json") + self.assertEqual(response.status_code, status.HTTP_200_OK) + + def test_incorrect_parameters(self): + self._use_http_authorization("metax") + + # non existing username should return HTTP 404 + username = "doesnt_exist_123" + response = self.client.post("/rpc/datasets/flush_user_data?metadata_provider_user=%s" % username) + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.data) + + # no metadata provider user should return HTTP 400 + response = self.client.post("/rpc/datasets/flush_user_data") + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + + # wrong request method (delete instead of post) + username = "abcde-12345" + self.cr_test_data["metadata_provider_user"] = username + response = self.client.post( + 
"/rest/v2/datasets?draft=false", self.cr_test_data, format="json" + ) + self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) + + response = self.client.delete( + "/rpc/datasets/flush_user_data?metadata_provider_user=%s" % username + ) + self.assertEqual(response.status_code, 501) + + # wrong user + self._use_http_authorization("api_auth_user") + response = self.client.post( + "/rpc/datasets/flush_user_data?metadata_provider_user=%s" % username + ) + self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) diff --git a/src/metax_api/tests/api/rpc/base/views/file_rpc.py b/src/metax_api/tests/api/rpc/base/views/file_rpc.py index 1e22da54..a380cb79 100755 --- a/src/metax_api/tests/api/rpc/base/views/file_rpc.py +++ b/src/metax_api/tests/api/rpc/base/views/file_rpc.py @@ -50,7 +50,7 @@ def test_wrong_parameters(self): response = self.client.post( "/rpc/files/delete_project?project_identifier=research_project_112" ) - # self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) + self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_known_project_identifier(self): response = self.client.post( @@ -78,3 +78,72 @@ def test_datasets_are_marked_deprecated(self): self.client.post("/rpc/files/delete_project?project_identifier=project_x") response = self.client.get("/rest/datasets/%s" % related_dataset.identifier) self.assertEqual(response.data["deprecated"], True) + + +class FlushProjectTests(FileRPCTests): + + """ + Checks that an entire project's files and directories can be deleted. + """ + + def test_wrong_parameters(self): + # correct user, no project identifier + self._use_http_authorization("metax") + response = self.client.post("/rpc/files/flush_project") + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + + # nonexisting project identifier: + response = self.client.post("/rpc/files/flush_project?project_identifier=non_existing") + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) + + # wrong request method + response = self.client.delete( + "/rpc/files/flush_project?project_identifier=research_project_112" + ) + self.assertEqual(response.status_code, 501) + + # wrong user + self._use_http_authorization("api_auth_user") + response = self.client.post( + "/rpc/files/flush_project?project_identifier=research_project_112" + ) + self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) + + def test_known_project_identifier(self): + self._use_http_authorization("metax") + response = self.client.post( + "/rpc/files/flush_project?project_identifier=research_project_112" + ) + self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) + + def test_files_are_deleted(self): + self._use_http_authorization("metax") + + # make sure project has files before deleting them + files_count_before = File.objects.filter(project_identifier="research_project_112").count() + self.assertNotEqual(files_count_before, 0) + + # delete all files for project + response = self.client.post( + "/rpc/files/flush_project?project_identifier=research_project_112" + ) + self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) + + # make sure all files are now deleted + files_count_after = File.objects.filter(project_identifier="research_project_112").count() + self.assertEqual(files_count_after, 0) + + def test_directories_are_deleted(self): + self._use_http_authorization("metax") + + # make sure project has directories before deleting them + dirs_count_before = 
Directory.objects.filter(project_identifier="research_project_112").count() + self.assertNotEqual(dirs_count_before, 0) + + # delete all directories for project + response = self.client.post("/rpc/files/flush_project?project_identifier=research_project_112") + self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) + + # make sure all directories are now deleted + dirs_count_after = Directory.objects.filter(project_identifier="research_project_112").count() + self.assertEqual(dirs_count_after, 0) \ No newline at end of file From 2a5a0f1f12e0d6afab06de15e627ec1daf7283ae Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Thu, 13 Jan 2022 12:15:50 +0200 Subject: [PATCH 139/160] CSCFAIRMETA-1286: Added publish fields to datacatalogs and use them when publishing to RabbitMQ --- docker-compose.yml | 2 + .../serializers/data_catalog_serializer.py | 2 + src/metax_api/initialdata/datacatalogs.json | 60 ++++++++++------ .../0045_add_publish_fields_to_catalogs.py | 50 +++++++++++++ src/metax_api/models/catalog_record.py | 41 +++++++---- src/metax_api/models/data_catalog.py | 7 ++ .../tests/api/rest/v2/views/datasets/write.py | 72 ++++++++++++++++++- 7 files changed, 199 insertions(+), 35 deletions(-) create mode 100644 src/metax_api/migrations/0045_add_publish_fields_to_catalogs.py diff --git a/docker-compose.yml b/docker-compose.yml index 7e37522f..b70b717c 100755 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -51,6 +51,8 @@ services: metax-rabbitmq: image: rabbitmq:3-management hostname: metax-rabbitmq + ports: + - 15672:15672 volumes: - metax-rabbitmq:/var/lib/rabbitmq diff --git a/src/metax_api/api/rest/base/serializers/data_catalog_serializer.py b/src/metax_api/api/rest/base/serializers/data_catalog_serializer.py index bd6294ad..0b8dcfab 100755 --- a/src/metax_api/api/rest/base/serializers/data_catalog_serializer.py +++ b/src/metax_api/api/rest/base/serializers/data_catalog_serializer.py @@ -28,6 +28,8 @@ class Meta: "catalog_record_services_create", "catalog_record_group_read", "catalog_record_services_read", + "publish_to_ttv", + "publish_to_etsin", ) + CommonSerializer.Meta.fields extra_kwargs = CommonSerializer.Meta.extra_kwargs diff --git a/src/metax_api/initialdata/datacatalogs.json b/src/metax_api/initialdata/datacatalogs.json index 8fb2b693..4a604446 100755 --- a/src/metax_api/initialdata/datacatalogs.json +++ b/src/metax_api/initialdata/datacatalogs.json @@ -71,9 +71,11 @@ "research_dataset_schema": "ida", "logo": "fairdata_tree_logo.svg" }, - "catalog_record_services_edit": "ida,metax,qvain,qvain-light,tpas", - "catalog_record_services_create": "ida,metax,qvain,qvain-light,tpas", - "catalog_record_services_read": "ida,metax,qvain,qvain-light,etsin,tpas,download" + "catalog_record_services_edit": "ida,metax,qvain,qvain-light,tpas", + "catalog_record_services_create": "ida,metax,qvain,qvain-light,tpas", + "catalog_record_services_read": "ida,metax,qvain,qvain-light,etsin,tpas,download", + "publish_to_etsin": true, + "publish_to_ttv": true }, { "catalog_json": { @@ -148,9 +150,11 @@ "research_dataset_schema": "att", "logo": "fairdata_tree_logo.svg" }, - "catalog_record_services_edit": "ida,metax,qvain,qvain-light,tpas", - "catalog_record_services_create": "ida,metax,qvain,qvain-light,tpas", - "catalog_record_services_read": "ida,metax,qvain,qvain-light,etsin,tpas,download" + "catalog_record_services_edit": "ida,metax,qvain,qvain-light,tpas", + "catalog_record_services_create": "ida,metax,qvain,qvain-light,tpas", + "catalog_record_services_read": 
"ida,metax,qvain,qvain-light,etsin,tpas,download", + "publish_to_etsin": true, + "publish_to_ttv": true }, { "catalog_json": { @@ -226,11 +230,13 @@ "research_dataset_schema": "ida", "logo": "fairdata_tree_logo.svg" }, - "catalog_record_group_edit": "pas_edit", - "catalog_record_group_create": "pas_create", - "catalog_record_services_edit": "metax,tpas", - "catalog_record_services_create": "metax,tpas", - "catalog_record_services_read": "metax,tpas,download" + "catalog_record_group_edit": "pas_edit", + "catalog_record_group_create": "pas_create", + "catalog_record_services_edit": "metax,tpas", + "catalog_record_services_create": "metax,tpas", + "catalog_record_services_read": "metax,tpas,download", + "publish_to_etsin": true, + "publish_to_ttv": true }, { "catalog_json": { @@ -298,7 +304,9 @@ }, "catalog_record_services_edit": "metax,qvain,qvain-light,tpas", "catalog_record_services_create": "metax,qvain,qvain-light,tpas", - "catalog_record_services_read": "metax,qvain,qvain-light,tpas,download" + "catalog_record_services_read": "metax,qvain,qvain-light,tpas,download", + "publish_to_etsin": false, + "publish_to_ttv": false }, { "catalog_json": { @@ -354,9 +362,11 @@ "dataset_versioning": false, "research_dataset_schema": "dft" }, - "catalog_record_services_edit": "metax,qvain,qvain-light,tpas, ida, testuser, api_auth_user", - "catalog_record_services_create": "metax,qvain,qvain-light,tpas, ida, testuser, api_auth_user", - "catalog_record_services_read": "metax,qvain,qvain-light,etsin,tpas, ida, testuser, api_auth_user,download" + "catalog_record_services_edit": "metax,qvain,qvain-light,tpas, ida, testuser, api_auth_user", + "catalog_record_services_create": "metax,qvain,qvain-light,tpas, ida, testuser, api_auth_user", + "catalog_record_services_read": "metax,qvain,qvain-light,etsin,tpas, ida, testuser, api_auth_user,download", + "publish_to_etsin": true, + "publish_to_ttv": true }, { "catalog_json": { @@ -417,7 +427,9 @@ }, "catalog_record_services_edit": "metax,jyu", "catalog_record_services_create": "metax,jyu", - "catalog_record_services_read": "metax,jyu,download" + "catalog_record_services_read": "metax,jyu,download", + "publish_to_etsin": true, + "publish_to_ttv": true }, { "catalog_json": { @@ -464,7 +476,9 @@ }, "catalog_record_services_edit": "metax,reportronic", "catalog_record_services_create": "metax,reportronic", - "catalog_record_services_read": "metax,reportronic" + "catalog_record_services_read": "metax,reportronic", + "publish_to_etsin": false, + "publish_to_ttv": true }, { "catalog_json": { @@ -518,7 +532,9 @@ }, "catalog_record_services_edit": "metax,aalto", "catalog_record_services_create": "metax,aalto", - "catalog_record_services_read": "metax,aalto" + "catalog_record_services_read": "metax,aalto", + "publish_to_etsin": true, + "publish_to_ttv": true }, { "catalog_json": { @@ -593,7 +609,9 @@ }, "catalog_record_services_edit": "metax,eudat", "catalog_record_services_create": "metax,eudat", - "catalog_record_services_read": "metax,eudat" + "catalog_record_services_read": "metax,eudat", + "publish_to_etsin": true, + "publish_to_ttv": true }, { "catalog_json": { @@ -653,5 +671,7 @@ }, "catalog_record_services_edit": "metax,sd", "catalog_record_services_create": "metax,sd", - "catalog_record_services_read": "metax,sd" + "catalog_record_services_read": "metax,sd", + "publish_to_etsin": true, + "publish_to_ttv": true }] \ No newline at end of file diff --git a/src/metax_api/migrations/0045_add_publish_fields_to_catalogs.py 
b/src/metax_api/migrations/0045_add_publish_fields_to_catalogs.py new file mode 100644 index 00000000..6c09ef02 --- /dev/null +++ b/src/metax_api/migrations/0045_add_publish_fields_to_catalogs.py @@ -0,0 +1,50 @@ +from django.db import migrations, models + +import logging + +logger = logging.getLogger(__name__) + + +def add_publish_fields_to_catalogs(apps, schema_editor): + logger.info("Add publish_to_etsin and publish_to_ttv fields to data catalogs") + + dont_publish_to_etsin = ["urn:nbn:fi:att:data-catalog-legacy", "urn:nbn:fi:att:data-catalog-reportronic", "urn:nbn:fi:att:data-catalog-repotronic"] + dont_publish_to_ttv = ["urn:nbn:fi:att:data-catalog-legacy"] + + DataCatalog = apps.get_model('metax_api', 'DataCatalog') + catalogs = DataCatalog.objects.all() + + for catalog in catalogs: + if catalog.catalog_json["identifier"] in dont_publish_to_etsin: + catalog.publish_to_etsin = False + + if catalog.catalog_json["identifier"] in dont_publish_to_ttv: + catalog.publish_to_ttv = False + + catalog.save() + + +def revert(apps, schema_editor): + pass + + + +class Migration(migrations.Migration): + + dependencies = [ + ('metax_api', '0044_change_aalto_catalog_to_harvested'), + ] + + operations = [ + migrations.AddField( + model_name='datacatalog', + name='publish_to_etsin', + field=models.BooleanField(default=True), + ), + migrations.AddField( + model_name='datacatalog', + name='publish_to_ttv', + field=models.BooleanField(default=True), + ), + migrations.RunPython(add_publish_fields_to_catalogs, revert), + ] diff --git a/src/metax_api/models/catalog_record.py b/src/metax_api/models/catalog_record.py index f7da8e55..0c7dd9df 100755 --- a/src/metax_api/models/catalog_record.py +++ b/src/metax_api/models/catalog_record.py @@ -1315,6 +1315,12 @@ def preservation_dataset_origin_version_exists(self): """ return hasattr(self, "preservation_dataset_origin_version") + def catalog_publishes_to_etsin(self): + return self.data_catalog.publish_to_etsin + + def catalog_publishes_to_ttv(self): + return self.data_catalog.publish_to_ttv + def catalog_versions_datasets(self): return self.data_catalog.catalog_json.get("dataset_versioning", False) is True @@ -3158,11 +3164,6 @@ def __call__(self): """ from metax_api.services import RabbitMQService as rabbitmq - _logger.info( - "Publishing CatalogRecord %s to RabbitMQ... routing_key: %s" - % (self.cr.identifier, self.routing_key) - ) - if self.routing_key == "delete": cr_json = {"identifier": self.cr.identifier} else: @@ -3171,15 +3172,27 @@ def __call__(self): cr_json["data_catalog"] = {"catalog_json": self.cr.data_catalog.catalog_json} try: - for exchange in settings.RABBITMQ["EXCHANGES"]: - do_publish = True - if self.cr.catalog_is_legacy(): - do_publish = False - - if do_publish: - rabbitmq.publish( - cr_json, routing_key=self.routing_key, exchange=exchange["NAME"] - ) + if self.cr.catalog_publishes_to_etsin(): + + _logger.info( + "Publishing CatalogRecord %s to RabbitMQ... exchange: datasets, routing_key: %s" + % (self.cr.identifier, self.routing_key) + ) + + rabbitmq.publish( + cr_json, routing_key=self.routing_key, exchange="datasets" + ) + if self.cr.catalog_publishes_to_ttv(): + + _logger.info( + "Publishing CatalogRecord %s to RabbitMQ... exchange: ttv-datasets, routing_key: %s" + % (self.cr.identifier, self.routing_key) + ) + + rabbitmq.publish( + cr_json, routing_key=self.routing_key, exchange="ttv-datasets" + ) + except: # note: if we'd like to let the request be a success even if this operation fails, # we could simply not raise an exception here. 
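A note on the resulting behaviour: the two flags gate the two exchanges independently, so a record can be announced to Etsin only, to TTV only, to both, or to neither. A minimal sketch of the fan-out, assuming the dummy RabbitMQService used by the test suite further below (which records each publish as a dict carrying routing_key and exchange keys):

    from metax_api.services import RabbitMQService

    def published_exchanges():
        # the dummy service collects every publish in RabbitMQService.messages
        return {message["exchange"] for message in RabbitMQService.messages}

    # For a catalog saved with publish_to_etsin=False and publish_to_ttv=True,
    # creating a record should leave a message only on the TTV exchange:
    # published_exchanges() == {"ttv-datasets"}
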
diff --git a/src/metax_api/models/data_catalog.py b/src/metax_api/models/data_catalog.py index 37e2c100..bd1b5a86 100755 --- a/src/metax_api/models/data_catalog.py +++ b/src/metax_api/models/data_catalog.py @@ -59,6 +59,13 @@ class DataCatalog(Common): help_text="Services which are allowed to read catalog records in the catalog.", ) + publish_to_etsin = models.BooleanField( + default=True + ) + publish_to_ttv = models.BooleanField( + default=True + ) + # END OF MODEL FIELD DEFINITIONS # READ_METHODS = ("GET", "HEAD", "OPTIONS") diff --git a/src/metax_api/tests/api/rest/v2/views/datasets/write.py b/src/metax_api/tests/api/rest/v2/views/datasets/write.py index 43f3d012..7f30506f 100755 --- a/src/metax_api/tests/api/rest/v2/views/datasets/write.py +++ b/src/metax_api/tests/api/rest/v2/views/datasets/write.py @@ -13,6 +13,7 @@ from rest_framework.test import APITestCase from metax_api.models import CatalogRecordV2, DataCatalog +from metax_api.services import RabbitMQService from metax_api.tests.utils import TestClassUtils, test_data_file_path CR = CatalogRecordV2 @@ -654,4 +655,73 @@ def test_external_service_can_not_add_catalog_record_to_other_catalog(self): self.cr_test_data["research_dataset"]["preferred_identifier"] = "temp-pid" response = self.client.post("/rest/v2/datasets", self.cr_test_data, format="json") - self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) \ No newline at end of file + self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data) + +class CatalogRecordRabbitMQPublish(CatalogRecordApiWriteCommon): + """ + Testing if RabbitMQ messages are published to correct exchanges with correct routing keys + when creating, updating, or deleting a catalog record. + Uses a dummy RabbitMQService. + """ + + def setUp(self): + super().setUp() + + def _check_rabbitmq_queue(self, routing_key, publish_to_etsin, publish_to_ttv): + """ + Checks if a message with a given routing key exists in the correct exchange + in the dummy RabbitMQService queue + """ + messages_str = ''.join(str(message) for message in RabbitMQService.messages) + assert_str_etsin = f"'routing_key': '{routing_key}', 'exchange': 'datasets'" + assert_str_ttv = f"'routing_key': '{routing_key}', 'exchange': 'ttv-datasets'" + + self.assertEqual(assert_str_etsin in messages_str, publish_to_etsin) + self.assertEqual(assert_str_ttv in messages_str, publish_to_ttv) + + + + def test_rabbitmq_publish(self): + """ + Creates four different data catalogs and creates, updates, and deletes + catalog records in them. Checks that RabbitMQ messages are published + in correct exchanges with correct routing keys. 
+ """ + param_list = [(True, True), (True, False), (False, True), (False, False)] + for publish_to_etsin, publish_to_ttv in param_list: + with self.subTest(): + + # Create the data catalog + dc = self._get_object_from_test_data("datacatalog", 4) + dc_id = f"urn:nbn:fi:att:data-catalog-att-{publish_to_etsin}-{publish_to_ttv}" + dc["catalog_json"]["identifier"] = dc_id + dc["publish_to_etsin"] = publish_to_etsin + dc["publish_to_ttv"] = publish_to_ttv + dc = self.client.post("/rest/v2/datacatalogs", dc, format="json").data + + # Create the catalog record + cr = self._get_new_full_test_att_cr_data() + cr["data_catalog"] = dc + + cr = self.client.post("/rest/v2/datasets", cr, format="json").data + self._check_rabbitmq_queue("create", publish_to_etsin, publish_to_ttv) + + # Empty the queue + RabbitMQService.messages = [] + + # Update the catalog record + cr["research_dataset"]["description"] = { + "en": "Updating the description" + } + + response = self.client.put(f"/rest/v2/datasets/{cr['id']}", cr, format="json") + self._check_rabbitmq_queue("update", publish_to_etsin, publish_to_ttv) + + RabbitMQService.messages = [] + + # Delete the catalog record + response = self.client.delete(f"/rest/v2/datasets/{cr['id']}") + self._check_rabbitmq_queue("delete", publish_to_etsin, publish_to_ttv) + + RabbitMQService.messages = [] + From ff82584097c818b598a37bd7db6693fabd62558c Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Tue, 18 Jan 2022 10:02:04 +0200 Subject: [PATCH 140/160] CSCFAIRMETA-1292: Fixed 'If-Modified-Since' header functionality --- src/metax_api/api/rest/base/views/common_view.py | 4 ++-- .../tests/api/rest/base/views/common/read.py | 13 +++++++++++-- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/src/metax_api/api/rest/base/views/common_view.py b/src/metax_api/api/rest/base/views/common_view.py index cb28087a..053a5a40 100755 --- a/src/metax_api/api/rest/base/views/common_view.py +++ b/src/metax_api/api/rest/base/views/common_view.py @@ -176,11 +176,11 @@ def get_queryset(self): q_filters = [] deduplicated_q_filters = [] - CS.set_if_modified_since_filter(self.request, additional_filters) - if hasattr(self, "queryset_search_params"): additional_filters.update(**self.queryset_search_params) + CS.set_if_modified_since_filter(self.request, additional_filters) + if "q_filters" in additional_filters: # Q-filter objects, which can contain more complex filter options such as OR-clauses q_filters = additional_filters.pop("q_filters") diff --git a/src/metax_api/tests/api/rest/base/views/common/read.py b/src/metax_api/tests/api/rest/base/views/common/read.py index 6d4887d7..779508ca 100755 --- a/src/metax_api/tests/api/rest/base/views/common/read.py +++ b/src/metax_api/tests/api/rest/base/views/common/read.py @@ -142,7 +142,6 @@ def test_pagination_ordering(self): self.assertEqual(from_api, from_db) - class ApiReadHTTPHeaderTests(CatalogRecordApiReadCommon): # # header if-modified-since tests, single @@ -193,7 +192,7 @@ def test_get_with_if_modified_since_header_syntax_error(self): # List operation returns always 200 even if no datasets match the if-modified-since criterium - def test_list_get_with_if_modified_since_header_ok(self): + def _test_list_get_with_if_modified_since_header_ok(self): cr = CatalogRecord.objects.get(pk=self.pk) date_modified = cr.date_modified date_modified_in_gmt = timezone.localtime(date_modified, timezone=tz("GMT")) @@ -234,6 +233,16 @@ def test_list_get_with_if_modified_since_header_ok(self): self.assertTrue(len(response.data.get("results")) > 6) 
self.assertTrue(len(response.data.get("results")) == 28) + def test_list_get_with_if_modified_since_header_ok_as_unauthenticated_user(self): + + self._set_http_authorization("no") + self._test_list_get_with_if_modified_since_header_ok() + + def test_list_get_with_if_modified_since_header_ok_as_authenticated_user(self): + + self._use_http_authorization() + self._test_list_get_with_if_modified_since_header_ok() + class ApiReadQueryParamTests(CatalogRecordApiReadCommon): From b9ccf5fc3c4cc2a3621c64b3c452b8c030196e26 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Tue, 4 Jan 2022 13:23:33 +0200 Subject: [PATCH 141/160] added more descriptive response detail for HTTP 403 responses --- src/metax_api/api/rest/base/views/dataset_view.py | 12 ++++++------ src/metax_api/api/rest/base/views/directory_view.py | 2 +- src/metax_api/api/rest/v2/views/api_error_view.py | 2 +- src/metax_api/api/rest/v2/views/dataset_view.py | 2 +- src/metax_api/middleware/identifyapicaller.py | 12 ++++++------ 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/metax_api/api/rest/base/views/dataset_view.py b/src/metax_api/api/rest/base/views/dataset_view.py index 1aeaab0b..648feb45 100755 --- a/src/metax_api/api/rest/base/views/dataset_view.py +++ b/src/metax_api/api/rest/base/views/dataset_view.py @@ -271,7 +271,7 @@ def _search_using_dataset_identifiers(self): @action(detail=True, methods=["get"], url_path="redis") def redis_test(self, request, pk=None): # pragma: no cover if request.user.username != "metax": - raise Http403() + raise Http403({"detail": ["Access denied."]}) try: cached = self.cache.get("cr-1211%s" % pk) except: @@ -296,7 +296,7 @@ def redis_test(self, request, pk=None): # pragma: no cover @action(detail=True, methods=["get"], url_path="rabbitmq") def rabbitmq_test(self, request, pk=None): # pragma: no cover if request.user.username != "metax": - raise Http403() + raise Http403({"detail": ["Access denied."]}) rabbitmq.publish({"msg": "hello create"}, routing_key="create", exchange="datasets") rabbitmq.publish({"msg": "hello update"}, routing_key="update", exchange="datasets") return Response(data={}, status=status.HTTP_200_OK) @@ -312,7 +312,7 @@ def update_cr_total_files_byte_sizes(self, request): :return: """ if request.user.username != "metax": - raise Http403() + raise Http403({"detail": ["Access denied."]}) # Get all IDs for ida data catalogs ida_catalog_ids = [] for dc in DataCatalog.objects.filter( @@ -341,7 +341,7 @@ def update_cr_directory_browsing_data(self, request): :return: """ if request.user.username != "metax": - raise Http403() + raise Http403({"detail": ["Access denied."]}) if "id" in request.query_params: # in order to update one record only, use query param ?id=integer. 
useful for testcases @@ -390,7 +390,7 @@ def flush_password(self, request): # pragma: no cover with open("/home/metax-user/flush_password", "w") as f: dump(request.data, f) else: - raise Http403 + raise Http403({"detail": ["Access denied."]}) _logger.debug("FLUSH password set") return Response(data=None, status=status.HTTP_204_NO_CONTENT) @@ -422,4 +422,4 @@ def flush_records(self, request): # pragma: no cover _logger.debug("FLUSH called by %s" % request.user.username) return Response(data=None, status=status.HTTP_204_NO_CONTENT) - return Response(data=None, status=status.HTTP_403_FORBIDDEN) + return Response({"error": "Access denied"}, status=status.HTTP_403_FORBIDDEN) diff --git a/src/metax_api/api/rest/base/views/directory_view.py b/src/metax_api/api/rest/base/views/directory_view.py index 587363c2..1f45461e 100755 --- a/src/metax_api/api/rest/base/views/directory_view.py +++ b/src/metax_api/api/rest/base/views/directory_view.py @@ -163,7 +163,7 @@ def update_byte_sizes_and_file_counts(self, request): # pragma: no cover correct mistakes in real data. """ if request.user.username != "metax": - raise Http403 + raise Http403({"detail": ["Access denied."]}) for p in ( Directory.objects.all() diff --git a/src/metax_api/api/rest/v2/views/api_error_view.py b/src/metax_api/api/rest/v2/views/api_error_view.py index c0e477d4..ac926d70 100644 --- a/src/metax_api/api/rest/v2/views/api_error_view.py +++ b/src/metax_api/api/rest/v2/views/api_error_view.py @@ -29,7 +29,7 @@ class ApiErrorViewSetV2(CommonViewSet): def initial(self, request, *args, **kwargs): if request.user.username != "metax": - raise Http403 + raise Http403({"detail": ["Access denied."]}) return super().initial(request, *args, **kwargs) @action(detail=False, methods=["post"], url_path="flush") diff --git a/src/metax_api/api/rest/v2/views/dataset_view.py b/src/metax_api/api/rest/v2/views/dataset_view.py index 84eb1776..29e361e1 100755 --- a/src/metax_api/api/rest/v2/views/dataset_view.py +++ b/src/metax_api/api/rest/v2/views/dataset_view.py @@ -41,7 +41,7 @@ def projects_list(self, request, pk=None): cr = self.get_object() if not cr.user_is_privileged(request): - raise Http403 + raise Http403({"detail": ["Access denied."]}) projects = [ p diff --git a/src/metax_api/middleware/identifyapicaller.py b/src/metax_api/middleware/identifyapicaller.py index 85b04e36..635de74a 100755 --- a/src/metax_api/middleware/identifyapicaller.py +++ b/src/metax_api/middleware/identifyapicaller.py @@ -108,7 +108,7 @@ def _identify_api_caller(self, request): "Unauthenticated access attempt from ip: %s. 
Authorization header missing" % request.META["REMOTE_ADDR"] ) - raise Http403 + raise Http403({"detail": ["Access denied."]}) try: auth_method, auth_b64 = http_auth_header.split(" ") @@ -156,10 +156,10 @@ def _auth_basic(self, request, auth_b64): if not user: _logger.warning("Failed authnz for user %s: user not found" % username) - raise Http403 + raise Http403({"detail": ["Access denied."]}) if apikey != user["password"]: _logger.warning("Failed authnz for user %s: password mismatch" % username) - raise Http403 + raise Http403({"detail": ["Access denied."]}) request.user.username = username request.user.is_service = True @@ -178,7 +178,7 @@ def _auth_bearer(self, request, auth_b64): if response.status_code != 200: _logger.warning("Bearer token validation failed") - raise Http403 + raise Http403({"detail": ["Access denied."]}) try: token = self._extract_id_token(auth_b64) @@ -186,13 +186,13 @@ def _auth_bearer(self, request, auth_b64): # the above method should never fail, as this code should not be # reachable if the token validation had already failed. _logger.exception("Failed to extract token from id_token string") - raise Http403 + raise Http403({"detail": ["Access denied."]}) if len(token.get("CSCUserName", "")) > 0: request.user.username = token["CSCUserName"] else: _logger.warning("id_token does not contain valid user id: fairdataid or cscusername") - raise Http403 + raise Http403({"detail": ["Access denied."]}) request.user.is_service = False request.user.token = token From be6d7ded109eddd815f1e904707d19d7bc50f4c4 Mon Sep 17 00:00:00 2001 From: Sini Saarinen Date: Thu, 26 Aug 2021 16:06:38 +0300 Subject: [PATCH 142/160] Added 3 new parameters to rpc api organization_datasets_cumulative as well as tests for this. Added new parameters to Swagger documentation. 
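In practice the three new flags ride along with the existing date and organization filters as plain query parameters. A hedged usage sketch (endpoint path and parameter names are taken from the changes below; the host and organization id are illustrative):

    import requests

    # latest defaults to true server-side; legacy and removed only take
    # effect when given explicitly
    response = requests.get(
        "https://metax.example.org/rpc/statistics/organization_datasets_cumulative",
        params={
            "from_date": "2018-06-01",
            "to_date": "2018-06-30",
            "metadata_owner_org": "org_2",  # illustrative organization id
            "latest": "true",
            "removed": "false",
        },
    )
    # the response maps organization -> data catalog -> monthly rows
    # carrying count and byte_size values
    monthly_rows = response.json()["org_2"]
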
--- .../api/rpc/base/views/statistic_rpc.py | 5 ++ src/metax_api/services/statistic_service.py | 45 +++++++++----- src/metax_api/swagger/v1/swagger.yaml | 3 + src/metax_api/swagger/v2/swagger.yaml | 3 + .../tests/api/rpc/base/views/statistic_rpc.py | 58 +++++++++++++++++++ 5 files changed, 100 insertions(+), 14 deletions(-) diff --git a/src/metax_api/api/rpc/base/views/statistic_rpc.py b/src/metax_api/api/rpc/base/views/statistic_rpc.py index 96c4713f..aa7479d1 100755 --- a/src/metax_api/api/rpc/base/views/statistic_rpc.py +++ b/src/metax_api/api/rpc/base/views/statistic_rpc.py @@ -131,6 +131,11 @@ def organization_datasets_cumulative(self, request): "to_date": request.query_params.get("to_date", None), "metadata_owner_org": request.query_params.get("metadata_owner_org", None), } + + for boolean_param in ["latest", "legacy", "removed"]: + if boolean_param in request.query_params: + params[boolean_param] = CS.get_boolean_query_param(request, boolean_param) + return Response(StatisticService.total_organization_datasets(**params)) @action(detail=False, methods=["get"], url_path="unused_files") diff --git a/src/metax_api/services/statistic_service.py b/src/metax_api/services/statistic_service.py index fec1bda3..eac391ba 100755 --- a/src/metax_api/services/statistic_service.py +++ b/src/metax_api/services/statistic_service.py @@ -333,7 +333,7 @@ def _total_data_catalog_datasets(cls, from_date, to_date, access_types, dc_id): return grouped @classmethod - def total_organization_datasets(cls, from_date, to_date, metadata_owner_org=None): + def total_organization_datasets(cls, from_date, to_date, metadata_owner_org=None, latest=True, legacy=None, removed=None): """ Retrieve dataset count and byte size per month and monthly cumulative for all organizations, or a given single organization, grouped by catalog. 
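Note the tri-state semantics the view change above creates: a flag left out of the query string never reaches the service, so legacy and removed stay None (no filtering at all), while latest falls back to its True default. A small sketch of the forwarding, under the assumption that CS.get_boolean_query_param simply turns the string into a bool:

    def collect_boolean_params(query_params):
        # mirror of the view logic: forward only the flags that were given
        params = {}
        for name in ["latest", "legacy", "removed"]:
            if name in query_params:
                # assumption: behaves like CS.get_boolean_query_param
                params[name] = query_params[name].lower() == "true"
        return params

    collect_boolean_params({"removed": "false"})  # -> {"removed": False}
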
@@ -354,12 +354,12 @@ def total_organization_datasets(cls, from_date, to_date, metadata_owner_org=None results = {} for org in metadata_owner_orgs: - results[org] = cls._total_organization_datasets(from_date, to_date, org) + results[org] = cls._total_organization_datasets(from_date, to_date, org, latest, legacy, removed) return results @classmethod - def _total_organization_datasets(cls, from_date, to_date, metadata_owner_org): + def _total_organization_datasets(cls, from_date, to_date, metadata_owner_org, latest, legacy, removed): sql = """ WITH cte AS ( SELECT @@ -370,6 +370,7 @@ def _total_organization_datasets(cls, from_date, to_date, metadata_owner_org): where dc.id = %s and state = 'published' and cr.metadata_owner_org = %s + OPTIONAL_WHERE_FILTERS GROUP BY mon ) SELECT @@ -391,12 +392,37 @@ def _total_organization_datasets(cls, from_date, to_date, metadata_owner_org): where dc.id = %s and state = 'published' and cr.metadata_owner_org = %s + OPTIONAL_WHERE_FILTERS GROUP BY mon ) cr USING (mon) GROUP BY mon, c.mon_ida_byte_size, count ORDER BY mon; """ + filter_sql = [] + filter_args = [] + + if latest: + filter_sql.append("and next_dataset_version_id is null") + + if removed is not None: + filter_sql.append("and cr.removed = %s") + filter_args.append(removed) + + if legacy is not None: + filter_sql.append( + "".join( + [ + "and dc.catalog_json->>'identifier'", + " = " if legacy else " != ", + "any(%s)", + ] + ) + ) + filter_args.append(settings.LEGACY_CATALOGS) + + sql = sql.replace("OPTIONAL_WHERE_FILTERS", "\n".join(filter_sql)) + catalogs = DataCatalog.objects.filter( catalog_json__research_dataset_schema__in=["ida", "att"] ).values() @@ -406,17 +432,8 @@ def _total_organization_datasets(cls, from_date, to_date, metadata_owner_org): with connection.cursor() as cr: for dc in catalogs: - cr.execute( - sql, - [ - dc["id"], - metadata_owner_org, - from_date, - to_date, - dc["id"], - metadata_owner_org, - ], - ) + sql_args = [dc["id"], metadata_owner_org] + filter_args + [from_date, to_date, dc["id"], metadata_owner_org] + filter_args + cr.execute(sql, sql_args) results = [ dict(zip([col[0] for col in cr.description], row)) for row in cr.fetchall() ] diff --git a/src/metax_api/swagger/v1/swagger.yaml b/src/metax_api/swagger/v1/swagger.yaml index 8aa085c9..cb605be1 100755 --- a/src/metax_api/swagger/v1/swagger.yaml +++ b/src/metax_api/swagger/v1/swagger.yaml @@ -1974,6 +1974,9 @@ paths: - $ref: "#/parameters/from_date" - $ref: "#/parameters/to_date" - $ref: "#/parameters/metadata_owner_org_filter" + - $ref: "#/parameters/latest_filter" + - $ref: "#/parameters/removed_filter" + - $ref: "#/parameters/legacy_filter" responses: '200': description: Successful operation. diff --git a/src/metax_api/swagger/v2/swagger.yaml b/src/metax_api/swagger/v2/swagger.yaml index f93683a6..51a88991 100755 --- a/src/metax_api/swagger/v2/swagger.yaml +++ b/src/metax_api/swagger/v2/swagger.yaml @@ -2254,6 +2254,9 @@ paths: - $ref: "#/parameters/from_date" - $ref: "#/parameters/to_date" - $ref: "#/parameters/metadata_owner_org_filter" + - $ref: "#/parameters/latest_filter" + - $ref: "#/parameters/removed_filter" + - $ref: "#/parameters/legacy_filter" responses: '200': description: Successful operation. 
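Putting the pieces together: each requested flag contributes one fragment to OPTIONAL_WHERE_FILTERS, and because that placeholder occurs in both subqueries, the bound arguments are spliced into the final argument list twice. A minimal sketch of the expansion for latest=True, removed=False (fragments exactly as built in _total_organization_datasets above):

    # illustrative stand-in for the real query template
    sql = "... and cr.metadata_owner_org = %s OPTIONAL_WHERE_FILTERS GROUP BY mon ..."

    filter_sql = [
        "and next_dataset_version_id is null",  # latest
        "and cr.removed = %s",                  # removed
    ]
    filter_args = [False]

    sql = sql.replace("OPTIONAL_WHERE_FILTERS", "\n".join(filter_sql))
    # the final argument list then repeats filter_args, once per subquery:
    # [dc_id, org] + filter_args + [from_date, to_date, dc_id, org] + filter_args
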
diff --git a/src/metax_api/tests/api/rpc/base/views/statistic_rpc.py b/src/metax_api/tests/api/rpc/base/views/statistic_rpc.py
index e330e325..5d57b244 100755
--- a/src/metax_api/tests/api/rpc/base/views/statistic_rpc.py
+++ b/src/metax_api/tests/api/rpc/base/views/statistic_rpc.py
@@ -932,3 +932,61 @@ def test_organization_datasets_cumulative_for_drafts(self):
         # ensure the totals are calculated without drafts
         self.assertNotEqual(total_1, total_2, "Count should be reduced by one after setting id=1 as draft")
+
+
+class StatisticRPCforOrganizationDatasetsCumulative(StatisticRPCCommon, CatalogRecordApiWriteCommon):
+    """
+    Test suite for organization_datasets_cumulative. Tests only the optional parameters removed, legacy and latest for now.
+    """
+
+    url = "/rpc/statistics/organization_datasets_cumulative"
+    dateparam_all = "from_date=2018-06-01&to_date=2018-06-30"
+
+    def test_organization_datasets_cumulative_removed(self):
+        # initially there are 2 datasets
+        response = self.client.get(f"{self.url}?{self.dateparam_all}").data
+        self.assertEqual(response["org_2"]["urn:nbn:fi:att:2955e904-e3dd-4d7e-99f1-3fed446f96d1"][0]["count"], 2)
+
+        # removed=true should return 0 datasets since none have been removed
+        response = self.client.get(f"{self.url}?{self.dateparam_all}&removed=true").data
+        self.assertEqual(response["org_2"]["urn:nbn:fi:att:2955e904-e3dd-4d7e-99f1-3fed446f96d1"][0]["count"], 0)
+
+        # remove one dataset from June, so one dataset should be left
+        self._set_removed_dataset(id=8)
+        response = self.client.get(f"{self.url}?{self.dateparam_all}&removed=false").data
+        self.assertEqual(response["org_2"]["urn:nbn:fi:att:2955e904-e3dd-4d7e-99f1-3fed446f96d1"][0]["count"], 1)
+
+        # removed=true should return 1 removed dataset now
+        response = self.client.get(f"{self.url}?{self.dateparam_all}&removed=true").data
+        self.assertEqual(response["org_2"]["urn:nbn:fi:att:2955e904-e3dd-4d7e-99f1-3fed446f96d1"][0]["count"], 1)
+
+    def test_organization_datasets_cumulative_legacy(self):
+        # let's create 2 legacy datasets for 2018-06
+        leg_cr = (
+            self._create_legacy_dataset()
+        )
+        self._set_dataset_creation_date(leg_cr, "2018-06-13")
+        leg_cr2 = (
+            self._create_legacy_dataset()
+        )
+        self._set_dataset_creation_date(leg_cr2, "2018-06-13")
+
+        response = self.client.get(f"{self.url}?{self.dateparam_all}&legacy=true").data
+        self.assertEqual(response["some_org_id"]["urn:nbn:fi:att:data-catalog-legacy"][0]["count"], 2)
+
+        # check that legacy=false returns 0 datasets
+        response = self.client.get(f"{self.url}?{self.dateparam_all}&legacy=false").data
+        self.assertEqual(response["some_org_id"]["urn:nbn:fi:att:data-catalog-legacy"][0]["count"], 0)
+
+    def test_organization_datasets_cumulative_latest(self):
+        # let's create 2 versions of a dataset for 2018-06
+        first_version = self._create_new_dataset_version()
+        self._set_dataset_creation_date(first_version, "2018-06-13")
+        second_version = self._create_new_dataset_version()
+        self._set_dataset_creation_date(second_version, "2018-06-13")
+        response = self.client.get(f"{self.url}?{self.dateparam_all}&latest=false").data  # returns all
+        self.assertEqual(response["org_1"]["urn:nbn:fi:att:2955e904-e3dd-4d7e-99f1-3fed446f96d1"][0]["count"], 2)
+
+        # latest=true should only return the latest version
+        response = self.client.get(f"{self.url}?{self.dateparam_all}&latest=true").data
+        self.assertEqual(response["org_1"]["urn:nbn:fi:att:2955e904-e3dd-4d7e-99f1-3fed446f96d1"][0]["count"], 1)
\ No newline at end of file

From 2ec52b39e32f454dfde26b1ecc4fa020b19ce042 Mon Sep 17
00:00:00 2001 From: Toni Nurmi Date: Tue, 18 Jan 2022 09:57:34 +0000 Subject: [PATCH 143/160] CSCFAIRADM-935 Deployment naming --- .gitlab-ci.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 02300332..f63c2c8f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -15,13 +15,13 @@ deploy: stage: deploy environment: $CI_COMMIT_REF_NAME script: - - ansible-playbook -i $ANSIBLE_INVENTORY $DEPLOY_PLAYBOOK -e "build_id=$CI_COMMIT_SHORT_SHA repo_version=$CI_COMMIT_REF_NAME" + - ansible-playbook -i $ANSIBLE_INVENTORY $DEPLOY_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG repo_version=$CI_COMMIT_REF_NAME" integration_test: stage: test environment: $CI_COMMIT_REF_NAME script: - - ansible-playbook -i $ANSIBLE_INVENTORY $TEST_PLAYBOOK -e "build_id=$CI_COMMIT_SHORT_SHA" + - ansible-playbook -i $ANSIBLE_INVENTORY $TEST_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG" clean_test: stage: clean_test @@ -29,7 +29,7 @@ clean_test: name: $CI_COMMIT_REF_NAME on_stop: clean_gitlab_env script: - - ansible-playbook -i $ANSIBLE_INVENTORY $DELETE_PLAYBOOK -e "build_id=$CI_COMMIT_SHORT_SHA" + - ansible-playbook -i $ANSIBLE_INVENTORY $DELETE_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG" rules: - if: $CI_PIPELINE_SOURCE == "merge_request_event" when: always @@ -53,8 +53,8 @@ update_metax: stage: update environment: $CI_COMMIT_REF_NAME script: - - ansible-playbook -i $ANSIBLE_INVENTORY $UPDATE_PROXY_PLAYBOOK -e "build_id=$CI_COMMIT_SHORT_SHA" - - ansible-playbook -i $ANSIBLE_INVENTORY $MANAGE_PLAYBOOK -e "build_id=$CI_COMMIT_SHORT_SHA" + - ansible-playbook -i $ANSIBLE_INVENTORY $UPDATE_PROXY_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG" + - ansible-playbook -i $ANSIBLE_INVENTORY $MANAGE_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG" rules: - if: $CI_COMMIT_BRANCH =~ /^(demo|stable|staging|test)$/ when: always @@ -64,7 +64,7 @@ clean_previous_build: stage: clean_build environment: $CI_COMMIT_REF_NAME script: - - ansible-playbook -i $ANSIBLE_INVENTORY $DELETE_PLAYBOOK -e "build_id=${CI_COMMIT_BEFORE_SHA:0:8}" + - ansible-playbook -i $ANSIBLE_INVENTORY $DELETE_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG" rules: - if: $CI_COMMIT_BRANCH =~ /^(demo|stable|staging|test)$/ when: always From 499cae5f5fac0ebf187b6d061cbcc72c19d9195f Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Tue, 18 Jan 2022 12:12:23 +0200 Subject: [PATCH 144/160] CSCFAIRMETA-1286: Changed RabbitMQ port binding in docker-compose --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index b70b717c..f4562610 100755 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -52,7 +52,7 @@ services: image: rabbitmq:3-management hostname: metax-rabbitmq ports: - - 15672:15672 + - 8050:15672 volumes: - metax-rabbitmq:/var/lib/rabbitmq From 09444c9d80cf5e25a07fc26ee016a47dd8fbad5a Mon Sep 17 00:00:00 2001 From: Jori Niemi <3295718+tahme@users.noreply.github.com> Date: Thu, 20 Jan 2022 12:47:32 +0200 Subject: [PATCH 145/160] CSCFAIRMETA-1298: Fix statistic RPC bug with multiple legacy catalogs --- src/metax_api/services/statistic_service.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/metax_api/services/statistic_service.py b/src/metax_api/services/statistic_service.py index eac391ba..ac052619 100755 --- a/src/metax_api/services/statistic_service.py +++ b/src/metax_api/services/statistic_service.py @@ -413,9 +413,9 @@ def _total_organization_datasets(cls, from_date, to_date, metadata_owner_org, la filter_sql.append( 
"".join( [ - "and dc.catalog_json->>'identifier'", - " = " if legacy else " != ", - "any(%s)", + "AND ", + "" if legacy else "NOT ", + "dc.catalog_json->>'identifier' = ANY(%s)", ] ) ) From 63ce8499211bab52c4d29f460c2e2b884179d2b8 Mon Sep 17 00:00:00 2001 From: aptiaine Date: Wed, 15 Dec 2021 15:13:50 +0200 Subject: [PATCH 146/160] Removed project byte_size and file_count calculations --- src/metax_api/services/file_service.py | 13 ------------- .../tests/api/rest/base/views/directories/write.py | 5 ----- .../tests/api/rest/base/views/files/write.py | 3 --- .../tests/api/rest/v2/views/directories/write.py | 5 ----- .../tests/api/rpc/base/views/dataset_rpc.py | 1 - 5 files changed, 27 deletions(-) diff --git a/src/metax_api/services/file_service.py b/src/metax_api/services/file_service.py index ad0c983e..59d90c38 100755 --- a/src/metax_api/services/file_service.py +++ b/src/metax_api/services/file_service.py @@ -227,8 +227,6 @@ def restore_files(cls, request, file_identifier_list): _logger.info("Restored %d files in project %s" % (affected_rows, project_identifier)) - cls.calculate_project_directory_byte_sizes_and_file_counts(project_identifier) - return Response({"restored_files_count": affected_rows}, status=status.HTTP_200_OK) @classmethod @@ -336,7 +334,6 @@ def destroy_single(cls, file): deleted_files_count, project_identifier = cls._mark_files_as_deleted([file.id]) cls._delete_empy_dir_chain_above(file.parent_directory) - cls.calculate_project_directory_byte_sizes_and_file_counts(file.project_identifier) cls._mark_datasets_as_deprecated([file.id]) CallableService.add_post_request_callable( @@ -380,7 +377,6 @@ def destroy_bulk(cls, file_identifiers): deleted_files_count, project_identifier = cls._mark_files_as_deleted(file_ids) cls._find_and_delete_empty_directories(project_identifier) - cls.calculate_project_directory_byte_sizes_and_file_counts(project_identifier) cls._mark_datasets_as_deprecated(file_ids) file = File.objects_unfiltered.get(pk=file_ids[0]) @@ -422,7 +418,6 @@ def delete_project(cls, project_id): if file_ids: deleted_files_count = cls._mark_files_as_deleted(file_ids)[0] cls._find_and_delete_empty_directories(project_id) - cls.calculate_project_directory_byte_sizes_and_file_counts(project_id) cls._mark_datasets_as_deprecated(file_ids) else: _logger.info("Project %s contained no files" % project_id) @@ -1178,10 +1173,6 @@ def _create_single(cls, common_info, initial_data, serializer_class, **kwargs): common_info, initial_data_with_dirs[0], serializer_class, **kwargs ) - cls.calculate_project_directory_byte_sizes_and_file_counts( - initial_data["project_identifier"] - ) - CallableService.add_post_request_callable( DelayedLog( event="files_created", @@ -1238,10 +1229,6 @@ def _create_bulk(cls, common_info, initial_data_list, results, serializer_class, cls._create_files(common_info, file_list_with_dirs, results, serializer_class, **kwargs) - cls.calculate_project_directory_byte_sizes_and_file_counts( - initial_data_list[0]["project_identifier"] - ) - CallableService.add_post_request_callable( DelayedLog( event="files_created", diff --git a/src/metax_api/tests/api/rest/base/views/directories/write.py b/src/metax_api/tests/api/rest/base/views/directories/write.py index c9268578..cd507b80 100755 --- a/src/metax_api/tests/api/rest/base/views/directories/write.py +++ b/src/metax_api/tests/api/rest/base/views/directories/write.py @@ -183,11 +183,6 @@ def test_create_files_for_catalog_record(self): 2, "Expected 2 files in directory {}".format(dirs[0]["directory_path"]), ) - 
self.assertEqual( - response.data["file_count"], - len(response.data["files"]), - "Expected 2 file in parent file_count", - ) # adding file2 and file3 to dataset cr["research_dataset"]["files"] = cr["research_dataset"]["files"] + [ diff --git a/src/metax_api/tests/api/rest/base/views/files/write.py b/src/metax_api/tests/api/rest/base/views/files/write.py index b93816eb..5874eaba 100755 --- a/src/metax_api/tests/api/rest/base/views/files/write.py +++ b/src/metax_api/tests/api/rest/base/views/files/write.py @@ -94,8 +94,6 @@ def _check_project_root_byte_size_and_file_count(self, project_identifier): response = self.client.get("/rest/directories/root?project=%s" % project_identifier) self.assertEqual(response.status_code, status.HTTP_200_OK) - self.assertEqual(response.data["byte_size"], byte_size) - self.assertEqual(response.data["file_count"], file_count) def _change_file_path(self, file, new_name): file["file_path"] = file["file_path"].replace(file["file_name"], new_name) @@ -279,7 +277,6 @@ def test_create_file(self): self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assertEqual("file_name" in response.data.keys(), True) self.assertEqual(response.data["file_name"], newly_created_file_name) - self._check_project_root_byte_size_and_file_count(response.data["project_identifier"]) def test_create_file_error_identifier_exists(self): # first ok diff --git a/src/metax_api/tests/api/rest/v2/views/directories/write.py b/src/metax_api/tests/api/rest/v2/views/directories/write.py index a7526984..41ac310e 100755 --- a/src/metax_api/tests/api/rest/v2/views/directories/write.py +++ b/src/metax_api/tests/api/rest/v2/views/directories/write.py @@ -174,11 +174,6 @@ def test_create_files_for_catalog_record(self): 2, "Expected 2 files in directory {}".format(dirs[0]["directory_path"]), ) - self.assertEqual( - response.data["file_count"], - len(response.data["files"]), - "Expected 2 file in parent file_count", - ) # adding file2 and file3 to dataset diff --git a/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py b/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py index f2d1230c..07166c20 100755 --- a/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py +++ b/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py @@ -381,7 +381,6 @@ def test_refresh_with_cumulative_state_yes(self): str(new_dir.id) in cr_after._directory_data, "New dir id should be found in cr", ) - self.assertEqual(new_dir.byte_size, self._single_file_byte_size * 2) def test_refreshing_deprecated_dataset_is_not_allowed(self): self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2") From d17b5f52ed6edb303a7a7a696f1066dfc50fdda4 Mon Sep 17 00:00:00 2001 From: tonurmi Date: Tue, 25 Jan 2022 15:28:40 +0200 Subject: [PATCH 147/160] disable clean_previous_build, make it manual step --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f63c2c8f..92823680 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -67,5 +67,5 @@ clean_previous_build: - ansible-playbook -i $ANSIBLE_INVENTORY $DELETE_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG" rules: - if: $CI_COMMIT_BRANCH =~ /^(demo|stable|staging|test)$/ - when: always + when: manual - when: never From 4cece3f0219921b2065c478961f4b94588d68e51 Mon Sep 17 00:00:00 2001 From: Jori Niemi <3295718+tahme@users.noreply.github.com> Date: Thu, 27 Jan 2022 09:42:22 +0200 Subject: [PATCH 148/160] Revert removing project size calculation 63ce8499 --- src/metax_api/services/file_service.py | 13 
+++++++++++++ .../tests/api/rest/base/views/directories/write.py | 5 +++++ .../tests/api/rest/base/views/files/write.py | 3 +++ .../tests/api/rest/v2/views/directories/write.py | 5 +++++ .../tests/api/rpc/base/views/dataset_rpc.py | 1 + 5 files changed, 27 insertions(+) diff --git a/src/metax_api/services/file_service.py b/src/metax_api/services/file_service.py index 59d90c38..ad0c983e 100755 --- a/src/metax_api/services/file_service.py +++ b/src/metax_api/services/file_service.py @@ -227,6 +227,8 @@ def restore_files(cls, request, file_identifier_list): _logger.info("Restored %d files in project %s" % (affected_rows, project_identifier)) + cls.calculate_project_directory_byte_sizes_and_file_counts(project_identifier) + return Response({"restored_files_count": affected_rows}, status=status.HTTP_200_OK) @classmethod @@ -334,6 +336,7 @@ def destroy_single(cls, file): deleted_files_count, project_identifier = cls._mark_files_as_deleted([file.id]) cls._delete_empy_dir_chain_above(file.parent_directory) + cls.calculate_project_directory_byte_sizes_and_file_counts(file.project_identifier) cls._mark_datasets_as_deprecated([file.id]) CallableService.add_post_request_callable( @@ -377,6 +380,7 @@ def destroy_bulk(cls, file_identifiers): deleted_files_count, project_identifier = cls._mark_files_as_deleted(file_ids) cls._find_and_delete_empty_directories(project_identifier) + cls.calculate_project_directory_byte_sizes_and_file_counts(project_identifier) cls._mark_datasets_as_deprecated(file_ids) file = File.objects_unfiltered.get(pk=file_ids[0]) @@ -418,6 +422,7 @@ def delete_project(cls, project_id): if file_ids: deleted_files_count = cls._mark_files_as_deleted(file_ids)[0] cls._find_and_delete_empty_directories(project_id) + cls.calculate_project_directory_byte_sizes_and_file_counts(project_id) cls._mark_datasets_as_deprecated(file_ids) else: _logger.info("Project %s contained no files" % project_id) @@ -1173,6 +1178,10 @@ def _create_single(cls, common_info, initial_data, serializer_class, **kwargs): common_info, initial_data_with_dirs[0], serializer_class, **kwargs ) + cls.calculate_project_directory_byte_sizes_and_file_counts( + initial_data["project_identifier"] + ) + CallableService.add_post_request_callable( DelayedLog( event="files_created", @@ -1229,6 +1238,10 @@ def _create_bulk(cls, common_info, initial_data_list, results, serializer_class, cls._create_files(common_info, file_list_with_dirs, results, serializer_class, **kwargs) + cls.calculate_project_directory_byte_sizes_and_file_counts( + initial_data_list[0]["project_identifier"] + ) + CallableService.add_post_request_callable( DelayedLog( event="files_created", diff --git a/src/metax_api/tests/api/rest/base/views/directories/write.py b/src/metax_api/tests/api/rest/base/views/directories/write.py index cd507b80..c9268578 100755 --- a/src/metax_api/tests/api/rest/base/views/directories/write.py +++ b/src/metax_api/tests/api/rest/base/views/directories/write.py @@ -183,6 +183,11 @@ def test_create_files_for_catalog_record(self): 2, "Expected 2 files in directory {}".format(dirs[0]["directory_path"]), ) + self.assertEqual( + response.data["file_count"], + len(response.data["files"]), + "Expected 2 file in parent file_count", + ) # adding file2 and file3 to dataset cr["research_dataset"]["files"] = cr["research_dataset"]["files"] + [ diff --git a/src/metax_api/tests/api/rest/base/views/files/write.py b/src/metax_api/tests/api/rest/base/views/files/write.py index 5874eaba..b93816eb 100755 --- 
a/src/metax_api/tests/api/rest/base/views/files/write.py +++ b/src/metax_api/tests/api/rest/base/views/files/write.py @@ -94,6 +94,8 @@ def _check_project_root_byte_size_and_file_count(self, project_identifier): response = self.client.get("/rest/directories/root?project=%s" % project_identifier) self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data["byte_size"], byte_size) + self.assertEqual(response.data["file_count"], file_count) def _change_file_path(self, file, new_name): file["file_path"] = file["file_path"].replace(file["file_name"], new_name) @@ -277,6 +279,7 @@ def test_create_file(self): self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data) self.assertEqual("file_name" in response.data.keys(), True) self.assertEqual(response.data["file_name"], newly_created_file_name) + self._check_project_root_byte_size_and_file_count(response.data["project_identifier"]) def test_create_file_error_identifier_exists(self): # first ok diff --git a/src/metax_api/tests/api/rest/v2/views/directories/write.py b/src/metax_api/tests/api/rest/v2/views/directories/write.py index 41ac310e..a7526984 100755 --- a/src/metax_api/tests/api/rest/v2/views/directories/write.py +++ b/src/metax_api/tests/api/rest/v2/views/directories/write.py @@ -174,6 +174,11 @@ def test_create_files_for_catalog_record(self): 2, "Expected 2 files in directory {}".format(dirs[0]["directory_path"]), ) + self.assertEqual( + response.data["file_count"], + len(response.data["files"]), + "Expected 2 file in parent file_count", + ) # adding file2 and file3 to dataset diff --git a/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py b/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py index 07166c20..f2d1230c 100755 --- a/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py +++ b/src/metax_api/tests/api/rpc/base/views/dataset_rpc.py @@ -381,6 +381,7 @@ def test_refresh_with_cumulative_state_yes(self): str(new_dir.id) in cr_after._directory_data, "New dir id should be found in cr", ) + self.assertEqual(new_dir.byte_size, self._single_file_byte_size * 2) def test_refreshing_deprecated_dataset_is_not_allowed(self): self._add_directory(self.cr_test_data, "/TestExperiment/Directory_2") From 578a0485565fb3686768cdb2e3378819e91103bb Mon Sep 17 00:00:00 2001 From: Jori Niemi <3295718+tahme@users.noreply.github.com> Date: Thu, 27 Jan 2022 09:43:23 +0200 Subject: [PATCH 149/160] CSCFAIRMETA-1306: Use aggregate for directory size calculation --- src/metax_api/models/directory.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/src/metax_api/models/directory.py b/src/metax_api/models/directory.py index 5b8c1714..a2378594 100755 --- a/src/metax_api/models/directory.py +++ b/src/metax_api/models/directory.py @@ -112,12 +112,6 @@ def _calculate_byte_size_and_file_count(self, update_statements): sub_dirs = ( self.child_directories.all() .only("byte_size", "parent_directory_id") - .prefetch_related( - Prefetch( - "files", - queryset=File.objects.only("id", "byte_size", "parent_directory_id"), - ) - ) ) if sub_dirs: @@ -129,8 +123,9 @@ def _calculate_byte_size_and_file_count(self, update_statements): self.file_count = sum(d.file_count for d in sub_dirs) # note: never actually saved using .save() - self.byte_size += sum(f.byte_size for f in self.files.all()) or 0 - self.file_count += len(self.files.all()) or 0 + file_aggregates = self.files.aggregate(byte_size=Sum("byte_size"), count=Count("*")) + self.byte_size += file_aggregates['byte_size'] or 0 + 
self.file_count += file_aggregates['count'] or 0 update_statements.append("(%d, %d, %d)" % (self.byte_size, self.file_count, self.id)) From dfde5a4aa00e46097826f5fd97b9a112c72d37ee Mon Sep 17 00:00:00 2001 From: Jori Niemi <3295718+tahme@users.noreply.github.com> Date: Fri, 28 Jan 2022 08:43:53 +0200 Subject: [PATCH 150/160] CSCFAIRMETA-1036: Aggregate all project directory sizes in one query --- src/metax_api/models/directory.py | 51 +++++++++++++++++++++---------- 1 file changed, 35 insertions(+), 16 deletions(-) diff --git a/src/metax_api/models/directory.py b/src/metax_api/models/directory.py index a2378594..3fae359f 100755 --- a/src/metax_api/models/directory.py +++ b/src/metax_api/models/directory.py @@ -8,7 +8,7 @@ import logging from django.db import connection, models -from django.db.models import Count, Prefetch, Sum +from django.db.models import Count, Prefetch, Sum, Q from .common import Common from .file import File @@ -71,8 +71,8 @@ def calculate_byte_size_and_file_count(self): ) update_statements = [] - - self._calculate_byte_size_and_file_count(update_statements) + annotated_root_directory = self._get_project_directories_with_own_sizes() + annotated_root_directory._calculate_byte_size_and_file_count(update_statements) sql_update_all_directories = """ update metax_api_directory as d set @@ -100,6 +100,32 @@ def calculate_byte_size_and_file_count(self): ) ) + def _get_project_directories_with_own_sizes(self): + """ + Get all project directories with total size and count of files + they contain annotated as own_byte_size and own_file_count. + + Returns root directory with annotated subdirectories in directory.sub_dirs. + """ + project_directories = ( + Directory.objects.filter(project_identifier=self.project_identifier) + .only("byte_size", "parent_directory_id") + .annotate( + own_byte_size=Sum("files__byte_size", filter=Q(files__removed=False)), + own_file_count=Count("files", filter=Q(files__removed=False)), + ) + ) + + # build directory tree from annotated directories + directories_by_id = {d.id: d for d in project_directories} + for d in project_directories: + d.sub_dirs = [] + for d in project_directories: + if d.parent_directory_id is not None: + directories_by_id[d.parent_directory_id].sub_dirs.append(d) + annotated_root_directory = directories_by_id.get(self.id) + return annotated_root_directory + def _calculate_byte_size_and_file_count(self, update_statements): """ Recursively traverse the entire directory tree and update total byte size and file count @@ -108,24 +134,17 @@ def _calculate_byte_size_and_file_count(self, update_statements): self.byte_size = 0 self.file_count = 0 - # fields id, parent_directory_id must be specified for joining for Prefetch-object to work properly - sub_dirs = ( - self.child_directories.all() - .only("byte_size", "parent_directory_id") - ) - - if sub_dirs: - for sub_dir in sub_dirs: + if self.sub_dirs: + for sub_dir in self.sub_dirs: sub_dir._calculate_byte_size_and_file_count(update_statements) # sub dir numbers - self.byte_size = sum(d.byte_size for d in sub_dirs) - self.file_count = sum(d.file_count for d in sub_dirs) + self.byte_size = sum(d.byte_size for d in self.sub_dirs) + self.file_count = sum(d.file_count for d in self.sub_dirs) # note: never actually saved using .save() - file_aggregates = self.files.aggregate(byte_size=Sum("byte_size"), count=Count("*")) - self.byte_size += file_aggregates['byte_size'] or 0 - self.file_count += file_aggregates['count'] or 0 + self.byte_size += self.own_byte_size or 0 + self.file_count += 
self.own_file_count or 0 update_statements.append("(%d, %d, %d)" % (self.byte_size, self.file_count, self.id)) From c67e3ec2c6c0f793725e8d5183560320c3926e76 Mon Sep 17 00:00:00 2001 From: Jori Niemi <3295718+tahme@users.noreply.github.com> Date: Fri, 28 Jan 2022 09:05:38 +0200 Subject: [PATCH 151/160] Update directory size only if it has changed. --- src/metax_api/models/directory.py | 35 ++++++++++++++++++------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/src/metax_api/models/directory.py b/src/metax_api/models/directory.py index 3fae359f..7ff191f6 100755 --- a/src/metax_api/models/directory.py +++ b/src/metax_api/models/directory.py @@ -74,20 +74,21 @@ def calculate_byte_size_and_file_count(self): annotated_root_directory = self._get_project_directories_with_own_sizes() annotated_root_directory._calculate_byte_size_and_file_count(update_statements) - sql_update_all_directories = """ - update metax_api_directory as d set - byte_size = results.byte_size, - file_count = results.file_count - from (values - %s - ) as results(byte_size, file_count, id) - where results.id = d.id; - """ % ",".join( - update_statements - ) + if len(update_statements) > 0: + sql_update_all_directories = """ + update metax_api_directory as d set + byte_size = results.byte_size, + file_count = results.file_count + from (values + %s + ) as results(byte_size, file_count, id) + where results.id = d.id; + """ % ",".join( + update_statements + ) - with connection.cursor() as cursor: - cursor.execute(sql_update_all_directories) + with connection.cursor() as cursor: + cursor.execute(sql_update_all_directories) _logger.info( "Project %s directory tree calculations complete. Total byte_size: " @@ -109,7 +110,7 @@ def _get_project_directories_with_own_sizes(self): """ project_directories = ( Directory.objects.filter(project_identifier=self.project_identifier) - .only("byte_size", "parent_directory_id") + .only("file_count", "byte_size", "parent_directory_id") .annotate( own_byte_size=Sum("files__byte_size", filter=Q(files__removed=False)), own_file_count=Count("files", filter=Q(files__removed=False)), @@ -131,6 +132,8 @@ def _calculate_byte_size_and_file_count(self, update_statements): Recursively traverse the entire directory tree and update total byte size and file count for each directory. Accumulates a list of triplets for a big sql-update statement. 
""" + old_byte_size = self.byte_size + old_file_count = self.file_count self.byte_size = 0 self.file_count = 0 @@ -146,7 +149,9 @@ def _calculate_byte_size_and_file_count(self, update_statements): self.byte_size += self.own_byte_size or 0 self.file_count += self.own_file_count or 0 - update_statements.append("(%d, %d, %d)" % (self.byte_size, self.file_count, self.id)) + # add updated values if changed + if self.byte_size != old_byte_size or self.file_count != old_file_count: + update_statements.append("(%d, %d, %d)" % (self.byte_size, self.file_count, self.id)) def calculate_byte_size_and_file_count_for_cr(self, cr_id, directory_data): """ From e0308332169b21e534a5b77e3748eb3a2ee6acda Mon Sep 17 00:00:00 2001 From: Jori Niemi <3295718+tahme@users.noreply.github.com> Date: Mon, 31 Jan 2022 10:26:38 +0200 Subject: [PATCH 152/160] CSCFAIRMETA-1309: Get all directories in single query when calculating dataset size --- src/metax_api/models/directory.py | 37 +++++++++++++++++-------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/src/metax_api/models/directory.py b/src/metax_api/models/directory.py index 7ff191f6..bc2f68fd 100755 --- a/src/metax_api/models/directory.py +++ b/src/metax_api/models/directory.py @@ -71,7 +71,7 @@ def calculate_byte_size_and_file_count(self): ) update_statements = [] - annotated_root_directory = self._get_project_directories_with_own_sizes() + annotated_root_directory = self._get_project_directory_tree(with_own_sizes=True) annotated_root_directory._calculate_byte_size_and_file_count(update_statements) if len(update_statements) > 0: @@ -101,21 +101,23 @@ def calculate_byte_size_and_file_count(self): ) ) - def _get_project_directories_with_own_sizes(self): + def _get_project_directory_tree(self, with_own_sizes=False): """ - Get all project directories with total size and count of files - they contain annotated as own_byte_size and own_file_count. + Get all project directories from DB in single query. Returns current directory with + subdirectories in directory.sub_dirs. - Returns root directory with annotated subdirectories in directory.sub_dirs. + Optionally, annotate directories with total size and + count of files they contain as own_byte_size and own_file_count. 
""" - project_directories = ( - Directory.objects.filter(project_identifier=self.project_identifier) - .only("file_count", "byte_size", "parent_directory_id") - .annotate( + project_directories = Directory.objects.filter( + project_identifier=self.project_identifier + ).only("file_count", "byte_size", "parent_directory_id") + + if with_own_sizes: + project_directories = project_directories.annotate( own_byte_size=Sum("files__byte_size", filter=Q(files__removed=False)), own_file_count=Count("files", filter=Q(files__removed=False)), ) - ) # build directory tree from annotated directories directories_by_id = {d.id: d for d in project_directories} @@ -175,7 +177,10 @@ def calculate_byte_size_and_file_count_for_cr(self, cr_id, directory_data): parent_id: (byte_size, file_count) for parent_id, byte_size, file_count in stats } - self._calculate_byte_size_and_file_count_for_cr(grouped_by_dir, directory_data) + directory_tree = self._get_project_directory_tree() + directory_tree._calculate_byte_size_and_file_count_for_cr( + grouped_by_dir, directory_data + ) def _calculate_byte_size_and_file_count_for_cr(self, grouped_by_dir, directory_data): """ @@ -192,15 +197,13 @@ def _calculate_byte_size_and_file_count_for_cr(self, grouped_by_dir, directory_d self.byte_size = 0 self.file_count = 0 - sub_dirs = self.child_directories.all().only("id") - - if sub_dirs: - for sub_dir in sub_dirs: + if self.sub_dirs: + for sub_dir in self.sub_dirs: sub_dir._calculate_byte_size_and_file_count_for_cr(grouped_by_dir, directory_data) # sub dir numbers - self.byte_size = sum(d.byte_size for d in sub_dirs) - self.file_count = sum(d.file_count for d in sub_dirs) + self.byte_size = sum(d.byte_size for d in self.sub_dirs) + self.file_count = sum(d.file_count for d in self.sub_dirs) current_dir = grouped_by_dir.get(self.id, [0, 0]) From 098ab8c3d66ad3a16576be81881b1c6a086e0cfd Mon Sep 17 00:00:00 2001 From: tonurmi Date: Mon, 31 Jan 2022 16:04:09 +0200 Subject: [PATCH 153/160] Fix Virtualenv missing repo_version parameter in gitlab ci --- .gitlab-ci.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 92823680..87e647dd 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,7 +21,7 @@ integration_test: stage: test environment: $CI_COMMIT_REF_NAME script: - - ansible-playbook -i $ANSIBLE_INVENTORY $TEST_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG" + - ansible-playbook -i $ANSIBLE_INVENTORY $TEST_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG repo_version=$CI_COMMIT_REF_NAME" clean_test: stage: clean_test @@ -29,7 +29,7 @@ clean_test: name: $CI_COMMIT_REF_NAME on_stop: clean_gitlab_env script: - - ansible-playbook -i $ANSIBLE_INVENTORY $DELETE_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG" + - ansible-playbook -i $ANSIBLE_INVENTORY $DELETE_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG repo_version=$CI_COMMIT_REF_NAME" rules: - if: $CI_PIPELINE_SOURCE == "merge_request_event" when: always @@ -53,8 +53,8 @@ update_metax: stage: update environment: $CI_COMMIT_REF_NAME script: - - ansible-playbook -i $ANSIBLE_INVENTORY $UPDATE_PROXY_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG" - - ansible-playbook -i $ANSIBLE_INVENTORY $MANAGE_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG" + - ansible-playbook -i $ANSIBLE_INVENTORY $UPDATE_PROXY_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG repo_version=$CI_COMMIT_REF_NAME" + - ansible-playbook -i $ANSIBLE_INVENTORY $MANAGE_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG repo_version=$CI_COMMIT_REF_NAME" rules: - if: $CI_COMMIT_BRANCH =~ 
/^(demo|stable|staging|test)$/ when: always @@ -64,7 +64,7 @@ clean_previous_build: stage: clean_build environment: $CI_COMMIT_REF_NAME script: - - ansible-playbook -i $ANSIBLE_INVENTORY $DELETE_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG" + - ansible-playbook -i $ANSIBLE_INVENTORY $DELETE_PLAYBOOK -e "build_id=$CI_COMMIT_REF_SLUG repo_version=$CI_COMMIT_REF_NAME" rules: - if: $CI_COMMIT_BRANCH =~ /^(demo|stable|staging|test)$/ when: manual From e04d47eb3d915adb1634acb3cf0eb80ff9d54797 Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Fri, 11 Feb 2022 08:31:47 +0200 Subject: [PATCH 154/160] CSCFAIRMETA-1336: Fix OAI-PMH ListRecords bug --- src/metax_api/api/oaipmh/base/metax_oai_server.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/metax_api/api/oaipmh/base/metax_oai_server.py b/src/metax_api/api/oaipmh/base/metax_oai_server.py index 2fd78634..e34b62df 100755 --- a/src/metax_api/api/oaipmh/base/metax_oai_server.py +++ b/src/metax_api/api/oaipmh/base/metax_oai_server.py @@ -281,8 +281,11 @@ def _get_oai_dc_metadata(self, record, json): publisher = [] publisher_data = json.get("publisher", {}) - for key, value in publisher_data.get("name", {}).items(): - publisher.append(self._get_oaic_dc_value(value, key)) + if publisher_data.get("@type") == "Person": + publisher.append(self._get_oaic_dc_value(publisher_data.get("name"))) + else: + for key, value in publisher_data.get("name", {}).items(): + publisher.append(self._get_oaic_dc_value(value, key)) contributor = [] contributor_data = json.get("contributor", []) From 804221fbc4813c1ed4608636845c97fed25f2ad3 Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Wed, 16 Feb 2022 08:59:18 +0200 Subject: [PATCH 155/160] CSCFAIRMETA-1336: More fixes to OAI-PMH endpoint --- src/metax_api/services/datacite_service.py | 40 +++++++++++++++------- 1 file changed, 27 insertions(+), 13 deletions(-) diff --git a/src/metax_api/services/datacite_service.py b/src/metax_api/services/datacite_service.py index 144a8155..d60b6eb0 100755 --- a/src/metax_api/services/datacite_service.py +++ b/src/metax_api/services/datacite_service.py @@ -447,16 +447,27 @@ def _subjects(concept): def _licenses(access_rights): licenses = [] for license in access_rights["license"]: - for lang in license["title"].keys(): + # If only URL of the license is submitted. 
Use "en" as language + # and set rights and rightsURI to the URL + if len(license) == 1: licenses.append( { - "lang": lang, - "rightsURI": license["license"] - if "license" in license - else license["identifier"], - "rights": license["title"][lang], + "lang": "en", + "rightsURI": license["license"], + "rights": license["license"] } ) + else: + for lang in license["title"].keys(): + licenses.append( + { + "lang": lang, + "rightsURI": license["license"] + if "license" in license + else license["identifier"], + "rights": license["title"][lang], + } + ) return licenses @staticmethod @@ -470,12 +481,15 @@ def _spatials(spatials): for wkt in spatial.get("as_wkt", []): if wkt.startswith("POINT"): - geo_location["geoLocationPoint"] = { - "pointLongitude": float( - re.search(r"POINT\((.*) ", wkt, re.IGNORECASE).group(1) - ), - "pointLatitude": float(re.search(r" (.*)\)", wkt, re.IGNORECASE).group(1)), + point = wkt.strip("POINT()") + point_list = point.strip().split(" ") + longitude = point_list[0] + latitude = point_list[1] + polygon_point = { + "pointLongitude": float(longitude.strip("()")), + "pointLatitude": float(latitude.strip("()")), } + geo_location["geoLocationPoint"] = polygon_point # only one point can be placed break @@ -486,8 +500,8 @@ def _spatials(spatials): for point in polygon.split(","): longitude, latitude = point.strip().split(" ") polygon_point = { - "pointLongitude": float(longitude), - "pointLatitude": float(latitude), + "pointLongitude": float(longitude.strip("()")), + "pointLatitude": float(latitude.strip("()")), } geo_location["geoLocationPolygon"]["polygonPoints"].append( polygon_point From ca9e6b003ef9fb8cb4304e9de4ddb07a088d6e3d Mon Sep 17 00:00:00 2001 From: Jori Niemi Date: Wed, 16 Feb 2022 07:12:51 +0000 Subject: [PATCH 156/160] CSCFAIRMETA-1269: Management command for recreating missing REMS items --- .../commands/create_missing_rems_items.py | 50 +++++ src/metax_api/services/rems_service.py | 15 +- .../tests/management/commands/__init__.py | 4 + .../commands/create_missing_rems_items.py | 204 ++++++++++++++++++ .../management/commands/loadinitialdata.py | 2 + .../management/commands/mark_files_removed.py | 22 +- 6 files changed, 285 insertions(+), 12 deletions(-) create mode 100644 src/metax_api/management/commands/create_missing_rems_items.py create mode 100644 src/metax_api/tests/management/commands/create_missing_rems_items.py diff --git a/src/metax_api/management/commands/create_missing_rems_items.py b/src/metax_api/management/commands/create_missing_rems_items.py new file mode 100644 index 00000000..8254cf8b --- /dev/null +++ b/src/metax_api/management/commands/create_missing_rems_items.py @@ -0,0 +1,50 @@ +import logging + +from django.core.management.base import BaseCommand + +from metax_api.services.redis_cache_service import RedisClient +from metax_api.utils import ReferenceDataLoader +from metax_api.models import CatalogRecordV2 +from metax_api.services.rems_service import REMSCatalogItemNotFoundException, REMSService + +_logger = logging.getLogger(__name__) + +class Command(BaseCommand): + def handle(self, *args, **options): + rems_service = REMSService() + found_entity_count = 0 + created_entity_count = 0 + missing_entity_count = 0 + try: + rems_crs = CatalogRecordV2.objects.filter(rems_identifier__isnull=False) + _logger.info(f"Found {len(rems_crs)} CatalogRecords with rems_identifiers") + for cr in rems_crs: + try: + rems_service.get_rems_entity(cr) + found_entity_count += 1 + except REMSCatalogItemNotFoundException as e: + missing_entity_count += 1 + 
if not cr.access_granter: + _logger.info( + f"Missing access_granter for {cr.identifier}, not creating REMS entity" + ) + continue + if len(cr.research_dataset.get("access_rights", {}).get("license") or []) == 0: + _logger.info( + f"Missing license for {cr.identifier}, not creating REMS entity" + ) + continue + + _logger.info( + f"REMS entity {cr.rems_identifier} for dataset {cr.identifier} not found, creating" + ) + rems_service.create_rems_entity(cr, cr.access_granter) + created_entity_count += 1 + + except Exception as e: + _logger.error(e) + raise e + + _logger.info(f"CatalogRecords with existing REMS entities: {found_entity_count}") + _logger.info(f"Missing REMS entities: {missing_entity_count}") + _logger.info(f"Created REMS entities: {created_entity_count}") diff --git a/src/metax_api/services/rems_service.py b/src/metax_api/services/rems_service.py index 766d6f8b..5c931a7a 100755 --- a/src/metax_api/services/rems_service.py +++ b/src/metax_api/services/rems_service.py @@ -24,6 +24,10 @@ class REMSException(Exception): pass +class REMSCatalogItemNotFoundException(REMSException): + pass + + class REMSService: def __init__(self): if not hasattr(django_settings, "REMS"): @@ -73,6 +77,13 @@ def create_rems_entity(self, cr, user_info): self._create_catalogue_item(res_id, wf_id) + def get_rems_entity(self, cr): + """ + Get rems catalogue item for cr. + """ + rems_ci = self._get_catalogue_item(cr.rems_identifier) + return rems_ci + def close_rems_entity(self, old_rems_id, reason): """ Closes all applications and archives and disables all related entities @@ -112,7 +123,9 @@ def _get_catalogue_item(self, rems_id): if len(rems_ci) < 1: # pragma: no cover # this should not happen - raise REMSException(f"Could not find catalogue-item for {rems_id} in REMS.") + raise REMSCatalogItemNotFoundException( + f"Could not find catalogue-item for {rems_id} in REMS." 
+ ) return rems_ci diff --git a/src/metax_api/tests/management/commands/__init__.py b/src/metax_api/tests/management/commands/__init__.py index 891c30d0..44b75d70 100755 --- a/src/metax_api/tests/management/commands/__init__.py +++ b/src/metax_api/tests/management/commands/__init__.py @@ -4,3 +4,7 @@ # # :author: CSC - IT Center for Science Ltd., Espoo Finland # :license: MIT + +from .create_missing_rems_items import * +from .mark_files_removed import * +from .loadinitialdata import * diff --git a/src/metax_api/tests/management/commands/create_missing_rems_items.py b/src/metax_api/tests/management/commands/create_missing_rems_items.py new file mode 100644 index 00000000..3b15f677 --- /dev/null +++ b/src/metax_api/tests/management/commands/create_missing_rems_items.py @@ -0,0 +1,204 @@ +# This file is part of the Metax API service +# +# Copyright 2021-2022 Ministry of Education and Culture, Finland +# +# :author: CSC - IT Center for Science Ltd., Espoo Finland +# :license: MIT +import json + +import responses +from django.core.management import call_command +from django.test import TestCase +from django.conf import settings + +from metax_api.models import CatalogRecordV2 +from metax_api.tests.utils import test_data_file_path + +access_granter = { + "userid": "access_granter", + "name": "Access Granter", + "email": "granter@example.com", +} + +license = [ + { + "title": { + "fi": "Creative Commons NimeƤ-EiKaupallinen 2.0 Yleinen (CC BY-NC 2.0)", + "en": "Creative Commons Attribution-NonCommercial 2.0 Generic (CC BY-NC 2.0", + "und": "Creative Commons NimeƤ-EiKaupallinen 2.0 Yleinen (CC BY-NC 2.0)", + }, + "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/CC-BY-NC-2.0", + "description": {"en": "Free account of the rights"}, + "license": "https://creativecommons.org/licenses/by-nc/2.0/", + }, + { + "title": {"fi": "Muu", "en": "Other", "und": "Muu"}, + "identifier": "http://uri.suomi.fi/codelist/fairdata/license/code/other", + }, +] + + +def get_calls_for_url(url): + """Get deserialized list of calls for url.""" + calls = [] + for call, response in responses.calls: + if call.url == url: + calls.append(json.loads(call.body)) + return calls + + +class TestCreateMissingREMSItems(TestCase): + def setUp(self): + call_command("loaddata", test_data_file_path, verbosity=0) + self.setup_catalog_records() + self.mock_rems_responses() + + def setup_catalog_records(self): + # has REMS entity + cr = CatalogRecordV2.objects.get(pk=1) + cr._initial_data["access_granter"] = access_granter + cr.research_dataset["access_rights"]["license"] = license + cr.rems_identifier = "already-exists" + cr.save() + + # missing REMS entity + cr = CatalogRecordV2.objects.get(pk=2) + cr._initial_data["access_granter"] = access_granter + cr.research_dataset["access_rights"]["license"] = license + cr.rems_identifier = "does-not-exist" + cr.save() + self.no_rems_cr = cr + + # missing REMS entity, missing license + cr = CatalogRecordV2.objects.get(pk=3) + cr._initial_data["access_granter"] = access_granter + cr.research_dataset["access_rights"]["license"] = None + cr.rems_identifier = "does-not-have-license" + cr.save() + self.no_license_cr = cr + + def mock_rems_responses(self): + rems_url = settings.REMS["BASE_URL"] + rems_org = settings.REMS["ORGANIZATION"] + responses.add(responses.GET, f"{rems_url}/health", json={"healthy": True}, status=200) + + # organization + responses.add( + responses.GET, + f"{rems_url}/organizations/{rems_org}", + json={ + "archived": False, + "organization/id": rems_org, + 
"organization/short-name": {"fi": "CSC", "en": "CSC", "sv": "CSC"}, + "organization/review-emails": [], + "enabled": True, + "organization/owners": [], + "organization/name": { + "fi": "Rems test org", + "en": "Rems test org", + "sv": "Rems test org", + }, + }, + status=200, + ) + + # catalogue-items handling and creation + responses.add( + responses.GET, + f"{rems_url}/catalogue-items?resource=already-exists&archived=true&disabled=true", + json=[{}], + status=200, + ) + responses.add( + responses.GET, + f"{rems_url}/catalogue-items?resource=does-not-exist&archived=true&disabled=true", + json=[], + status=200, + ) + responses.add( + responses.GET, + f"{rems_url}/catalogue-items?resource=does-not-have-license&archived=true&disabled=true", + json=[], + status=200, + ) + responses.add( + responses.POST, + f"{rems_url}/catalogue-items/create", + json={"success": True, "id": "created-catalogue-item-id"}, + status=200, + ) + + # user creation + responses.add( + responses.POST, + f"{rems_url}/users/create", + json={"success": True}, + status=200, + ) + + # workflow creation + responses.add( + responses.POST, + f"{rems_url}/workflows/create", + json={"success": True, "id": "created-workflow-id"}, + status=200, + ) + + # license handling + responses.add( + responses.GET, + f"{rems_url}/licenses?disabled=true&archived=true", + json=[{"localizations": {"en": {"textcontent": "https://example.com/license_x/en"}}}], + status=200, + ) + responses.add( + responses.POST, + f"{rems_url}/licenses/create", + json={"success": True, "id": "created-license-id"}, + status=200, + ) + + # resource creation + def create_resource_with_resid(request): + data = json.loads(request.body) + return 200, {}, json.dumps({"success": True, "id": data["resid"]}) + + responses.add_callback( + responses.POST, + f"{rems_url}/resources/create", + callback=create_resource_with_resid, + content_type="application/json", + ) + + @responses.activate + def test_rems_creates_catalogue_item(self): + call_command("create_missing_rems_items") + calls = get_calls_for_url("https://mock-rems/api/catalogue-items/create") + assert calls == [ + { + "form": 1, + "resid": "does-not-exist", + "wfid": "created-workflow-id", + "localizations": { + "en": { + "title": self.no_rems_cr.research_dataset["title"]["en"], + "infourl": settings.REMS["ETSIN_URL_TEMPLATE"] % self.no_rems_cr.identifier, + } + }, + "enabled": True, + "organization": {"organization/id": settings.REMS["ORGANIZATION"]}, + } + ] + + @responses.activate + def test_rems_logs(self): + with self.assertLogs() as logs: + call_command("create_missing_rems_items") + msgs = [log.msg % log.args for log in logs.records] + assert "Found 3 CatalogRecords with rems_identifiers" in msgs + assert "CatalogRecords with existing REMS entities: 1" in msgs + assert ( + f"Missing license for {self.no_license_cr.identifier}, not creating REMS entity" in msgs + ) + assert "Missing REMS entities: 2" in msgs + assert "Created REMS entities: 1" in msgs diff --git a/src/metax_api/tests/management/commands/loadinitialdata.py b/src/metax_api/tests/management/commands/loadinitialdata.py index fcc576b2..0c297634 100755 --- a/src/metax_api/tests/management/commands/loadinitialdata.py +++ b/src/metax_api/tests/management/commands/loadinitialdata.py @@ -6,6 +6,7 @@ # :license: MIT from io import StringIO +import unittest from django.conf import settings from django.core.management import call_command @@ -14,6 +15,7 @@ from metax_api.models import DataCatalog, FileStorage +@unittest.skip("out of date with current 
implementation") class LoadInitialDataTest(LiveServerTestCase): """ diff --git a/src/metax_api/tests/management/commands/mark_files_removed.py b/src/metax_api/tests/management/commands/mark_files_removed.py index 4cb0eaaf..90217c2d 100644 --- a/src/metax_api/tests/management/commands/mark_files_removed.py +++ b/src/metax_api/tests/management/commands/mark_files_removed.py @@ -1,35 +1,35 @@ from io import StringIO -import logging from django.core.management import call_command from django.test import TestCase from metax_api.models import File from metax_api.tests.utils import test_data_file_path -from metax_api.management.commands.mark_files_removed import Command -_logger = logging.getLogger(__name__) class RemoveFilesTest(TestCase): - def setUp(self): call_command("loaddata", test_data_file_path, verbosity=0) def test_command_output(self): project_identifier = "project_x" path_prefix = "/project_x_FROZEN/Experiment_X/Phase_1/2017" - path_prefix_file = None out = StringIO() args = [project_identifier] options = {"path_prefix": path_prefix, "stdout": out} - call_command('mark_files_removed', *args, **options) + call_command("mark_files_removed", *args, **options) + + self.assertIn( + f"Found 10 files to remove in project: {project_identifier} with path prefix: {path_prefix}", + out.getvalue(), + ) + self.assertIn("Removed 10 files", out.getvalue()) - self.assertIn(f'Found 10 files to remove in project: {project_identifier} with path prefix: {path_prefix}', out.getvalue()) - self.assertIn('Removed 10 files', out.getvalue()) + files = File.objects_unfiltered.filter( + project_identifier=project_identifier, file_path__startswith=path_prefix + ) - files = File.objects_unfiltered.filter(project_identifier = project_identifier, file_path__startswith = path_prefix) - self.assertEqual(10, len(files)) for file in files: - self.assertTrue(file.removed) \ No newline at end of file + self.assertTrue(file.removed) From 6c930fd96d29cbeb2359db8d38c4a8b892eb1dc2 Mon Sep 17 00:00:00 2001 From: Jori Niemi Date: Wed, 16 Feb 2022 07:13:10 +0000 Subject: [PATCH 157/160] CSCFAIRMETA-1198: Add missing dataset filters to swagger documentation --- src/metax_api/swagger/v1/swagger.yaml | 7 ++++++- src/metax_api/swagger/v2/swagger.yaml | 11 +++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/metax_api/swagger/v1/swagger.yaml b/src/metax_api/swagger/v1/swagger.yaml index cb605be1..724065b8 100755 --- a/src/metax_api/swagger/v1/swagger.yaml +++ b/src/metax_api/swagger/v1/swagger.yaml @@ -1001,6 +1001,11 @@ paths: description: Filter datasets with comma-separated list of IDA projects required: false type: string + - name: editor_permissions_user + in: query + description: Filter datasets where user has editor permissions + required: false + type: string - $ref: "#/parameters/fields" - $ref: "#/parameters/include_legacy" responses: @@ -2156,7 +2161,7 @@ parameters: description: Includes legacy datasets in query. Parameter needs to be given when fetching, modifying or deleting legacy datasets. required: false default: false - + type: boolean definitions: CatalogRecord: diff --git a/src/metax_api/swagger/v2/swagger.yaml b/src/metax_api/swagger/v2/swagger.yaml index 51a88991..24560201 100755 --- a/src/metax_api/swagger/v2/swagger.yaml +++ b/src/metax_api/swagger/v2/swagger.yaml @@ -1001,6 +1001,16 @@ paths: description: Returns datasets that can be edited with certain api version. 
Possible values are 1 and 2 type: integer required: false + - name: projects + in: query + description: Filter datasets with comma-separated list of IDA projects + required: false + type: string + - name: editor_permissions_user + in: query + description: Filter datasets where user has editor permissions + required: false + type: string - $ref: "#/parameters/include_user_metadata" - $ref: "#/parameters/fields" - $ref: "#/parameters/include_legacy" @@ -2452,6 +2462,7 @@ parameters: description: Includes legacy datasets in query. Parameter needs to be given when fetching, modifying or deleting legacy datasets. required: false default: false + type: boolean definitions: AddOrDeleteUserMetadata: From fb5e0ad5d529385c4d83b0e21f96cbe08f476a13 Mon Sep 17 00:00:00 2001 From: Jori Niemi Date: Fri, 18 Feb 2022 12:58:24 +0000 Subject: [PATCH 158/160] CSCFAIRMETA-1280: Fixes for OAI-PMH endpoint * Encode identifiers in SYKE urls * Add missing state checks * Refactor _handle_syke_urnresolver_metadata method that was unused --- .../api/oaipmh/base/metax_oai_server.py | 66 ++++++++----------- src/metax_api/tests/api/oaipmh/minimal_api.py | 6 ++ src/metax_api/tests/api/oaipmh/syke.py | 3 +- 3 files changed, 37 insertions(+), 38 deletions(-) diff --git a/src/metax_api/api/oaipmh/base/metax_oai_server.py b/src/metax_api/api/oaipmh/base/metax_oai_server.py index 2fd78634..0479a7dd 100755 --- a/src/metax_api/api/oaipmh/base/metax_oai_server.py +++ b/src/metax_api/api/oaipmh/base/metax_oai_server.py @@ -6,8 +6,10 @@ # :license: MIT import datetime +from urllib import parse from django.conf import settings +from django.db.models import QuerySet from django.utils import timezone from oaipmh import common from oaipmh.common import ResumptionOAIPMH @@ -62,13 +64,9 @@ def _get_default_set_filter(): return catalog_urns def _get_urnresolver_record_data(self, set, cursor, batch_size, from_=None, until=None): - proxy = CatalogRecord - if set == DATACATALOGS_SET: - proxy = DataCatalog - # Use unfiltered objects for fetching catalog records for urn resolver, since otherwise deleted objects # won't appear in the result. Get only active objects. 
- records = proxy.objects_unfiltered.filter(active=True) + records = CatalogRecord.objects_unfiltered.filter(active=True, state="published") if from_ and until: records = records.filter(date_modified__gte=from_, date_modified__lte=until) @@ -109,19 +107,19 @@ def _get_urnresolver_record_data(self, set, cursor, batch_size, from_=None, unti def _get_filtered_records_data( self, verb, metadata_prefix, set, cursor, batch_size, from_=None, until=None ): - proxy = CatalogRecord + query_set: QuerySet if set == DATACATALOGS_SET: - proxy = DataCatalog - - # For NON urn resolver, only get non-deleted active objects - query_set = proxy.objects.all() + query_set = DataCatalog.objects.all() + else: + # For NON urn resolver, only get non-deleted active CatalogRecords + query_set = CatalogRecord.objects.filter(active=True, state="published") if from_ and until: - query_set = proxy.objects.filter(date_modified__gte=from_, date_modified__lte=until) + query_set = query_set.filter(date_modified__gte=from_, date_modified__lte=until) elif from_: - query_set = proxy.objects.filter(date_modified__gte=from_) + query_set = query_set.filter(date_modified__gte=from_) elif until: - query_set = proxy.objects.filter(date_modified__lte=until) + query_set = query_set.filter(date_modified__lte=until) if set: if set == DATACATALOGS_SET: @@ -134,7 +132,6 @@ def _get_filtered_records_data( query_set = query_set.filter( data_catalog__catalog_json__identifier__in=self._get_default_set_filter() ) - query_set = query_set.filter(state="published") data = [] for record in query_set: @@ -171,16 +168,22 @@ def _get_filtered_records_data( cursor_end = cursor + batch_size if cursor + batch_size < len(data) else len(data) return data[cursor:cursor_end] - def _handle_syke_urnresolver_metadata(self, record): - identifiers = [] - preferred_identifier = record.research_dataset.get("preferred_identifier") - identifiers.append(preferred_identifier) - for id_obj in record.research_dataset.get("other_identifier", []): + def _get_syke_urnresolver_metadata_for_record(self, record): + metadatas = [] + pref_id = record["research_dataset"].get("preferred_identifier") + for id_obj in record["research_dataset"].get("other_identifier", []): if id_obj.get("notation", "").startswith("{"): uuid = id_obj["notation"] - identifiers.append(SYKE_URL_PREFIX_TEMPLATE % uuid) + metadatas.append( + { + "identifier": [ + SYKE_URL_PREFIX_TEMPLATE % parse.quote(uuid), + pref_id, + ] + } + ) break - return identifiers + return metadatas def _get_oai_dc_urnresolver_metadatas_for_record(self, record): """ @@ -197,23 +200,10 @@ def _get_oai_dc_urnresolver_metadatas_for_record(self, record): pref_id = record["research_dataset"].get("preferred_identifier") dc_id = record["data_catalog__catalog_json"].get("identifier") is_harvested = record["data_catalog__catalog_json"].get("harvested", False) is True - if record["research_dataset"].get("other_identifier") is not None: - other_ids = record["research_dataset"].get("other_identifier") - else: - other_ids = [] + other_ids = record["research_dataset"].get("other_identifier", []) if dc_id == "urn:nbn:fi:att:data-catalog-harvest-syke": - for id_obj in other_ids: - if id_obj.get("notation", "").startswith("{"): - metadatas.append( - { - "identifier": [ - SYKE_URL_PREFIX_TEMPLATE % id_obj["notation"], - pref_id, - ] - } - ) - break + metadatas.extend(self._get_syke_urnresolver_metadata_for_record(record)) elif dc_id not in settings.LEGACY_CATALOGS: resolution_url = settings.OAI["ETSIN_URL_TEMPLATE"] % record["identifier"] @@ 
-429,7 +419,9 @@ def identify(self): """Implement OAI-PMH verb Identify .""" first = ( CatalogRecord.objects.filter( - data_catalog__catalog_json__identifier__in=self._get_default_set_filter() + active=True, + state="published", + data_catalog__catalog_json__identifier__in=self._get_default_set_filter(), ) .order_by("date_created") .values_list("date_created", flat=True) diff --git a/src/metax_api/tests/api/oaipmh/minimal_api.py b/src/metax_api/tests/api/oaipmh/minimal_api.py index e64b0776..4f9a9b63 100755 --- a/src/metax_api/tests/api/oaipmh/minimal_api.py +++ b/src/metax_api/tests/api/oaipmh/minimal_api.py @@ -149,6 +149,12 @@ def test_list_identifiers_for_drafts(self): headers = self._get_results(response.content, "//o:header") self.assertFalse(len(headers) == len(allRecords), len(headers)) + # drafts should also be filtered when set=datasets is set explicitly + response = self.client.get("/oai/?verb=ListIdentifiers&metadataPrefix=oai_dc&set=datasets") + self.assertEqual(response.status_code, status.HTTP_200_OK) + headers = self._get_results(response.content, "//o:header") + self.assertFalse(len(headers) == len(allRecords), len(headers)) + def test_list_identifiers_from_datacatalogs_set(self): allRecords = DataCatalog.objects.all()[: settings.OAI["BATCH_SIZE"]] response = self.client.get( diff --git a/src/metax_api/tests/api/oaipmh/syke.py b/src/metax_api/tests/api/oaipmh/syke.py index 2c0622a2..bb3885ff 100755 --- a/src/metax_api/tests/api/oaipmh/syke.py +++ b/src/metax_api/tests/api/oaipmh/syke.py @@ -9,6 +9,7 @@ from django.core.management import call_command from rest_framework import status from rest_framework.test import APITestCase +from urllib import parse from metax_api.api.oaipmh.base.metax_oai_server import SYKE_URL_PREFIX_TEMPLATE from metax_api.models import CatalogRecord @@ -66,7 +67,7 @@ def test_get_urn_resolver_record(self): ) self.assertTrue(len(identifiers) == 1, response.content) - syke_url = SYKE_URL_PREFIX_TEMPLATE % "{55AB842F-9CED-4E80-A7E5-07A54F0AE4A4}" + syke_url = SYKE_URL_PREFIX_TEMPLATE % parse.quote("{55AB842F-9CED-4E80-A7E5-07A54F0AE4A4}") identifiers = self._get_results( response.content, '//o:record/o:metadata/oai_dc:dc/dc:identifier[text()="%s"]' % syke_url, From 387ec9db9c0739f800130117c869f4ad48768b9b Mon Sep 17 00:00:00 2001 From: Atro Himanen Date: Wed, 23 Feb 2022 14:07:42 +0200 Subject: [PATCH 159/160] CSCFAIRMETA-1394: Update owner of a dataset --- .../migrations/0046_replace_dataset_owner.py | 58 +++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 src/metax_api/migrations/0046_replace_dataset_owner.py diff --git a/src/metax_api/migrations/0046_replace_dataset_owner.py b/src/metax_api/migrations/0046_replace_dataset_owner.py new file mode 100644 index 00000000..9d7391e8 --- /dev/null +++ b/src/metax_api/migrations/0046_replace_dataset_owner.py @@ -0,0 +1,58 @@ +from django.db import migrations + +import logging + +logger = logging.getLogger(__name__) + +def replace_metadata_provider_user(cr, old_user, new_user): + logger.info(f"replacing metadata_provider_user: {old_user} with new_user: {new_user}") + if cr.metadata_provider_user: + if cr.metadata_provider_user == old_user: + cr.metadata_provider_user = new_user + logger.info("metadata_provider_user changed") + +def change_metadata_provider_user(apps, schema_editor): + new_user = "frickmar" + old_user = "mfrick@oulu.fi" + CatalogRecord = apps.get_model('metax_api', 'CatalogRecord') + crs = CatalogRecord.objects.filter(metadata_provider_user=old_user) + logger.info(f"Found 
{len(crs)} catalog records to update")
+    for cr in crs:
+        try:
+            logger.info(f"changing metadata_provider_user for cr {cr.identifier}")
+            replace_metadata_provider_user(cr, old_user, new_user)
+            # cr.editor_permissions.user_id = "asdf"
+            cr.save()
+            cr.editor_permissions.users.update(user_id=new_user)
+            logger.info("cr save successful")
+        except Exception as e:
+            logger.error(e)
+
+
+def revert(apps, schema_editor):
+    new_user = "frickmar"
+    old_user = "mfrick@oulu.fi"
+    CatalogRecord = apps.get_model('metax_api', 'CatalogRecord')
+    crs = CatalogRecord.objects.filter(metadata_provider_user=new_user)
+    logger.info(f"Found {len(crs)} catalog records to update")
+    for cr in crs:
+        try:
+            logger.info(f"changing metadata_provider_user for cr {cr.identifier}")
+            replace_metadata_provider_user(cr, new_user, old_user)
+            # cr.editor_permissions.user_id = old_user
+            cr.save()
+            cr.editor_permissions.users.update(user_id=old_user)
+            logger.info("cr save successful")
+        except Exception as e:
+            logger.error(e)
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('metax_api', '0045_add_publish_fields_to_catalogs'),
+    ]
+
+    operations = [
+        migrations.RunPython(change_metadata_provider_user, revert),
+    ]
\ No newline at end of file

From ea5fbbc7f226161d7abd75d2ba5a45a3eb821377 Mon Sep 17 00:00:00 2001
From: Atro Himanen
Date: Fri, 25 Feb 2022 09:38:14 +0200
Subject: [PATCH 160/160] CSCFAIRMETA-1336: Created a helper function for
 getting the actor data

---
 .../api/oaipmh/base/metax_oai_server.py | 41 +++++++------------
 1 file changed, 15 insertions(+), 26 deletions(-)

diff --git a/src/metax_api/api/oaipmh/base/metax_oai_server.py b/src/metax_api/api/oaipmh/base/metax_oai_server.py
index e34b62df..22665e4b 100755
--- a/src/metax_api/api/oaipmh/base/metax_oai_server.py
+++ b/src/metax_api/api/oaipmh/base/metax_oai_server.py
@@ -235,6 +235,17 @@ def _get_oaic_dc_value(self, value, lang=None):
             valueDict["lang"] = lang
         return valueDict
 
+    def _get_actors_oaic_dc_value(self, actors):
+        names = []
+        for actor in actors:
+            if "name" in actor:
+                if isinstance(actor["name"], dict):
+                    for key, val in actor["name"].items():
+                        names.append(self._get_oaic_dc_value(val, key))
+                else:
+                    names.append(self._get_oaic_dc_value(actor.get("name")))
+        return names
+
     def _get_oai_dc_metadata(self, record, json):
         identifier = []
         if "preferred_identifier" in json:
@@ -247,15 +258,7 @@ def _get_oai_dc_metadata(self, record, json):
             for key, value in title_data.items():
                 title.append(self._get_oaic_dc_value(value, key))
 
-        creator = []
-        creator_data = json.get("creator", [])
-        for value in creator_data:
-            if "name" in value:
-                if isinstance(value["name"], dict):
-                    for key, val in value["name"].items():
-                        creator.append(self._get_oaic_dc_value(val, key))
-                else:
-                    creator.append(self._get_oaic_dc_value(value.get("name")))
+        creator = self._get_actors_oaic_dc_value(json.get("creator", []))
 
         subject = []
         subject_data = json.get("keyword", [])
@@ -279,23 +282,9 @@ def _get_oai_dc_metadata(self, record, json):
             else:
                 desc.append(desc_data)
 
-        publisher = []
-        publisher_data = json.get("publisher", {})
-        if publisher_data.get("@type") == "Person":
-            publisher.append(self._get_oaic_dc_value(publisher_data.get("name")))
-        else:
-            for key, value in publisher_data.get("name", {}).items():
-                publisher.append(self._get_oaic_dc_value(value, key))
-
-        contributor = []
-        contributor_data = json.get("contributor", [])
-        for value in contributor_data:
-            if "name" in value:
-                if isinstance(value["name"], dict):
-                    for key, 
val in value["name"].items(): - contributor.append(self._get_oaic_dc_value(val, key)) - else: - contributor.append(self._get_oaic_dc_value(value.get("name"))) + publisher = self._get_actors_oaic_dc_value([json.get("publisher", {})]) + + contributor = self._get_actors_oaic_dc_value([json.get("contributor", [])]) date = self._get_oaic_dc_value(str(record.date_created))