From 724f4084966ad5939e83dd8eda57fce3812d0b2d Mon Sep 17 00:00:00 2001
From: why-not-try-calmer
Date: Mon, 28 Aug 2023 14:42:15 +0200
Subject: [PATCH 01/42] streaming parser; reporting on last visited tag; maybe
 fix through not re-encoding file's contents

simplified, rearranged code

allowing garbage collection

capturing error context and providing it to user

using repr to encode bytes as a string literal

simplified
---
 docker-qgis/process_projectfile.py | 76 ++++++++++++++++++++----------
 docker-qgis/utils.py               | 76 ++++++++++++++++++++----------
 2 files changed, 102 insertions(+), 50 deletions(-)

diff --git a/docker-qgis/process_projectfile.py b/docker-qgis/process_projectfile.py
index 3a4e2a358..3aa511370 100644
--- a/docker-qgis/process_projectfile.py
+++ b/docker-qgis/process_projectfile.py
@@ -1,9 +1,17 @@
+import io
 import logging
 from pathlib import Path
-from typing import Dict
+from typing import NamedTuple, Optional
 from xml.etree import ElementTree
 
-from qfieldcloud.qgis.utils import BaseException, get_layers_data, layers_data_to_string
+from qfieldcloud.qgis.utils import (
+    FailedThumbnailGenerationException,
+    InvalidFileExtensionException,
+    InvalidXmlFileException,
+    ProjectFileNotFoundException,
+    get_layers_data,
+    layers_data_to_string,
+)
 from qgis.core import QgsMapRendererParallelJob, QgsMapSettings, QgsProject
 from qgis.PyQt.QtCore import QEventLoop, QSize
 from qgis.PyQt.QtGui import QColor
@@ -11,30 +19,43 @@
 logger = logging.getLogger("PROCPRJ")
 
 
-class ProjectFileNotFoundException(BaseException):
-    message = 'Project file "%(project_filename)s" does not exist'
+class XmlLocationError(NamedTuple):
+    line: int
+    column: int
 
 
-class InvalidFileExtensionException(BaseException):
-    message = (
-        'Project file "%(project_filename)s" has unknown file extension "%(extension)s"'
-    )
-
-
-class InvalidXmlFileException(BaseException):
-    message = "Project file is an invalid XML document:\n%(xml_error)s"
+def get_location(invalid_token_error_msg: str) -> Optional[XmlLocationError]:
+    """Get column and line numbers from the provided error message."""
+    if "invalid token" not in invalid_token_error_msg.casefold():
+        logger.error("Unable to find 'invalid token' details in the given message")
+        return None
 
+    _, details = invalid_token_error_msg.split(":")
+    line, column = details.split(",")
+    _, line_number = line.strip().split(" ")
+    _, column_number = column.strip().split(" ")
 
-class InvalidQgisFileException(BaseException):
-    message = 'Project file "%(project_filename)s" is invalid QGIS file:\n%(error)s'
+    return XmlLocationError(int(line_number), int(column_number))
 
 
-class InvalidLayersException(BaseException):
-    message = 'Project file "%(project_filename)s" contains invalid layers'
+def contextualize(invalid_token_error_msg: str, fh: io.BufferedReader) -> Optional[str]:
+    """Get a sanitized slice of the line where the exception occurred, with all faulty occurrences sanitized."""
+    location = get_location(invalid_token_error_msg)
+    if location:
+        substitute = "?"
+        fh.seek(0)
+        for cursor_pos, line in enumerate(fh, start=1):
+            if location.line == cursor_pos:
+                faulty_char = line[location.column]
+                suffix_slice = line[: location.column - 1]
+                clean_safe_slice = suffix_slice.decode("utf-8").strip() + substitute
 
+                return f"""
+                Unable to parse this character: {repr(faulty_char)}. 
+ It was replaced by '{substitute}' on line {location.line} that starts with: '{clean_safe_slice}' + """ -class FailedThumbnailGenerationException(BaseException): - message = "Failed to generate project thumbnail:\n%(reason)s" + return None def check_valid_project_file(project_filename: Path) -> None: @@ -44,13 +65,16 @@ def check_valid_project_file(project_filename: Path) -> None: raise ProjectFileNotFoundException(project_filename=project_filename) if project_filename.suffix == ".qgs": - try: - with open(project_filename) as f: - ElementTree.fromstring(f.read()) - except ElementTree.ParseError as err: - raise InvalidXmlFileException( - project_filename=project_filename, xml_error=err - ) + with open(project_filename, "rb") as fh: + try: + for event, elem in ElementTree.iterparse(fh): + continue + except ElementTree.ParseError as error: + error_msg = str(error) + raise InvalidXmlFileException( + project_filename=project_filename, + xml_error=contextualize(error_msg, fh) or error_msg, + ) elif project_filename.suffix != ".qgz": raise InvalidFileExtensionException( project_filename=project_filename, extension=project_filename.suffix @@ -71,7 +95,7 @@ def load_project_file(project_filename: Path) -> QgsProject: return project -def extract_project_details(project: QgsProject) -> Dict[str, str]: +def extract_project_details(project: QgsProject) -> dict[str, str]: """Extract project details""" logger.info("Extract project details…") diff --git a/docker-qgis/utils.py b/docker-qgis/utils.py index d8e5c9aac..95ba03844 100644 --- a/docker-qgis/utils.py +++ b/docker-qgis/utils.py @@ -15,7 +15,7 @@ from contextlib import contextmanager from datetime import datetime from pathlib import Path -from typing import IO, Any, Callable, Dict, List, Optional, Union +from typing import Any, Callable, IO, Optional from libqfieldsync.layer import LayerSource from qfieldcloud_sdk import sdk @@ -39,6 +39,44 @@ qgs_msglog_logger.setLevel(logging.DEBUG) +class QfcWorkerException(Exception): + """QFieldCloud Exception""" + + message = "" + + def __init__(self, message: str = None, **kwargs): + self.message = (message or self.message) % kwargs + self.details = kwargs + + super().__init__(self.message) + + +class ProjectFileNotFoundException(QfcWorkerException): + message = 'Project file "%(project_filename)s" does not exist' + + +class InvalidFileExtensionException(QfcWorkerException): + message = ( + 'Project file "%(project_filename)s" has unknown file extension "%(extension)s"' + ) + + +class InvalidXmlFileException(QfcWorkerException): + message = "Project file is an invalid XML document:\n%(xml_error)s" + + +class InvalidQgisFileException(QfcWorkerException): + message = 'Project file "%(project_filename)s" is invalid QGIS file:\n%(error)s' + + +class InvalidLayersException(QfcWorkerException): + message = 'Project file "%(project_filename)s" contains invalid layers' + + +class FailedThumbnailGenerationException(QfcWorkerException): + message = "Failed to generate project thumbnail:\n%(reason)s" + + def _qt_message_handler(mode, context, message): log_level = logging.DEBUG if mode == QtCore.QtDebugMsg: @@ -268,7 +306,7 @@ def __init__( id: str, version: str, name: str, - steps: List["Step"], + steps: list["Step"], description: str = "", ): self.id = id @@ -331,9 +369,9 @@ def __init__( id: str, name: str, method: Callable, - arguments: Dict[str, Any] = {}, - return_names: List[str] = [], - outputs: List[str] = [], + arguments: dict[str, Any] = {}, + return_names: list[str] = [], + outputs: list[str] = [], ): 
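+        # NOTE: the mutable defaults above ({} and []) are evaluated once and
+        # shared across Step instances that omit these arguments; pass fresh
+        # collections per step if the values are mutated downstream.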
self.id = id self.name = name @@ -366,18 +404,6 @@ def eval(self, root: Path) -> Path: return path -class BaseException(Exception): - """QFieldCloud Exception""" - - message = "" - - def __init__(self, message: str = None, **kwargs): - self.message = (message or self.message) % kwargs - self.details = kwargs - - super().__init__(self.message) - - @contextmanager def logger_context(step: Step): log_uuid = uuid.uuid4() @@ -437,7 +463,7 @@ def get_layer_filename(layer: QgsMapLayer) -> Optional[str]: return None -def extract_project_details(project: QgsProject) -> Dict[str, str]: +def extract_project_details(project: QgsProject) -> dict[str, str]: """Extract project details""" map_settings = QgsMapSettings() details = {} @@ -487,8 +513,8 @@ def json_default(obj): def run_workflow( workflow: Workflow, - feedback_filename: Optional[Union[IO, Path]], -) -> Dict: + feedback_filename: Path | IO, +) -> dict[str, Any]: """Executes the steps required to run a task and return structured feedback from the execution Each step has a method that is executed. @@ -501,7 +527,7 @@ def run_workflow( workflow (Workflow): workflow to be executed feedback_filename (Optional[Union[IO, Path]]): write feedback to an IO device, to Path filename, or don't write it """ - feedback: Dict[str, Any] = { + feedback: dict[str, Any] = { "feedback_version": "2.0", "workflow_version": workflow.version, "workflow_id": workflow.id, @@ -552,10 +578,12 @@ def run_workflow( feedback["error_type"] = "API_OTHER" elif isinstance(err, FileNotFoundError): feedback["error_type"] = "FILE_NOT_FOUND" + elif isinstance(err, InvalidXmlFileException): + feedback["error_type"] = "INVALID_PROJECT_FILE" else: feedback["error_type"] = "UNKNOWN" - (_type, _value, tb) = sys.exc_info() + _type, _value, tb = sys.exc_info() feedback["error_class"] = type(err).__name__ feedback["error_stack"] = traceback.format_tb(tb) finally: @@ -596,7 +624,7 @@ def run_workflow( return feedback -def get_layers_data(project: QgsProject) -> Dict[str, Dict]: +def get_layers_data(project: QgsProject) -> dict[str, dict]: layers_by_id = {} for layer in project.mapLayers().values(): @@ -711,7 +739,7 @@ def get_file_md5sum(filename: str) -> str: return hasher.hexdigest() -def files_list_to_string(files: List[Dict[str, Any]]) -> str: +def files_list_to_string(files: list[dict[str, Any]]) -> str: table = [ [ d["name"], From 458ad8c7eb4875f3a306c008161a0fd36551d84e Mon Sep 17 00:00:00 2001 From: why-not-try-calmer Date: Mon, 4 Sep 2023 09:20:37 +0200 Subject: [PATCH 02/42] formatter --- docker-qgis/process_projectfile.py | 24 +++++++++++++++++------- docker-qgis/utils.py | 6 +++--- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/docker-qgis/process_projectfile.py b/docker-qgis/process_projectfile.py index 3aa511370..9677ed7f8 100644 --- a/docker-qgis/process_projectfile.py +++ b/docker-qgis/process_projectfile.py @@ -38,8 +38,13 @@ def get_location(invalid_token_error_msg: str) -> Optional[XmlLocationError]: return XmlLocationError(int(line_number), int(column_number)) -def contextualize(invalid_token_error_msg: str, fh: io.BufferedReader) -> Optional[str]: - """Get a sanitized slice of the line where the exception occurred, with all faulty occurrences sanitized.""" +def contextualize( + invalid_token_error_msg: str, fh: io.BufferedReader +) -> Optional[tuple[str]]: + """ + Get a sanitized slice of the line where the exception occurred, with all faulty occurrences sanitized. 
+ Returns the string as a 3-substring tuple to avoid tripping Docker Compose's stdout limitations. + """ location = get_location(invalid_token_error_msg) if location: substitute = "?" @@ -50,10 +55,11 @@ def contextualize(invalid_token_error_msg: str, fh: io.BufferedReader) -> Option suffix_slice = line[: location.column - 1] clean_safe_slice = suffix_slice.decode("utf-8").strip() + substitute - return f""" - Unable to parse this character: {repr(faulty_char)}. - It was replaced by '{substitute}' on line {location.line} that starts with: '{clean_safe_slice}' - """ + return ( + f"Unable to parse this character: {repr(faulty_char)}", + f"It was replaced by '{substitute}' on line {location.line} that starts with:", + clean_safe_slice, + ) return None @@ -71,9 +77,13 @@ def check_valid_project_file(project_filename: Path) -> None: continue except ElementTree.ParseError as error: error_msg = str(error) + xml_error = contextualize(error_msg, fh) + if xml_error: + for segment in xml_error: + logger.error(segment) raise InvalidXmlFileException( + xml_error="".join(xml_error) if xml_error else error_msg, project_filename=project_filename, - xml_error=contextualize(error_msg, fh) or error_msg, ) elif project_filename.suffix != ".qgz": raise InvalidFileExtensionException( diff --git a/docker-qgis/utils.py b/docker-qgis/utils.py index 95ba03844..b8d2a4171 100644 --- a/docker-qgis/utils.py +++ b/docker-qgis/utils.py @@ -15,7 +15,7 @@ from contextlib import contextmanager from datetime import datetime from pathlib import Path -from typing import Any, Callable, IO, Optional +from typing import IO, Any, Callable, Optional from libqfieldsync.layer import LayerSource from qfieldcloud_sdk import sdk @@ -513,7 +513,7 @@ def json_default(obj): def run_workflow( workflow: Workflow, - feedback_filename: Path | IO, + feedback_filename: Optional[Path | IO], ) -> dict[str, Any]: """Executes the steps required to run a task and return structured feedback from the execution @@ -525,7 +525,7 @@ def run_workflow( Args: workflow (Workflow): workflow to be executed - feedback_filename (Optional[Union[IO, Path]]): write feedback to an IO device, to Path filename, or don't write it + feedback_filename (IO | Path): write feedback to an IO device, to Path filename, or don't write it """ feedback: dict[str, Any] = { "feedback_version": "2.0", From 3a3cfebf2db153bf9e262b9a063cf912f5926291 Mon Sep 17 00:00:00 2001 From: why-not-try-calmer Date: Mon, 4 Sep 2023 10:43:25 +0200 Subject: [PATCH 03/42] format --- docker-qgis/process_projectfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-qgis/process_projectfile.py b/docker-qgis/process_projectfile.py index 9677ed7f8..4d1db0e22 100644 --- a/docker-qgis/process_projectfile.py +++ b/docker-qgis/process_projectfile.py @@ -40,7 +40,7 @@ def get_location(invalid_token_error_msg: str) -> Optional[XmlLocationError]: def contextualize( invalid_token_error_msg: str, fh: io.BufferedReader -) -> Optional[tuple[str]]: +) -> Optional[tuple[str, str, str]]: """ Get a sanitized slice of the line where the exception occurred, with all faulty occurrences sanitized. Returns the string as a 3-substring tuple to avoid tripping Docker Compose's stdout limitations. 
From 5a43160beea8f02db9e7a9660b063e88116f9a9a Mon Sep 17 00:00:00 2001 From: why-not-try-calmer Date: Tue, 5 Sep 2023 09:08:01 +0200 Subject: [PATCH 04/42] docstring --- docker-qgis/process_projectfile.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docker-qgis/process_projectfile.py b/docker-qgis/process_projectfile.py index 4d1db0e22..fc58020cc 100644 --- a/docker-qgis/process_projectfile.py +++ b/docker-qgis/process_projectfile.py @@ -19,12 +19,12 @@ logger = logging.getLogger("PROCPRJ") -class XmlLocationError(NamedTuple): +class XmlErrorLocation(NamedTuple): line: int column: int -def get_location(invalid_token_error_msg: str) -> Optional[XmlLocationError]: +def get_location(invalid_token_error_msg: str) -> Optional[XmlErrorLocation]: """Get column and line numbers from the provided error message.""" if "invalid token" not in invalid_token_error_msg.casefold(): logger.error("Unable to find 'invalid token' details in the given message") @@ -35,7 +35,7 @@ def get_location(invalid_token_error_msg: str) -> Optional[XmlLocationError]: _, line_number = line.strip().split(" ") _, column_number = column.strip().split(" ") - return XmlLocationError(int(line_number), int(column_number)) + return XmlErrorLocation(int(line_number), int(column_number)) def contextualize( @@ -44,6 +44,7 @@ def contextualize( """ Get a sanitized slice of the line where the exception occurred, with all faulty occurrences sanitized. Returns the string as a 3-substring tuple to avoid tripping Docker Compose's stdout limitations. + Makes no use of '.decode(..., errors="replace")' because it still throws on some entities. """ location = get_location(invalid_token_error_msg) if location: From 3f004270a6c97bfa01d18b22724dcc74c71d8cf7 Mon Sep 17 00:00:00 2001 From: why-not-try-calmer Date: Tue, 5 Sep 2023 15:25:51 +0200 Subject: [PATCH 05/42] adding html escaping --- docker-qgis/process_projectfile.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docker-qgis/process_projectfile.py b/docker-qgis/process_projectfile.py index fc58020cc..7b7bf5e43 100644 --- a/docker-qgis/process_projectfile.py +++ b/docker-qgis/process_projectfile.py @@ -1,3 +1,4 @@ +import html import io import logging from pathlib import Path @@ -42,8 +43,7 @@ def contextualize( invalid_token_error_msg: str, fh: io.BufferedReader ) -> Optional[tuple[str, str, str]]: """ - Get a sanitized slice of the line where the exception occurred, with all faulty occurrences sanitized. - Returns the string as a 3-substring tuple to avoid tripping Docker Compose's stdout limitations. + Get an html-safe slice of the line where the exception occurred, with all faulty occurrences sanitized. Makes no use of '.decode(..., errors="replace")' because it still throws on some entities. 
""" location = get_location(invalid_token_error_msg) @@ -59,7 +59,7 @@ def contextualize( return ( f"Unable to parse this character: {repr(faulty_char)}", f"It was replaced by '{substitute}' on line {location.line} that starts with:", - clean_safe_slice, + html.escape(clean_safe_slice), ) return None From 14831d3db74f5b7c51753ddcde19504e7509a93b Mon Sep 17 00:00:00 2001 From: Adrien Date: Thu, 7 Sep 2023 09:01:52 +0200 Subject: [PATCH 06/42] Apply suggestions from code review Co-authored-by: Ivan Ivanov --- docker-qgis/process_projectfile.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-qgis/process_projectfile.py b/docker-qgis/process_projectfile.py index 7b7bf5e43..1917f33a9 100644 --- a/docker-qgis/process_projectfile.py +++ b/docker-qgis/process_projectfile.py @@ -57,8 +57,8 @@ def contextualize( clean_safe_slice = suffix_slice.decode("utf-8").strip() + substitute return ( - f"Unable to parse this character: {repr(faulty_char)}", - f"It was replaced by '{substitute}' on line {location.line} that starts with:", + f"Unable to parse character: {repr(faulty_char)}", + f"Replaced by '{substitute}' on line {location.line} that starts with:", html.escape(clean_safe_slice), ) From 26c088637b9dd4121219ce361a7ad14c6f86f6c8 Mon Sep 17 00:00:00 2001 From: why-not-try-calmer Date: Thu, 7 Sep 2023 09:02:29 +0200 Subject: [PATCH 07/42] accomodated --- docker-qgis/process_projectfile.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/docker-qgis/process_projectfile.py b/docker-qgis/process_projectfile.py index 1917f33a9..9eff0ffa6 100644 --- a/docker-qgis/process_projectfile.py +++ b/docker-qgis/process_projectfile.py @@ -25,7 +25,9 @@ class XmlErrorLocation(NamedTuple): column: int -def get_location(invalid_token_error_msg: str) -> Optional[XmlErrorLocation]: +def get_qgis_xml_error_location( + invalid_token_error_msg: str, +) -> Optional[XmlErrorLocation]: """Get column and line numbers from the provided error message.""" if "invalid token" not in invalid_token_error_msg.casefold(): logger.error("Unable to find 'invalid token' details in the given message") @@ -39,14 +41,11 @@ def get_location(invalid_token_error_msg: str) -> Optional[XmlErrorLocation]: return XmlErrorLocation(int(line_number), int(column_number)) -def contextualize( +def get_qgis_xml_error_context( invalid_token_error_msg: str, fh: io.BufferedReader ) -> Optional[tuple[str, str, str]]: - """ - Get an html-safe slice of the line where the exception occurred, with all faulty occurrences sanitized. - Makes no use of '.decode(..., errors="replace")' because it still throws on some entities. - """ - location = get_location(invalid_token_error_msg) + """Get a slice of the line where the exception occurred, with all faulty occurrences sanitized.""" + location = get_qgis_xml_error_location(invalid_token_error_msg) if location: substitute = "?" 
fh.seek(0) @@ -78,7 +77,7 @@ def check_valid_project_file(project_filename: Path) -> None: continue except ElementTree.ParseError as error: error_msg = str(error) - xml_error = contextualize(error_msg, fh) + xml_error = get_qgis_xml_error_context(error_msg, fh) if xml_error: for segment in xml_error: logger.error(segment) From df2ab59bd3897fe9799caa51c9dc23cfb0bfacee Mon Sep 17 00:00:00 2001 From: why-not-try-calmer Date: Thu, 7 Sep 2023 09:49:54 +0200 Subject: [PATCH 08/42] moved to utils where needed to --- docker-qgis/process_projectfile.py | 48 +----------------------------- docker-qgis/utils.py | 45 +++++++++++++++++++++++++++- 2 files changed, 45 insertions(+), 48 deletions(-) diff --git a/docker-qgis/process_projectfile.py b/docker-qgis/process_projectfile.py index 9eff0ffa6..b047a7e3f 100644 --- a/docker-qgis/process_projectfile.py +++ b/docker-qgis/process_projectfile.py @@ -1,8 +1,5 @@ -import html -import io import logging from pathlib import Path -from typing import NamedTuple, Optional from xml.etree import ElementTree from qfieldcloud.qgis.utils import ( @@ -11,6 +8,7 @@ InvalidXmlFileException, ProjectFileNotFoundException, get_layers_data, + get_qgis_xml_error_context, layers_data_to_string, ) from qgis.core import QgsMapRendererParallelJob, QgsMapSettings, QgsProject @@ -20,50 +18,6 @@ logger = logging.getLogger("PROCPRJ") -class XmlErrorLocation(NamedTuple): - line: int - column: int - - -def get_qgis_xml_error_location( - invalid_token_error_msg: str, -) -> Optional[XmlErrorLocation]: - """Get column and line numbers from the provided error message.""" - if "invalid token" not in invalid_token_error_msg.casefold(): - logger.error("Unable to find 'invalid token' details in the given message") - return None - - _, details = invalid_token_error_msg.split(":") - line, column = details.split(",") - _, line_number = line.strip().split(" ") - _, column_number = column.strip().split(" ") - - return XmlErrorLocation(int(line_number), int(column_number)) - - -def get_qgis_xml_error_context( - invalid_token_error_msg: str, fh: io.BufferedReader -) -> Optional[tuple[str, str, str]]: - """Get a slice of the line where the exception occurred, with all faulty occurrences sanitized.""" - location = get_qgis_xml_error_location(invalid_token_error_msg) - if location: - substitute = "?" 
- fh.seek(0) - for cursor_pos, line in enumerate(fh, start=1): - if location.line == cursor_pos: - faulty_char = line[location.column] - suffix_slice = line[: location.column - 1] - clean_safe_slice = suffix_slice.decode("utf-8").strip() + substitute - - return ( - f"Unable to parse character: {repr(faulty_char)}", - f"Replaced by '{substitute}' on line {location.line} that starts with:", - html.escape(clean_safe_slice), - ) - - return None - - def check_valid_project_file(project_filename: Path) -> None: logger.info("Check QGIS project file validity…") diff --git a/docker-qgis/utils.py b/docker-qgis/utils.py index b8d2a4171..153ea88af 100644 --- a/docker-qgis/utils.py +++ b/docker-qgis/utils.py @@ -1,6 +1,7 @@ import atexit import hashlib import inspect +import html import io import json import logging @@ -15,7 +16,7 @@ from contextlib import contextmanager from datetime import datetime from pathlib import Path -from typing import IO, Any, Callable, Optional +from typing import IO, Any, Callable, NamedTuple, Optional from libqfieldsync.layer import LayerSource from qfieldcloud_sdk import sdk @@ -842,3 +843,45 @@ def setup_basic_logging_config(): for handler in logging.root.handlers: handler.setFormatter(formatter) + +class XmlErrorLocation(NamedTuple): + line: int + column: int + + +def get_qgis_xml_error_location( + invalid_token_error_msg: str, +) -> Optional[XmlErrorLocation]: + """Get column and line numbers from the provided error message.""" + if "invalid token" not in invalid_token_error_msg.casefold(): + return None + + _, details = invalid_token_error_msg.split(":") + line, column = details.split(",") + _, line_number = line.strip().split(" ") + _, column_number = column.strip().split(" ") + + return XmlErrorLocation(int(line_number), int(column_number)) + + +def get_qgis_xml_error_context( + invalid_token_error_msg: str, fh: io.BufferedReader +) -> Optional[tuple[str, str, str]]: + """Get a slice of the line where the exception occurred, with all faulty occurrences sanitized.""" + location = get_qgis_xml_error_location(invalid_token_error_msg) + if location: + substitute = "?" 
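+        # Rewind before scanning: iterparse() has already consumed the stream
+        # up to the point where parsing failed.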
+        fh.seek(0)
+        for cursor_pos, line in enumerate(fh, start=1):
+            if location.line == cursor_pos:
+                faulty_char = line[location.column]
+                suffix_slice = line[: location.column - 1]
+                clean_safe_slice = suffix_slice.decode("utf-8").strip() + substitute
+
+                return (
+                    f"Unable to parse character: {repr(faulty_char)}",
+                    f"Replaced by '{substitute}' on line {location.line} that starts with:",
+                    html.escape(clean_safe_slice),
+                )
+
+    return None

From 641240129be918d14edd1d56aed2bec4d4865788 Mon Sep 17 00:00:00 2001
From: why-not-try-calmer
Date: Thu, 7 Sep 2023 09:52:10 +0200
Subject: [PATCH 09/42] sorting deps for isort

---
 docker-qgis/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker-qgis/utils.py b/docker-qgis/utils.py
index 153ea88af..ed7d8228f 100644
--- a/docker-qgis/utils.py
+++ b/docker-qgis/utils.py
@@ -1,8 +1,8 @@
 import atexit
 import hashlib
-import inspect
 import html
 import io
+import inspect
 import json
 import logging

From 16c10e36eab886534e3f28666a426b828e72c083 Mon Sep 17 00:00:00 2001
From: why-not-try-calmer
Date: Thu, 7 Sep 2023 09:54:42 +0200
Subject: [PATCH 10/42] when your local pre-commit re-orders in a way that
 makes the remote pre-commit throw, you feel dead inside

---
 docker-qgis/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker-qgis/utils.py b/docker-qgis/utils.py
index ed7d8228f..610961717 100644
--- a/docker-qgis/utils.py
+++ b/docker-qgis/utils.py
@@ -1,8 +1,8 @@
 import atexit
 import hashlib
 import html
-import io
 import inspect
+import io
 import json
 import logging

From ca244817778b6af9a64cfb5d0a8a306205eba55f Mon Sep 17 00:00:00 2001
From: why-not-try-calmer
Date: Thu, 7 Sep 2023 09:58:26 +0200
Subject: [PATCH 11/42] format

---
 docker-qgis/utils.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docker-qgis/utils.py b/docker-qgis/utils.py
index 610961717..dfb8bc955 100644
--- a/docker-qgis/utils.py
+++ b/docker-qgis/utils.py
@@ -844,6 +844,7 @@ def setup_basic_logging_config():
     for handler in logging.root.handlers:
         handler.setFormatter(formatter)
 
+
 class XmlErrorLocation(NamedTuple):
     line: int
     column: int

From c2f9d8ab6e894258a5dda274e0f0cd968bfbdee5 Mon Sep 17 00:00:00 2001
From: why-not-try-calmer
Date: Thu, 7 Sep 2023 14:07:46 +0200
Subject: [PATCH 12/42] escaping feedback json field from custom json decoder

---
 docker-app/qfieldcloud/core/models.py | 17 +++++++++++++++--
 docker-qgis/utils.py                  |  3 +--
 2 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/docker-app/qfieldcloud/core/models.py b/docker-app/qfieldcloud/core/models.py
index 20b5ae6ae..659b13c57 100644
--- a/docker-app/qfieldcloud/core/models.py
+++ b/docker-app/qfieldcloud/core/models.py
@@ -1,3 +1,4 @@
+import json
 import logging
 import os
 import secrets
@@ -5,7 +6,7 @@
 import uuid
 from datetime import datetime, timedelta
 from enum import Enum
-from typing import List, Optional, cast
+from typing import Any, List, Optional, cast
 
 import django_cryptography.fields
 from deprecated import deprecated
@@ -21,6 +22,7 @@
 from django.db.models.aggregates import Count, Sum
 from django.db.models.fields.json import JSONField
 from django.urls import reverse_lazy
+from django.utils import html
 from django.utils.functional import cached_property
 from django.utils.translation import gettext as _
 from model_utils.managers import InheritanceManager, InheritanceManagerMixin
@@ -1609,6 +1611,17 @@ def method(self):
         return self.content.get("method")
 
 
+class HtmlSafeDecoder(json.JSONDecoder):
+    def __init__(self, *args, 
**kwargs): + super().__init__(self, object_hook=self.object_hook, *args, **kwargs) + + def object_hook(self, obj) -> dict[str, Any]: + """Ensure that the value at `error` is html-escaped.""" + if "error" in obj: + obj["error"] = html.escape(obj["error"]) + return obj + + class Job(models.Model): objects = InheritanceManager() @@ -1637,7 +1650,7 @@ class Status(models.TextChoices): max_length=32, choices=Status.choices, default=Status.PENDING, db_index=True ) output = models.TextField(null=True) - feedback = JSONField(null=True) + feedback = JSONField(null=True, decoder=HtmlSafeDecoder) created_by = models.ForeignKey(User, on_delete=models.CASCADE) created_at = models.DateTimeField(auto_now_add=True, db_index=True) updated_at = models.DateTimeField(auto_now=True, db_index=True) diff --git a/docker-qgis/utils.py b/docker-qgis/utils.py index dfb8bc955..5ea3c0b50 100644 --- a/docker-qgis/utils.py +++ b/docker-qgis/utils.py @@ -1,6 +1,5 @@ import atexit import hashlib -import html import inspect import io import json @@ -882,7 +881,7 @@ def get_qgis_xml_error_context( return ( f"Unable to parse character: {repr(faulty_char)}", f"Replaced by '{substitute}' on line {location.line} that starts with:", - html.escape(clean_safe_slice), + clean_safe_slice, ) return None From d0bb28425ec40cfb3c8fbcc35a6b2442106df0d6 Mon Sep 17 00:00:00 2001 From: why-not-try-calmer Date: Thu, 7 Sep 2023 14:30:23 +0200 Subject: [PATCH 13/42] property-based escaping --- docker-app/qfieldcloud/core/models.py | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/docker-app/qfieldcloud/core/models.py b/docker-app/qfieldcloud/core/models.py index 659b13c57..03458ed1f 100644 --- a/docker-app/qfieldcloud/core/models.py +++ b/docker-app/qfieldcloud/core/models.py @@ -1,4 +1,4 @@ -import json +import html import logging import os import secrets @@ -6,7 +6,7 @@ import uuid from datetime import datetime, timedelta from enum import Enum -from typing import Any, List, Optional, cast +from typing import List, Optional, cast import django_cryptography.fields from deprecated import deprecated @@ -22,7 +22,6 @@ from django.db.models.aggregates import Count, Sum from django.db.models.fields.json import JSONField from django.urls import reverse_lazy -from django.utils import html from django.utils.functional import cached_property from django.utils.translation import gettext as _ from model_utils.managers import InheritanceManager, InheritanceManagerMixin @@ -1611,17 +1610,6 @@ def method(self): return self.content.get("method") -class HtmlSafeDecoder(json.JSONDecoder): - def __init__(self, *args, **kwargs): - super().__init__(self, object_hook=self.object_hook, *args, **kwargs) - - def object_hook(self, obj) -> dict[str, Any]: - """Ensure that the value at `error` is html-escaped.""" - if "error" in obj: - obj["error"] = html.escape(obj["error"]) - return obj - - class Job(models.Model): objects = InheritanceManager() @@ -1650,7 +1638,7 @@ class Status(models.TextChoices): max_length=32, choices=Status.choices, default=Status.PENDING, db_index=True ) output = models.TextField(null=True) - feedback = JSONField(null=True, decoder=HtmlSafeDecoder) + feedback = JSONField(null=True) created_by = models.ForeignKey(User, on_delete=models.CASCADE) created_at = models.DateTimeField(auto_now_add=True, db_index=True) updated_at = models.DateTimeField(auto_now=True, db_index=True) @@ -1663,6 +1651,10 @@ class Status(models.TextChoices): max_length=64, default="", blank=True, db_index=True ) + @property + def 
escaped_output(self) -> str: + return html.escape(self.output) + @property def short_id(self) -> str: return str(self.id)[0:8] From 957d7ee6b2bc16284549bdde63808ea1f7f326a7 Mon Sep 17 00:00:00 2001 From: why-not-try-calmer Date: Fri, 8 Sep 2023 07:42:31 +0200 Subject: [PATCH 14/42] simplified since we no longer need to call logger multiple times --- docker-qgis/process_projectfile.py | 6 +----- docker-qgis/utils.py | 8 ++------ 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/docker-qgis/process_projectfile.py b/docker-qgis/process_projectfile.py index b047a7e3f..49d37b1a1 100644 --- a/docker-qgis/process_projectfile.py +++ b/docker-qgis/process_projectfile.py @@ -31,12 +31,8 @@ def check_valid_project_file(project_filename: Path) -> None: continue except ElementTree.ParseError as error: error_msg = str(error) - xml_error = get_qgis_xml_error_context(error_msg, fh) - if xml_error: - for segment in xml_error: - logger.error(segment) raise InvalidXmlFileException( - xml_error="".join(xml_error) if xml_error else error_msg, + xml_error=get_qgis_xml_error_context(error_msg, fh) or error_msg, project_filename=project_filename, ) elif project_filename.suffix != ".qgz": diff --git a/docker-qgis/utils.py b/docker-qgis/utils.py index 5ea3c0b50..6866986ce 100644 --- a/docker-qgis/utils.py +++ b/docker-qgis/utils.py @@ -866,7 +866,7 @@ def get_qgis_xml_error_location( def get_qgis_xml_error_context( invalid_token_error_msg: str, fh: io.BufferedReader -) -> Optional[tuple[str, str, str]]: +) -> Optional[str]: """Get a slice of the line where the exception occurred, with all faulty occurrences sanitized.""" location = get_qgis_xml_error_location(invalid_token_error_msg) if location: @@ -878,10 +878,6 @@ def get_qgis_xml_error_context( suffix_slice = line[: location.column - 1] clean_safe_slice = suffix_slice.decode("utf-8").strip() + substitute - return ( - f"Unable to parse character: {repr(faulty_char)}", - f"Replaced by '{substitute}' on line {location.line} that starts with:", - clean_safe_slice, - ) + return f"Unable to parse character: {repr(faulty_char)}. 
Replaced by '{substitute}' on line {location.line} that starts with: {clean_safe_slice}"
 
     return None

From f207add7ac2b9fd61b9cb80f6f13e91fbd2b5be5 Mon Sep 17 00:00:00 2001
From: why-not-try-calmer
Date: Fri, 8 Sep 2023 10:55:08 +0200
Subject: [PATCH 15/42] escaping in template

---
 docker-app/qfieldcloud/core/models.py | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/docker-app/qfieldcloud/core/models.py b/docker-app/qfieldcloud/core/models.py
index 03458ed1f..20b5ae6ae 100644
--- a/docker-app/qfieldcloud/core/models.py
+++ b/docker-app/qfieldcloud/core/models.py
@@ -1,4 +1,3 @@
-import html
 import logging
 import os
 import secrets
@@ -1651,10 +1650,6 @@ class Status(models.TextChoices):
         max_length=64, default="", blank=True, db_index=True
     )
 
-    @property
-    def escaped_output(self) -> str:
-        return html.escape(self.output)
-
     @property
     def short_id(self) -> str:
         return str(self.id)[0:8]

From b7e63621caaf951997397d8a9ba8700f3a2f8762 Mon Sep 17 00:00:00 2001
From: Ivan Ivanov
Date: Wed, 13 Sep 2023 15:39:27 +0300
Subject: [PATCH 16/42] Bump QGIS to 3.32.2

---
 docker-qgis/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker-qgis/Dockerfile b/docker-qgis/Dockerfile
index 627f1b8e4..5e18678ac 100644
--- a/docker-qgis/Dockerfile
+++ b/docker-qgis/Dockerfile
@@ -1,4 +1,4 @@
-FROM qgis/qgis:final-3_30_3
+FROM qgis/qgis:final-3_32_2
 
 RUN apt-get update \
     && apt-get upgrade -y \

From 1ddce5c194abfb39f7dcc697099fa7baddb67bb4 Mon Sep 17 00:00:00 2001
From: Ivan Ivanov
Date: Thu, 14 Sep 2023 22:07:08 +0300
Subject: [PATCH 17/42] Removed redis as a dependency

At the end of the day it was never actually used and only brought noise
when QFC is deployed.
---
 .env.example                                  |    3 -
 .github/workflows/build_and_push.yml          |   19 -
 .pre-commit-config.yaml                       |    4 +-
 README.md                                     |    1 -
 .../core/management/commands/status.py        |    5 -
 docker-app/qfieldcloud/core/tests/test_api.py |    1 -
 docker-app/qfieldcloud/core/utils.py          |   13 -
 .../qfieldcloud/core/views/status_views.py    |    5 -
 docker-app/requirements.txt                   |    1 -
 docker-app/wait_for_services.py               |   24 -
 docker-compose.override.local.yml             |    5 -
 docker-compose.yml                            |   14 -
 docker-redis/Dockerfile                       |    8 -
 docker-redis/redis.conf                       | 1862 -----------------
 14 files changed, 2 insertions(+), 1963 deletions(-)
 delete mode 100644 docker-redis/Dockerfile
 delete mode 100644 docker-redis/redis.conf

diff --git a/.env.example b/.env.example
index faf98c157..65af46907 100644
--- a/.env.example
+++ b/.env.example
@@ -71,9 +71,6 @@ SENTRY_SAMPLE_RATE=1
 # DEFAULT: dev
 SENTRY_RELEASE=dev
 
-REDIS_PASSWORD=change_me_with_a_very_loooooooooooong_password
-REDIS_PORT=6379
-
 # Memcached port. 
Exposed only in docker-compose.local.yml # DEFAULT: 11211 MEMCACHED_PORT=11211 diff --git a/.github/workflows/build_and_push.yml b/.github/workflows/build_and_push.yml index 7f1aa0088..1798b50f2 100644 --- a/.github/workflows/build_and_push.yml +++ b/.github/workflows/build_and_push.yml @@ -74,25 +74,6 @@ jobs: push: ${{ github.event_name != 'pull_request' }} tags: opengisch/qfieldcloud-worker-wrapper:${{ steps.prepare.outputs.tag }} - # Redis - - name: Docker Test Redis - id: docker_test_redis - uses: docker/build-push-action@v2 - with: - builder: ${{ steps.buildx.outputs.name }} - context: ./docker-redis - file: ./docker-redis/Dockerfile - - - name: Docker Build and Push Redis - id: docker_build_and_push_redis - uses: docker/build-push-action@v2 - with: - builder: ${{ steps.buildx.outputs.name }} - context: ./docker-redis - file: ./docker-redis/Dockerfile - push: ${{ github.event_name != 'pull_request' }} - tags: opengisch/qfieldcloud-redis:${{ steps.prepare.outputs.tag }} - # QGIS - name: Docker Test QGIS id: docker_test_qgis diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index caa351c88..9900a7176 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,6 +51,6 @@ repos: rev: 'v1.5.1' hooks: - id: mypy - additional_dependencies: [types-pytz, types-Deprecated, types-PyYAML, types-requests, types-redis, types-tabulate, types-jsonschema, django-stubs] + additional_dependencies: [types-pytz, types-Deprecated, types-PyYAML, types-requests, types-tabulate, types-jsonschema, django-stubs] pass_filenames: false - entry: bash -c 'mypy -p docker-qgis -p docker-app -p docker-redis "$@"' -- + entry: bash -c 'mypy -p docker-qgis -p docker-app "$@"' -- diff --git a/README.md b/README.md index 523c6d22e..d0ed8b297 100644 --- a/README.md +++ b/README.md @@ -245,7 +245,6 @@ Based on this example | nginx https | 443 | WEB_HTTPS_PORT | :white_check_mark: | :white_check_mark: | :white_check_mark: | | django http | 8011 | DJANGO_DEV_PORT | :white_check_mark: | :x: | :x: | | postgres | 5433 | HOST_POSTGRES_PORT | :white_check_mark: | :white_check_mark: | :white_check_mark: | -| redis | 6379 | REDIS_PORT | :white_check_mark: | :white_check_mark: | :white_check_mark: | | memcached | 11211 | MEMCACHED_PORT | :white_check_mark: | :x: | :x: | | geodb | 5432 | HOST_POSTGRES_PORT | :white_check_mark: | :white_check_mark: | :x: | | minio API | 8009 | MINIO_API_PORT | :white_check_mark: | :x: | :x: | diff --git a/docker-app/qfieldcloud/core/management/commands/status.py b/docker-app/qfieldcloud/core/management/commands/status.py index 8d70f7cee..a2979ebcb 100644 --- a/docker-app/qfieldcloud/core/management/commands/status.py +++ b/docker-app/qfieldcloud/core/management/commands/status.py @@ -9,11 +9,6 @@ class Command(BaseCommand): def handle(self, *args, **options): results = {} - results["redis"] = "ok" - # Check if redis is visible - if not utils.redis_is_running(): - results["redis"] = "error" - results["geodb"] = "ok" # Check geodb if not geodb_utils.geodb_is_running(): diff --git a/docker-app/qfieldcloud/core/tests/test_api.py b/docker-app/qfieldcloud/core/tests/test_api.py index df5500a6a..ba5239c82 100644 --- a/docker-app/qfieldcloud/core/tests/test_api.py +++ b/docker-app/qfieldcloud/core/tests/test_api.py @@ -36,7 +36,6 @@ def setUp(self): def test_api_status(self): response = self.client.get("/api/v1/status/") self.assertTrue(status.is_success(response.status_code)) - self.assertEqual(response.json()["redis"], "ok") self.assertEqual(response.json()["storage"], "ok") 
self.assertEqual(response.json()["geodb"], "ok") diff --git a/docker-app/qfieldcloud/core/utils.py b/docker-app/qfieldcloud/core/utils.py index fc765c1bf..26e2aca93 100644 --- a/docker-app/qfieldcloud/core/utils.py +++ b/docker-app/qfieldcloud/core/utils.py @@ -14,7 +14,6 @@ from botocore.errorfactory import ClientError from django.conf import settings from django.core.files.uploadedfile import InMemoryUploadedFile, TemporaryUploadedFile -from redis import Redis, exceptions logger = logging.getLogger(__name__) @@ -85,18 +84,6 @@ def total_size(self) -> int: return sum(v.size for v in self.versions if v.size is not None) -def redis_is_running() -> bool: - try: - connection = Redis( - "redis", password=os.environ.get("REDIS_PASSWORD"), port=6379 - ) - connection.set("foo", "bar") - except exceptions.ConnectionError: - return False - - return True - - def get_s3_session() -> boto3.Session: """Get a new S3 Session instance using Django settings""" diff --git a/docker-app/qfieldcloud/core/views/status_views.py b/docker-app/qfieldcloud/core/views/status_views.py index 9293f5443..6febe4a1f 100644 --- a/docker-app/qfieldcloud/core/views/status_views.py +++ b/docker-app/qfieldcloud/core/views/status_views.py @@ -17,11 +17,6 @@ def get(self, request): # Try to get the status from the cache results = cache.get("status_results", {}) if not results: - results["redis"] = "ok" - # Check if redis is visible - if not utils.redis_is_running(): - results["redis"] = "error" - results["geodb"] = "ok" # Check geodb if not geodb_utils.geodb_is_running(): diff --git a/docker-app/requirements.txt b/docker-app/requirements.txt index cfae27a0a..eafb52820 100644 --- a/docker-app/requirements.txt +++ b/docker-app/requirements.txt @@ -67,7 +67,6 @@ pyrsistent==0.19.3 python-dateutil==2.8.2 python3-openid==3.2.0 pytz==2023.3 -redis==3.5.3 requests==2.31.0 requests-oauthlib==1.3.1 ruamel.yaml==0.17.26 diff --git a/docker-app/wait_for_services.py b/docker-app/wait_for_services.py index 6324f70e4..5f5e7da72 100644 --- a/docker-app/wait_for_services.py +++ b/docker-app/wait_for_services.py @@ -3,7 +3,6 @@ from time import sleep, time import psycopg2 -import redis logger = logging.getLogger() logger.setLevel(logging.INFO) @@ -41,26 +40,3 @@ def wait_for_postgres(): wait_for_postgres() - - -def wait_for_redis(): - logger.info("Waiting for redis...") - start_time = time() - while time() - start_time < TIMEOUT: - logger.info("Waiting for redis...") - try: - r = redis.Redis( - host="redis", password=os.environ.get("REDIS_PASSWORD"), db=0 - ) - if not r.ping(): - raise Exception - logger.info("Redis is ready! 
✨ 💅") - return True - except Exception as e: - logger.info("Redis isn't ready.\n%s" % e) - sleep(INTERVAL) - - return False - - -wait_for_redis() diff --git a/docker-compose.override.local.yml b/docker-compose.override.local.yml index 5140114d1..08590a282 100644 --- a/docker-compose.override.local.yml +++ b/docker-compose.override.local.yml @@ -18,7 +18,6 @@ services: command: python3 -m debugpy --listen 0.0.0.0:5678 manage.py runserver 0.0.0.0:8000 depends_on: - db - - redis worker_wrapper: scale: ${QFIELDCLOUD_WORKER_REPLICAS} @@ -62,10 +61,6 @@ services: - ${HOST_POSTGRES_PORT}:5432 command: ["postgres", "-c", "log_statement=all", "-c", "log_destination=stderr"] - redis: - ports: - - "${REDIS_PORT}:6379" - memcached: ports: - "${MEMCACHED_PORT}:11211" diff --git a/docker-compose.yml b/docker-compose.yml index 3bee88a0b..daa8f1daa 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -49,7 +49,6 @@ services: STORAGE_REGION_NAME: ${STORAGE_REGION_NAME} STORAGE_ENDPOINT_URL: ${STORAGE_ENDPOINT_URL} QFIELDCLOUD_DEFAULT_NETWORK: ${QFIELDCLOUD_DEFAULT_NETWORK} - REDIS_PASSWORD: ${REDIS_PASSWORD} GEODB_HOST: ${GEODB_HOST} GEODB_PORT: ${GEODB_PORT} GEODB_USER: ${GEODB_USER} @@ -87,8 +86,6 @@ services: ofelia.job-exec.runcrons.no-overlap: "true" ofelia.job-exec.runcrons.schedule: "@every 1m" ofelia.job-exec.runcrons.command: python manage.py runcrons - depends_on: - - redis nginx: image: nginx:stable @@ -141,17 +138,6 @@ services: command: bash -c "echo QGIS built" logging: *default-logging - redis: - build: - context: ./docker-redis - network: host - args: - REDIS_PASSWORD: ${REDIS_PASSWORD} - restart: unless-stopped - expose: - - "6379:6379" - logging: *default-logging - worker_wrapper: <<: *default-django build: diff --git a/docker-redis/Dockerfile b/docker-redis/Dockerfile deleted file mode 100644 index c4a6dfd2c..000000000 --- a/docker-redis/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM redis:6.0.8 - -ADD ./redis.conf /redis.conf - -# Add the user to redis conf -ARG REDIS_PASSWORD -RUN echo "\n\nuser default +@all ~* on >$REDIS_PASSWORD" >> /redis.conf -CMD [ "redis-server", "/redis.conf" ] diff --git a/docker-redis/redis.conf b/docker-redis/redis.conf deleted file mode 100644 index 6669be89e..000000000 --- a/docker-redis/redis.conf +++ /dev/null @@ -1,1862 +0,0 @@ -# Redis configuration file example. -# -# Note that in order to read the configuration file, Redis must be -# started with the file path as first argument: -# -# ./redis-server /path/to/redis.conf - -# Note on units: when memory size is needed, it is possible to specify -# it in the usual form of 1k 5GB 4M and so forth: -# -# 1k => 1000 bytes -# 1kb => 1024 bytes -# 1m => 1000000 bytes -# 1mb => 1024*1024 bytes -# 1g => 1000000000 bytes -# 1gb => 1024*1024*1024 bytes -# -# units are case insensitive so 1GB 1Gb 1gB are all the same. - -################################## INCLUDES ################################### - -# Include one or more other config files here. This is useful if you -# have a standard template that goes to all Redis servers but also need -# to customize a few per-server settings. Include files can include -# other files, so use this wisely. -# -# Notice option "include" won't be rewritten by command "CONFIG REWRITE" -# from admin or Redis Sentinel. Since Redis always uses the last processed -# line as value of a configuration directive, you'd better put includes -# at the beginning of this file to avoid overwriting config change at runtime. 
-# -# If instead you are interested in using includes to override configuration -# options, it is better to use include as the last line. -# -# include /path/to/local.conf -# include /path/to/other.conf - -################################## MODULES ##################################### - -# Load modules at startup. If the server is not able to load modules -# it will abort. It is possible to use multiple loadmodule directives. -# -# loadmodule /path/to/my_module.so -# loadmodule /path/to/other_module.so - -################################## NETWORK ##################################### - -# By default, if no "bind" configuration directive is specified, Redis listens -# for connections from all the network interfaces available on the server. -# It is possible to listen to just one or multiple selected interfaces using -# the "bind" configuration directive, followed by one or more IP addresses. -# -# Examples: -# -# bind 192.168.1.100 10.0.0.1 -# bind 127.0.0.1 ::1 -# -# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the -# internet, binding to all the interfaces is dangerous and will expose the -# instance to everybody on the internet. So by default we uncomment the -# following bind directive, that will force Redis to listen only into -# the IPv4 loopback interface address (this means Redis will be able to -# accept connections only from clients running into the same computer it -# is running). -# -# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES -# JUST COMMENT THE FOLLOWING LINE. -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# bind 127.0.0.1 - -# Protected mode is a layer of security protection, in order to avoid that -# Redis instances left open on the internet are accessed and exploited. -# -# When protected mode is on and if: -# -# 1) The server is not binding explicitly to a set of addresses using the -# "bind" directive. -# 2) No password is configured. -# -# The server only accepts connections from clients connecting from the -# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain -# sockets. -# -# By default protected mode is enabled. You should disable it only if -# you are sure you want clients from other hosts to connect to Redis -# even if no authentication is configured, nor a specific set of interfaces -# are explicitly listed using the "bind" directive. -protected-mode yes - -# Accept connections on the specified port, default is 6379 (IANA #815344). -# If port 0 is specified Redis will not listen on a TCP socket. -port 6379 - -# TCP listen() backlog. -# -# In high requests-per-second environments you need an high backlog in order -# to avoid slow clients connections issues. Note that the Linux kernel -# will silently truncate it to the value of /proc/sys/net/core/somaxconn so -# make sure to raise both the value of somaxconn and tcp_max_syn_backlog -# in order to get the desired effect. -tcp-backlog 511 - -# Unix socket. -# -# Specify the path for the Unix socket that will be used to listen for -# incoming connections. There is no default, so Redis will not listen -# on a unix socket when not specified. -# -# unixsocket /tmp/redis.sock -# unixsocketperm 700 - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# TCP keepalive. -# -# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence -# of communication. This is useful for two reasons: -# -# 1) Detect dead peers. 
-# 2) Take the connection alive from the point of view of network -# equipment in the middle. -# -# On Linux, the specified value (in seconds) is the period used to send ACKs. -# Note that to close the connection the double of the time is needed. -# On other kernels the period depends on the kernel configuration. -# -# A reasonable value for this option is 300 seconds, which is the new -# Redis default starting with Redis 3.2.1. -tcp-keepalive 300 - -################################# TLS/SSL ##################################### - -# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration -# directive can be used to define TLS-listening ports. To enable TLS on the -# default port, use: -# -# port 0 -# tls-port 6379 - -# Configure a X.509 certificate and private key to use for authenticating the -# server to connected clients, masters or cluster peers. These files should be -# PEM formatted. -# -# tls-cert-file redis.crt -# tls-key-file redis.key - -# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange: -# -# tls-dh-params-file redis.dh - -# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL -# clients and peers. Redis requires an explicit configuration of at least one -# of these, and will not implicitly use the system wide configuration. -# -# tls-ca-cert-file ca.crt -# tls-ca-cert-dir /etc/ssl/certs - -# By default, clients (including replica servers) on a TLS port are required -# to authenticate using valid client side certificates. -# -# If "no" is specified, client certificates are not required and not accepted. -# If "optional" is specified, client certificates are accepted and must be -# valid if provided, but are not required. -# -# tls-auth-clients no -# tls-auth-clients optional - -# By default, a Redis replica does not attempt to establish a TLS connection -# with its master. -# -# Use the following directive to enable TLS on replication links. -# -# tls-replication yes - -# By default, the Redis Cluster bus uses a plain TCP connection. To enable -# TLS for the bus protocol, use the following directive: -# -# tls-cluster yes - -# Explicitly specify TLS versions to support. Allowed values are case insensitive -# and include "TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3" (OpenSSL >= 1.1.1) or -# any combination. To enable only TLSv1.2 and TLSv1.3, use: -# -# tls-protocols "TLSv1.2 TLSv1.3" - -# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information -# about the syntax of this string. -# -# Note: this configuration applies only to <= TLSv1.2. -# -# tls-ciphers DEFAULT:!MEDIUM - -# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more -# information about the syntax of this string, and specifically for TLSv1.3 -# ciphersuites. -# -# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 - -# When choosing a cipher, use the server's preference instead of the client -# preference. By default, the server follows the client's preference. -# -# tls-prefer-server-ciphers yes - -# By default, TLS session caching is enabled to allow faster and less expensive -# reconnections by clients that support it. Use the following directive to disable -# caching. -# -# tls-session-caching no - -# Change the default number of TLS sessions cached. A zero value sets the cache -# to unlimited size. The default size is 20480. -# -# tls-session-cache-size 5000 - -# Change the default timeout of cached TLS sessions. The default timeout is 300 -# seconds. 
-# -# tls-session-cache-timeout 60 - -################################# GENERAL ##################################### - -# By default Redis does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. -daemonize no - -# If you run Redis from upstart or systemd, Redis can interact with your -# supervision tree. Options: -# supervised no - no supervision interaction -# supervised upstart - signal upstart by putting Redis into SIGSTOP mode -# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET -# supervised auto - detect upstart or systemd method based on -# UPSTART_JOB or NOTIFY_SOCKET environment variables -# Note: these supervision methods only signal "process is ready." -# They do not enable continuous liveness pings back to your supervisor. -supervised no - -# If a pid file is specified, Redis writes it where specified at startup -# and removes it at exit. -# -# When the server runs non daemonized, no pid file is created if none is -# specified in the configuration. When the server is daemonized, the pid file -# is used even if not specified, defaulting to "/var/run/redis.pid". -# -# Creating a pid file is best effort: if Redis is not able to create it -# nothing bad happens, the server will start and run normally. -pidfile /var/run/redis_6379.pid - -# Specify the server verbosity level. -# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel notice - -# Specify the log file name. Also the empty string can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile "" - -# To enable logging to the system logger, just set 'syslog-enabled' to yes, -# and optionally update the other syslog parameters to suit your needs. -# syslog-enabled no - -# Specify the syslog identity. -# syslog-ident redis - -# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. -# syslog-facility local0 - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -# By default Redis shows an ASCII art logo only when started to log to the -# standard output and if the standard output is a TTY. Basically this means -# that normally a logo is displayed only in interactive sessions. -# -# However it is possible to force the pre-4.0 behavior and always show a -# ASCII art logo in startup logs by setting the following option to yes. -always-show-logo yes - -################################ SNAPSHOTTING ################################ -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving completely by commenting out all "save" lines. 
-# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -save 900 1 -save 300 10 -save 60 10000 -save "" - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in a hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# disaster will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. -# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usual even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error no - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes - -# The filename where to dump the DB -dbfilename dump.rdb - -# Remove RDB files used by replication in instances without persistence -# enabled. By default this option is disabled, however there are environments -# where for regulations or other security concerns, RDB files persisted on -# disk by masters in order to feed replicas, or stored on disk by replicas -# in order to load them for the initial synchronization, should be deleted -# ASAP. Note that this option ONLY WORKS in instances that have both AOF -# and RDB persistence disabled, otherwise is completely ignored. -# -# An alternative (and sometimes better) way to obtain the same effect is -# to use diskless replication on both master and replicas instances. However -# in the case of replicas, diskless is not always an option. -rdb-del-sync-files no - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir ./ - -################################# REPLICATION ################################# - -# Master-Replica replication. Use replicaof to make a Redis instance a copy of -# another Redis server. A few things to understand ASAP about Redis replication. -# -# +------------------+ +---------------+ -# | Master | ---> | Replica | -# | (receive writes) | | (exact copy) | -# +------------------+ +---------------+ -# -# 1) Redis replication is asynchronous, but you can configure a master to -# stop accepting writes if it appears to be not connected with at least -# a given number of replicas. 
-# 2) Redis replicas are able to perform a partial resynchronization with the
-#    master if the replication link is lost for a relatively small amount of
-#    time. You may want to configure the replication backlog size (see the next
-#    sections of this file) with a sensible value depending on your needs.
-# 3) Replication is automatic and does not need user intervention. After a
-#    network partition replicas automatically try to reconnect to masters
-#    and resynchronize with them.
-#
-# replicaof <masterip> <masterport>
-
-# If the master is password protected (using the "requirepass" configuration
-# directive below) it is possible to tell the replica to authenticate before
-# starting the replication synchronization process, otherwise the master will
-# refuse the replica request.
-#
-# masterauth <master-password>
-#
-# However this is not enough if you are using Redis ACLs (for Redis version
-# 6 or greater), and the default user is not capable of running the PSYNC
-# command and/or other commands needed for replication. In this case it's
-# better to configure a special user to use with replication, and specify the
-# masteruser configuration as such:
-#
-# masteruser <username>
-#
-# When masteruser is specified, the replica will authenticate against its
-# master using the new AUTH form: AUTH <username> <password>.
-
-# When a replica loses its connection with the master, or when the replication
-# is still in progress, the replica can act in two different ways:
-#
-# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
-#    still reply to client requests, possibly with out of date data, or the
-#    data set may just be empty if this is the first synchronization.
-#
-# 2) if replica-serve-stale-data is set to 'no' the replica will reply with
-#    a "SYNC with master in progress" error to all commands except INFO,
-#    REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE,
-#    UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST,
-#    HOST: and LATENCY.
-#
-replica-serve-stale-data yes
-
-# You can configure a replica instance to accept writes or not. Writing against
-# a replica instance may be useful to store some ephemeral data (because data
-# written on a replica will be easily deleted after resync with the master) but
-# may also cause problems if clients are writing to it because of a
-# misconfiguration.
-#
-# Since Redis 2.6 by default replicas are read-only.
-#
-# Note: read only replicas are not designed to be exposed to untrusted clients
-# on the internet. It's just a protection layer against misuse of the instance.
-# Still a read only replica exports by default all the administrative commands
-# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
-# security of read only replicas using 'rename-command' to shadow all the
-# administrative / dangerous commands.
-replica-read-only yes
-
-# Replication SYNC strategy: disk or socket.
-#
-# New replicas and reconnecting replicas that are not able to continue the
-# replication process just receiving differences, need to do what is called a
-# "full synchronization". An RDB file is transmitted from the master to the
-# replicas.
-#
-# The transmission can happen in two different ways:
-#
-# 1) Disk-backed: The Redis master creates a new process that writes the RDB
-#                 file on disk. Later the file is transferred by the parent
-#                 process to the replicas incrementally.
-# 2) Diskless: The Redis master creates a new process that directly writes the
-#              RDB file to replica sockets, without touching the disk at all.
-#
-# With disk-backed replication, while the RDB file is generated, more replicas
-# can be queued and served with the RDB file as soon as the current child
-# producing the RDB file finishes its work. With diskless replication instead
-# once the transfer starts, new replicas arriving will be queued and a new
-# transfer will start when the current one terminates.
-#
-# When diskless replication is used, the master waits a configurable amount of
-# time (in seconds) before starting the transfer in the hope that multiple
-# replicas will arrive and the transfer can be parallelized.
-#
-# With slow disks and fast (large bandwidth) networks, diskless replication
-# works better.
-repl-diskless-sync no
-
-# When diskless replication is enabled, it is possible to configure the delay
-# the server waits in order to spawn the child that transfers the RDB via socket
-# to the replicas.
-#
-# This is important since once the transfer starts, it is not possible to serve
-# new replicas arriving, that will be queued for the next RDB transfer, so the
-# server waits a delay in order to let more replicas arrive.
-#
-# The delay is specified in seconds, and by default is 5 seconds. To disable
-# it entirely just set it to 0 seconds and the transfer will start ASAP.
-repl-diskless-sync-delay 5
-
-# -----------------------------------------------------------------------------
-# WARNING: RDB diskless load is experimental. Since in this setup the replica
-# does not immediately store an RDB on disk, it may cause data loss during
-# failovers. RDB diskless load + Redis modules not handling I/O reads may also
-# cause Redis to abort in case of I/O errors during the initial synchronization
-# stage with the master. Use only if you know what you are doing.
-# -----------------------------------------------------------------------------
-#
-# Replica can load the RDB it reads from the replication link directly from the
-# socket, or store the RDB to a file and read that file after it was completely
-# received from the master.
-#
-# In many cases the disk is slower than the network, and storing and loading
-# the RDB file may increase replication time (and even increase the master's
-# Copy on Write memory and replica buffers).
-# However, parsing the RDB file directly from the socket may mean that we have
-# to flush the contents of the current database before the full rdb was
-# received. For this reason we have the following options:
-#
-# "disabled"    - Don't use diskless load (store the rdb file to the disk first)
-# "on-empty-db" - Use diskless load only when it is completely safe.
-# "swapdb"      - Keep a copy of the current db contents in RAM while parsing
-#                 the data directly from the socket. Note that this requires
-#                 sufficient memory: if you don't have it, you risk an OOM kill.
-repl-diskless-load disabled
-
-# Replicas send PINGs to the server at a predefined interval. It's possible to
-# change this interval with the repl-ping-replica-period option. The default
-# value is 10 seconds.
-#
-# repl-ping-replica-period 10
-
-# The following option sets the replication timeout for:
-#
-# 1) Bulk transfer I/O during SYNC, from the point of view of replica.
-# 2) Master timeout from the point of view of replicas (data, pings).
-# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
-#
-# It is important to make sure that this value is greater than the value
-# specified for repl-ping-replica-period otherwise a timeout will be detected
-# every time there is low traffic between the master and the replica.
-#
-# repl-timeout 60
-
-# Disable TCP_NODELAY on the replica socket after SYNC?
-#
-# If you select "yes" Redis will use a smaller number of TCP packets and
-# less bandwidth to send data to replicas. But this can add a delay for
-# the data to appear on the replica side, up to 40 milliseconds with
-# Linux kernels using a default configuration.
-#
-# If you select "no" the delay for data to appear on the replica side will
-# be reduced but more bandwidth will be used for replication.
-#
-# By default we optimize for low latency, but in very high traffic conditions
-# or when the master and replicas are many hops away, turning this to "yes" may
-# be a good idea.
-repl-disable-tcp-nodelay no
-
-# Set the replication backlog size. The backlog is a buffer that accumulates
-# replica data when replicas are disconnected for some time, so that when a
-# replica wants to reconnect again, often a full resync is not needed, but a
-# partial resync is enough, just passing the portion of data the replica
-# missed while disconnected.
-#
-# The bigger the replication backlog, the longer the time the replica can be
-# disconnected and later be able to perform a partial resynchronization.
-#
-# The backlog is only allocated once there is at least one replica connected.
-#
-# repl-backlog-size 1mb
-
-# After a master has had no connected replicas for some time, the backlog
-# will be freed. The following option configures the amount of seconds that
-# need to elapse, starting from the time the last replica disconnected, for
-# the backlog buffer to be freed.
-#
-# Note that replicas never free the backlog for timeout, since they may be
-# promoted to masters later, and should be able to correctly "partially
-# resynchronize" with the replicas: hence they should always accumulate backlog.
-#
-# A value of 0 means to never release the backlog.
-#
-# repl-backlog-ttl 3600
-
-# The replica priority is an integer number published by Redis in the INFO
-# output. It is used by Redis Sentinel in order to select a replica to promote
-# into a master if the master is no longer working correctly.
-#
-# A replica with a low priority number is considered better for promotion, so
-# for instance if there are three replicas with priority 10, 100, 25 Sentinel
-# will pick the one with priority 10, that is the lowest.
-#
-# However a special priority of 0 marks the replica as not able to perform the
-# role of master, so a replica with priority of 0 will never be selected by
-# Redis Sentinel for promotion.
-#
-# By default the priority is 100.
-replica-priority 100
-
-# It is possible for a master to stop accepting writes if there are fewer than
-# N replicas connected, having a lag less than or equal to M seconds.
-#
-# The N replicas need to be in "online" state.
-#
-# The lag in seconds, which must be <= the specified value, is calculated from
-# the last ping received from the replica, which is usually sent every second.
-#
-# This option does not GUARANTEE that N replicas will accept the write, but
-# will limit the window of exposure for lost writes in case not enough replicas
-# are available, to the specified number of seconds.
-#
-# For example to require at least 3 replicas with a lag <= 10 seconds use:
-#
-# min-replicas-to-write 3
-# min-replicas-max-lag 10
-#
-# Setting one or the other to 0 disables the feature.
-#
-# By default min-replicas-to-write is set to 0 (feature disabled) and
-# min-replicas-max-lag is set to 10.
-
-# A Redis master is able to list the address and port of the attached
-# replicas in different ways. For example the "INFO replication" section
-# offers this information, which is used, among other tools, by
-# Redis Sentinel in order to discover replica instances.
-# Another place where this info is available is in the output of the
-# "ROLE" command of a master.
-#
-# The IP address and port normally reported by a replica are obtained
-# in the following way:
-#
-#   IP: The address is auto detected by checking the peer address
-#   of the socket used by the replica to connect with the master.
-#
-#   Port: The port is communicated by the replica during the replication
-#   handshake, and is normally the port that the replica is using to
-#   listen for connections.
-#
-# However when port forwarding or Network Address Translation (NAT) is
-# used, the replica may be actually reachable via different IP and port
-# pairs. The following two options can be used by a replica in order to
-# report to its master a specific set of IP and port, so that both INFO
-# and ROLE will report those values.
-#
-# There is no need to use both the options if you need to override just
-# the port or the IP address.
-#
-# replica-announce-ip 5.5.5.5
-# replica-announce-port 1234
-
-############################### KEYS TRACKING #################################
-
-# Redis implements server assisted support for client side caching of values.
-# This is implemented using an invalidation table that remembers, using
-# 16 million slots, what clients may have certain subsets of keys. In turn
-# this is used in order to send invalidation messages to clients. To
-# understand more about the feature please check this page:
-#
-# https://redis.io/topics/client-side-caching
-#
-# When tracking is enabled for a client, all the read only queries are assumed
-# to be cached: this will force Redis to store information in the invalidation
-# table. When keys are modified, such information is flushed away, and
-# invalidation messages are sent to the clients. However if the workload is
-# heavily dominated by reads, Redis could use more and more memory in order
-# to track the keys fetched by many clients.
-#
-# For this reason it is possible to configure a maximum fill value for the
-# invalidation table. By default it is set to 1 million keys, and once this
-# limit is reached, Redis will start to evict keys in the invalidation table
-# even if they were not modified, just to reclaim memory: this will in turn
-# force the clients to invalidate the cached values. Basically the table
-# maximum size is a trade off between the memory you want to spend server
-# side to track information about who cached what, and the ability of clients
-# to retain cached objects in memory.
-#
-# If you set the value to 0, it means there are no limits, and Redis will
-# retain as many keys as needed in the invalidation table.
-# In the "stats" INFO section, you can find information about the number of
-# keys in the invalidation table at every given moment.
-#
-# Note: when key tracking is used in broadcasting mode, no memory is used
-# on the server side so this setting is useless.
-#
-# tracking-table-max-keys 1000000
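-#
-# For example (illustrative value), a memory-constrained instance might cap
-# the invalidation table at 100k keys instead of the default 1 million:
-#
-# tracking-table-max-keys 100000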
-
-################################## SECURITY ###################################
-
-# Warning: since Redis is pretty fast an outside user can try up to
-# 1 million passwords per second against a modern box. This means that you
-# should use very strong passwords, otherwise they will be very easy to break.
-# Note that because the password is really a shared secret between the client
-# and the server, and should not be memorized by any human, the password
-# can easily be a long string from /dev/urandom or whatever, so by using a
-# long and unguessable password no brute force attack will be possible.
-
-# Redis ACL users are defined in the following format:
-#
-#   user <username> ... acl rules ...
-#
-# For example:
-#
-#   user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
-#
-# The special username "default" is used for new connections. If this user
-# has the "nopass" rule, then new connections will be immediately authenticated
-# as the "default" user without the need of any password provided via the
-# AUTH command. Otherwise if the "default" user is not flagged with "nopass"
-# the connections will start in not authenticated state, and will require
-# AUTH (or the HELLO command AUTH option) in order to be authenticated and
-# start to work.
-#
-# The ACL rules that describe what a user can do are the following:
-#
-#  on            Enable the user: it is possible to authenticate as this user.
-#  off           Disable the user: it's no longer possible to authenticate
-#                with this user, however the already authenticated connections
-#                will still work.
-#  +<command>    Allow the execution of that command
-#  -<command>    Disallow the execution of that command
-#  +@<category>  Allow the execution of all the commands in such category,
-#                where valid categories are @admin, @set, @sortedset, ...
-#                and so forth; see the full list in the server.c file where
-#                the Redis command table is described and defined.
-#                The special category @all means all the commands, both the
-#                ones currently present in the server and the ones that will
-#                be loaded in the future via modules.
-#  +<command>|subcommand  Allow a specific subcommand of an otherwise
-#                         disabled command. Note that this form is not
-#                         allowed as negative like -DEBUG|SEGFAULT, but
-#                         only additive starting with "+".
-#  allcommands   Alias for +@all. Note that it implies the ability to execute
-#                all the future commands loaded via the modules system.
-#  nocommands    Alias for -@all.
-#  ~<pattern>    Add a pattern of keys that can be mentioned as part of
-#                commands. For instance ~* allows all the keys. The pattern
-#                is a glob-style pattern like the one of KEYS.
-#                It is possible to specify multiple patterns.
-#  allkeys       Alias for ~*
-#  resetkeys     Flush the list of allowed keys patterns.
-#  ><password>   Add this password to the list of valid passwords for the user.
-#                For example >mypass will add "mypass" to the list.
-#                This directive clears the "nopass" flag (see later).
-#  <<password>   Remove this password from the list of valid passwords.
-#  nopass        All the set passwords of the user are removed, and the user
-#                is flagged as requiring no password: it means that every
-#                password will work against this user. If this directive is
-#                used for the default user, every new connection will be
-#                immediately authenticated with the default user without
-#                any explicit AUTH command required. Note that the "resetpass"
-#                directive will clear this condition.
-#  resetpass     Flush the list of allowed passwords. Moreover removes the
-#                "nopass" status.
-#                After "resetpass" the user has no associated
-#                passwords and there is no way to authenticate without adding
-#                some password (or setting it as "nopass" later).
-#  reset         Performs the following actions: resetpass, resetkeys, off,
-#                -@all. The user returns to the same state it has immediately
-#                after its creation.
-#
-# ACL rules can be specified in any order: for instance you can start with
-# passwords, then flags, or key patterns. However note that the additive
-# and subtractive rules will CHANGE MEANING depending on the ordering.
-# For instance see the following example:
-#
-#   user alice on +@all -DEBUG ~* >somepassword
-#
-# This will allow "alice" to use all the commands with the exception of the
-# DEBUG command, since +@all added all the commands to the set of the commands
-# alice can use, and later DEBUG was removed. However if we invert the order
-# of the two ACL rules the result will be different:
-#
-#   user alice on -DEBUG +@all ~* >somepassword
-#
-# Now DEBUG was removed when alice did not yet have any commands in the set of
-# allowed commands; later all the commands are added, so the user will be able
-# to execute everything.
-#
-# Basically ACL rules are processed left-to-right (a recap example is given at
-# the end of this section).
-#
-# For more information about ACL configuration please refer to
-# the Redis web site at https://redis.io/topics/acl
-
-# ACL LOG
-#
-# The ACL Log tracks failed commands and authentication events associated
-# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
-# by ACLs. The ACL Log is stored in memory. You can reclaim memory with
-# ACL LOG RESET. Define the maximum entry length of the ACL Log below.
-acllog-max-len 128
-
-# Using an external ACL file
-#
-# Instead of configuring users here in this file, it is possible to use
-# a stand-alone file just listing users. The two methods cannot be mixed:
-# if you configure users here and at the same time you activate the external
-# ACL file, the server will refuse to start.
-#
-# The format of the external ACL user file is exactly the same as the
-# format that is used inside redis.conf to describe users.
-#
-# aclfile /etc/redis/users.acl
-
-# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
-# layer on top of the new ACL system. The only effect of the option is setting
-# the password for the default user. Clients will still authenticate using
-# AUTH <password> as usual, or more explicitly with AUTH default <password>
-# if they follow the new protocol: both will work.
-#
-# requirepass foobared
-
-# Command renaming (DEPRECATED).
-#
-# ------------------------------------------------------------------------
-# WARNING: avoid using this option if possible. Instead use ACLs to remove
-# commands from the default user, and put them only in some admin user you
-# create for administrative purposes.
-# ------------------------------------------------------------------------
-#
-# It is possible to change the name of dangerous commands in a shared
-# environment. For instance the CONFIG command may be renamed into something
-# hard to guess so that it will still be available for internal-use tools
-# but not available for general clients.
-#
-# Example:
-#
-# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
-#
-# It is also possible to completely kill a command by renaming it into
-# an empty string:
-#
-# rename-command CONFIG ""
-#
-# Please note that changing the name of commands that are logged into the
-# AOF file or transmitted to replicas may cause problems.
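-
-# Putting the ACL rules described above together, a hypothetical read-only
-# user that may only run GET on keys under "metrics:" could be declared as
-# follows (example username, password and key pattern):
-#
-# user reporting on >reportpass ~metrics:* +get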
-
-################################### CLIENTS ####################################
-
-# Set the max number of connected clients at the same time. By default
-# this limit is set to 10000 clients, however if the Redis server is not
-# able to configure the process file limit to allow for the specified limit
-# the max number of allowed clients is set to the current file limit
-# minus 32 (as Redis reserves a few file descriptors for internal uses).
-#
-# Once the limit is reached Redis will close all the new connections sending
-# an error 'max number of clients reached'.
-#
-# IMPORTANT: When Redis Cluster is used, the max number of connections is also
-# shared with the cluster bus: every node in the cluster will use two
-# connections, one incoming and another outgoing. It is important to size the
-# limit accordingly in case of very large clusters.
-#
-# maxclients 10000
-
-############################## MEMORY MANAGEMENT ################################
-
-# Set a memory usage limit to the specified amount of bytes.
-# When the memory limit is reached Redis will try to remove keys
-# according to the eviction policy selected (see maxmemory-policy).
-#
-# If Redis can't remove keys according to the policy, or if the policy is
-# set to 'noeviction', Redis will start to reply with errors to commands
-# that would use more memory, like SET, LPUSH, and so on, and will continue
-# to reply to read-only commands like GET.
-#
-# This option is usually useful when using Redis as an LRU or LFU cache, or to
-# set a hard memory limit for an instance (using the 'noeviction' policy).
-#
-# WARNING: If you have replicas attached to an instance with maxmemory on,
-# the size of the output buffers needed to feed the replicas is subtracted
-# from the used memory count, so that network problems / resyncs will
-# not trigger a loop where keys are evicted, and in turn the output
-# buffer of replicas fills up with DELs of evicted keys, triggering the
-# deletion of more keys, and so forth until the database is completely emptied.
-#
-# In short... if you have replicas attached it is suggested that you set a lower
-# limit for maxmemory so that there is some free RAM on the system for replica
-# output buffers (but this is not needed if the policy is 'noeviction').
-#
-# maxmemory <bytes>
-
-# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
-# is reached. You can select one from the following behaviors:
-#
-# volatile-lru -> Evict using approximated LRU, only keys with an expire set.
-# allkeys-lru -> Evict any key using approximated LRU.
-# volatile-lfu -> Evict using approximated LFU, only keys with an expire set.
-# allkeys-lfu -> Evict any key using approximated LFU.
-# volatile-random -> Remove a random key having an expire set.
-# allkeys-random -> Remove a random key, any key.
-# volatile-ttl -> Remove the key with the nearest expire time (smallest TTL)
-# noeviction -> Don't evict anything, just return an error on write operations.
-#
-# LRU means Least Recently Used
-# LFU means Least Frequently Used
-#
-# LRU, LFU and volatile-ttl are all implemented using approximated
-# randomized algorithms.
-#
-# Note: with any of the above policies, Redis will return an error on write
-# operations, when there are no suitable keys for eviction.
-#
-# At the date of writing these commands are: set setnx setex append
-# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
-# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
-# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
-# getset mset msetnx exec sort
-#
-# The default is:
-#
-# maxmemory-policy noeviction
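-
-# For instance (illustrative sizing, not a recommendation), a dedicated cache
-# instance might combine a hard memory cap with an allkeys eviction policy:
-#
-# maxmemory 2gb
-# maxmemory-policy allkeys-lru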
-
-# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
-# algorithms (in order to save memory), so you can tune them for speed or
-# accuracy. By default Redis will check five keys and pick the one that was
-# used least recently; you can change the sample size using the following
-# configuration directive.
-#
-# The default of 5 produces good enough results. 10 approximates true LRU very
-# closely but costs more CPU. 3 is faster but not very accurate.
-#
-# maxmemory-samples 5
-
-# Starting from Redis 5, by default a replica will ignore its maxmemory setting
-# (unless it is promoted to master after a failover or manually). It means
-# that the eviction of keys will be handled just by the master, sending the
-# DEL commands to the replica as keys are evicted on the master side.
-#
-# This behavior ensures that masters and replicas stay consistent, and is usually
-# what you want, however if your replica is writable, or you want the replica
-# to have a different memory setting, and you are sure all the writes performed
-# to the replica are idempotent, then you may change this default (but be sure
-# to understand what you are doing).
-#
-# Note that since the replica by default does not evict, it may end up using
-# more memory than the one set via maxmemory (there are certain buffers that may
-# be larger on the replica, or data structures may sometimes take more memory
-# and so forth). So make sure you monitor your replicas and make sure they
-# have enough memory to never hit a real out-of-memory condition before the
-# master hits the configured maxmemory setting.
-#
-# replica-ignore-maxmemory yes
-
-# Redis reclaims expired keys in two ways: upon access when those keys are
-# found to be expired, and also in background, in what is called the
-# "active expire cycle". The key space is slowly and incrementally scanned
-# looking for expired keys to reclaim, so that it is possible to free memory
-# of keys that are expired and will never be accessed again in a short time.
-#
-# The default effort of the expire cycle will try to avoid having more than
-# ten percent of expired keys still in memory, and will try to avoid consuming
-# more than 25% of total memory and to avoid adding latency to the system.
-# However it is possible to increase the expire "effort" that is normally set
-# to "1", to a greater value, up to the value "10". At its maximum value the
-# system will use more CPU, longer cycles (and technically may introduce
-# more latency), and will tolerate fewer already expired keys still present
-# in the system. It's a tradeoff between memory, CPU and latency.
-#
-# active-expire-effort 1
-
-############################# LAZY FREEING ####################################
-
-# Redis has two primitives to delete keys. One is called DEL and is a blocking
-# deletion of the object. It means that the server stops processing new commands
-# in order to reclaim all the memory associated with an object in a synchronous
-# way. If the key deleted is associated with a small object, the time needed
-# in order to execute the DEL command is very small and comparable to most other
-# O(1) or O(log_N) commands in Redis.
-# However if the key is associated with an aggregated value containing
-# millions of elements, the server can block for a long time (even seconds)
-# in order to complete the operation.
-#
-# For the above reasons Redis also offers non blocking deletion primitives
-# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
-# FLUSHDB commands, in order to reclaim memory in background. Those commands
-# are executed in constant time. Another thread will incrementally free the
-# object in the background as fast as possible.
-#
-# DEL, UNLINK and the ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
-# It's up to the design of the application to understand when it is a good
-# idea to use one or the other. However the Redis server sometimes has to
-# delete keys or flush the whole database as a side effect of other operations.
-# Specifically Redis deletes objects independently of a user call in the
-# following scenarios:
-#
-# 1) On eviction, because of the maxmemory and maxmemory policy configurations,
-#    in order to make room for new data, without going over the specified
-#    memory limit.
-# 2) Because of expire: when a key with an associated time to live (see the
-#    EXPIRE command) must be deleted from memory.
-# 3) Because of a side effect of a command that stores data on a key that may
-#    already exist. For example the RENAME command may delete the old key
-#    content when it is replaced with another one. Similarly SUNIONSTORE
-#    or SORT with STORE option may delete existing keys. The SET command
-#    itself removes any old content of the specified key in order to replace
-#    it with the specified string.
-# 4) During replication, when a replica performs a full resynchronization with
-#    its master, the content of the whole database is removed in order to
-#    load the RDB file just transferred.
-#
-# In all the above cases the default is to delete objects in a blocking way,
-# as if DEL was called. However you can configure each case specifically
-# in order to instead release memory in a non-blocking way, as if UNLINK
-# was called, using the following configuration directives.
-
-lazyfree-lazy-eviction no
-lazyfree-lazy-expire no
-lazyfree-lazy-server-del no
-replica-lazy-flush no
-
-# It is also possible, for cases where replacing the user code's DEL calls
-# with UNLINK calls is not easy, to modify the default behavior of the DEL
-# command to act exactly like UNLINK, using the following configuration
-# directive:
-
-lazyfree-lazy-user-del no
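-
-# As an illustration of the user-facing counterpart (hypothetical key name):
-# in redis-cli, "DEL bigset" blocks the server while a large set is reclaimed,
-# while "UNLINK bigset" returns immediately and frees it in a background
-# thread.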
-
-################################ THREADED I/O #################################
-
-# Redis is mostly single threaded, however there are certain threaded
-# operations such as UNLINK, slow I/O accesses and other things that are
-# performed on side threads.
-#
-# Now it is also possible to handle Redis clients socket reads and writes
-# in different I/O threads. Since writing in particular is slow, Redis users
-# normally use pipelining in order to speed up Redis performance per core,
-# and spawn multiple instances in order to scale further. Using I/O
-# threads it is possible to easily speed up Redis by up to two times without
-# resorting to pipelining or sharding of the instance.
-#
-# By default threading is disabled, we suggest enabling it only in machines
-# that have at least 4 or more cores, leaving at least one spare core.
-# Using more than 8 threads is unlikely to help much. We also recommend using
-# threaded I/O only if you actually have performance problems, with Redis
-# instances being able to use a quite big percentage of CPU time, otherwise
-# there is no point in using this feature.
-#
-# So for instance if you have a four core box, try to use 2 or 3 I/O
-# threads; if you have an 8 core box, try to use 6 threads. In order to
-# enable I/O threads use the following configuration directive:
-#
-# io-threads 4
-#
-# Setting io-threads to 1 will just use the main thread as usual.
-# When I/O threads are enabled, we only use threads for writes, that is
-# to thread the write(2) syscall and transfer the client buffers to the
-# socket. However it is also possible to enable threading of reads and
-# protocol parsing using the following configuration directive, by setting
-# it to yes:
-#
-# io-threads-do-reads no
-#
-# Usually threading reads doesn't help much.
-#
-# NOTE 1: This configuration directive cannot be changed at runtime via
-# CONFIG SET. Also, this feature currently does not work when SSL is
-# enabled.
-#
-# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make
-# sure you also run the benchmark itself in threaded mode, using the
-# --threads option to match the number of Redis threads, otherwise you'll not
-# be able to notice the improvements.
-
-############################ KERNEL OOM CONTROL ##############################
-
-# On Linux, it is possible to hint the kernel OOM killer on what processes
-# should be killed first when out of memory.
-#
-# Enabling this feature makes Redis actively control the oom_score_adj value
-# for all its processes, depending on their role. The default scores will
-# attempt to have background child processes killed before all others, and
-# replicas killed before masters.
-
-oom-score-adj no
-
-# When oom-score-adj is used, this directive controls the specific values used
-# for master, replica and background child processes. Values range -1000 to
-# 1000 (higher means more likely to be killed).
-#
-# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities)
-# can freely increase their value, but not decrease it below its initial
-# setting.
-#
-# Values are used relative to the initial value of oom_score_adj when the server
-# starts. Because typically the initial value is 0, they will often match the
-# absolute values.
-
-oom-score-adj-values 0 200 800
-
-############################## APPEND ONLY MODE ###############################
-
-# By default Redis asynchronously dumps the dataset on disk. This mode is
-# good enough in many applications, but an issue with the Redis process or
-# a power outage may result in a few minutes of writes being lost (depending
-# on the configured save points).
-#
-# The Append Only File is an alternative persistence mode that provides
-# much better durability. For instance using the default data fsync policy
-# (see later in the config file) Redis can lose just one second of writes in a
-# dramatic event like a server power outage, or a single write if something
-# goes wrong with the Redis process itself, but the operating system is
-# still running correctly.
-#
-# AOF and RDB persistence can be enabled at the same time without problems.
-# If the AOF is enabled on startup Redis will load the AOF, that is the file
-# with the better durability guarantees.
-#
-# Please check http://redis.io/topics/persistence for more information.
-
-appendonly no
-
-# The name of the append only file (default: "appendonly.aof")
-
-appendfilename "appendonly.aof"
-
-# The fsync() call tells the Operating System to actually write data on disk
-# instead of waiting for more data in the output buffer. Some OSes will really
-# flush data to disk, while some others will just try to do it ASAP.
-#
-# Redis supports three different modes:
-#
-# no: don't fsync, just let the OS flush the data when it wants. Faster.
-# always: fsync after every write to the append only log. Slow, Safest.
-# everysec: fsync only one time every second. Compromise.
-#
-# The default is "everysec", as that's usually the right compromise between
-# speed and data safety. It's up to you to understand if you can relax this to
-# "no" that will let the operating system flush the output buffer when
-# it wants, for better performance (but if you can live with the idea of
-# some data loss consider the default persistence mode that's snapshotting),
-# or on the contrary, use "always" that's very slow but a bit safer than
-# everysec.
-#
-# For more details please check the following article:
-# http://antirez.com/post/redis-persistence-demystified.html
-#
-# If unsure, use "everysec".
-
-# appendfsync always
-appendfsync everysec
-# appendfsync no
-
-# When the AOF fsync policy is set to always or everysec, and a background
-# saving process (a background save or AOF log background rewriting) is
-# performing a lot of I/O against the disk, in some Linux configurations
-# Redis may block too long on the fsync() call. Note that there is no fix for
-# this currently, as even performing fsync in a different thread will block
-# our synchronous write(2) call.
-#
-# In order to mitigate this problem it's possible to use the following option
-# that will prevent fsync() from being called in the main process while a
-# BGSAVE or BGREWRITEAOF is in progress.
-#
-# This means that while another child is saving, the durability of Redis is
-# the same as "appendfsync no". In practical terms, this means that it is
-# possible to lose up to 30 seconds of log in the worst scenario (with the
-# default Linux settings).
-#
-# If you have latency problems turn this to "yes". Otherwise leave it as
-# "no" that is the safest pick from the point of view of durability.
-
-no-appendfsync-on-rewrite no
-
-# Automatic rewrite of the append only file.
-# Redis is able to automatically rewrite the log file implicitly calling
-# BGREWRITEAOF when the AOF log size grows by the specified percentage.
-#
-# This is how it works: Redis remembers the size of the AOF file after the
-# latest rewrite (if no rewrite has happened since the restart, the size of
-# the AOF at startup is used).
-#
-# This base size is compared to the current size. If the current size is
-# bigger than the base size by the specified percentage, the rewrite is
-# triggered. Also you need to specify a minimal size for the AOF file to be
-# rewritten, this is useful to avoid rewriting the AOF file even if the
-# percentage increase is reached but it is still pretty small.
-#
-# Specify a percentage of zero in order to disable the automatic AOF
-# rewrite feature.
-
-auto-aof-rewrite-percentage 100
-auto-aof-rewrite-min-size 64mb
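-
-# As a worked example of the rule above (illustrative numbers): with the
-# defaults, if the AOF measured 100mb after the last rewrite, a new
-# BGREWRITEAOF is triggered once the file grows past roughly 200mb (100%
-# growth), while the 64mb floor only prevents rewrites while the file is
-# still small.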
-
-# An AOF file may be found to be truncated at the end during the Redis
-# startup process, when the AOF data gets loaded back into memory.
-# This may happen when the system where Redis is running
-# crashes, especially when an ext4 filesystem is mounted without the
-# data=ordered option (however this can't happen when Redis itself
-# crashes or aborts but the operating system still works correctly).
-#
-# Redis can either exit with an error when this happens, or load as much
-# data as possible (the default now) and start if the AOF file is found
-# to be truncated at the end. The following option controls this behavior.
-#
-# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
-# the Redis server starts emitting a log to inform the user of the event.
-# Otherwise if the option is set to no, the server aborts with an error
-# and refuses to start. When the option is set to no, the user is required
-# to fix the AOF file using the "redis-check-aof" utility before restarting
-# the server.
-#
-# Note that if the AOF file is found to be corrupted in the middle,
-# the server will still exit with an error. This option only applies when
-# Redis tries to read more data from the AOF file but not enough bytes
-# are found.
-aof-load-truncated yes
-
-# When rewriting the AOF file, Redis is able to use an RDB preamble in the
-# AOF file for faster rewrites and recoveries. When this option is turned
-# on the rewritten AOF file is composed of two different stanzas:
-#
-#   [RDB file][AOF tail]
-#
-# When loading, Redis recognizes that the AOF file starts with the "REDIS"
-# string and loads the prefixed RDB file, then continues loading the AOF
-# tail.
-aof-use-rdb-preamble yes
-
-################################ LUA SCRIPTING ###############################
-
-# Max execution time of a Lua script in milliseconds.
-#
-# If the maximum execution time is reached Redis will log that a script is
-# still in execution after the maximum allowed time and will start to
-# reply to queries with an error.
-#
-# When a long running script exceeds the maximum execution time only the
-# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
-# used to stop a script that has not yet called any write commands. The second
-# is the only way to shut down the server in the case a write command was
-# already issued by the script but the user doesn't want to wait for the natural
-# termination of the script.
-#
-# Set it to 0 or a negative value for unlimited execution without warnings.
-lua-time-limit 5000
-
-################################ REDIS CLUSTER ###############################
-
-# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
-# started as cluster nodes can. In order to start a Redis instance as a
-# cluster node enable cluster support by uncommenting the following:
-#
-# cluster-enabled yes
-
-# Every cluster node has a cluster configuration file. This file is not
-# intended to be edited by hand. It is created and updated by Redis nodes.
-# Every Redis Cluster node requires a different cluster configuration file.
-# Make sure that instances running in the same system do not have
-# overlapping cluster configuration file names.
-#
-# cluster-config-file nodes-6379.conf
-
-# Cluster node timeout is the amount of milliseconds a node must be unreachable
-# for it to be considered in failure state.
-# Most other internal time limits are multiples of the node timeout.
-#
-# cluster-node-timeout 15000
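-
-# Putting the three directives above together, a minimal (illustrative)
-# cluster node configuration might look like:
-#
-# cluster-enabled yes
-# cluster-config-file nodes-6379.conf
-# cluster-node-timeout 15000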
-
-# A replica of a failing master will avoid starting a failover if its data
-# looks too old.
-#
-# There is no simple way for a replica to actually have an exact measure of
-# its "data age", so the following two checks are performed:
-#
-# 1) If there are multiple replicas able to fail over, they exchange messages
-#    in order to try to give an advantage to the replica with the best
-#    replication offset (more data from the master processed).
-#    Replicas will try to get their rank by offset, and apply to the start
-#    of the failover a delay proportional to their rank.
-#
-# 2) Every single replica computes the time of the last interaction with
-#    its master. This can be the last ping or command received (if the master
-#    is still in the "connected" state), or the time that elapsed since the
-#    disconnection with the master (if the replication link is currently down).
-#    If the last interaction is too old, the replica will not try to fail over
-#    at all.
-#
-# Point "2" can be tuned by the user. Specifically a replica will not perform
-# the failover if, since the last interaction with the master, the time
-# elapsed is greater than:
-#
-#   (node-timeout * replica-validity-factor) + repl-ping-replica-period
-#
-# So for example if node-timeout is 30 seconds, and the replica-validity-factor
-# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
-# replica will not try to fail over if it was not able to talk with the master
-# for longer than 310 seconds.
-#
-# A large replica-validity-factor may allow replicas with too old data to
-# fail over a master, while a too small value may prevent the cluster from
-# being able to elect a replica at all.
-#
-# For maximum availability, it is possible to set the replica-validity-factor
-# to a value of 0, which means that replicas will always try to fail over the
-# master regardless of the last time they interacted with the master.
-# (However they'll always try to apply a delay proportional to their
-# offset rank).
-#
-# Zero is the only value able to guarantee that when all the partitions heal
-# the cluster will always be able to continue.
-#
-# cluster-replica-validity-factor 10
-
-# Cluster replicas are able to migrate to orphaned masters, that are masters
-# that are left without working replicas. This improves the cluster's ability
-# to resist failures, as otherwise an orphaned master can't be failed over
-# in case of failure if it has no working replicas.
-#
-# Replicas migrate to orphaned masters only if there are still at least a
-# given number of other working replicas for their old master. This number
-# is the "migration barrier". A migration barrier of 1 means that a replica
-# will migrate only if there is at least 1 other working replica for its master
-# and so forth. It usually reflects the number of replicas you want for every
-# master in your cluster.
-#
-# Default is 1 (replicas migrate only if their masters remain with at least
-# one replica). To disable migration just set it to a very large value.
-# A value of 0 can be set but is useful only for debugging and dangerous
-# in production.
-#
-# cluster-migration-barrier 1
-
-# By default Redis Cluster nodes stop accepting queries if they detect there
-# is at least one hash slot uncovered (no available node is serving it).
-# This way if the cluster is partially down (for example a range of hash slots
-# is no longer covered) the whole cluster eventually becomes unavailable.
-# It automatically returns available as soon as all the slots are covered again.
-#
-# However sometimes you want the subset of the cluster which is working to
-# continue accepting queries for the part of the key space that is still
-# covered. In order to do so, just set the cluster-require-full-coverage
-# option to no.
-#
-# cluster-require-full-coverage yes
-
-# This option, when set to yes, prevents replicas from trying to fail over
-# their master during master failures. However the master can still perform a
-# manual failover, if forced to do so.
-#
-# This is useful in different scenarios, especially in the case of multiple
-# data center operations, where we want one side to never be promoted except
-# in the case of a total DC failure.
-#
-# cluster-replica-no-failover no
-
-# This option, when set to yes, allows nodes to serve read traffic while the
-# cluster is in a down state, as long as it believes it owns the slots.
-#
-# This is useful for two cases. The first case is for when an application
-# doesn't require consistency of data during node failures or network partitions.
-# One example of this is a cache, where as long as the node has the data it
-# should be able to serve it.
-#
-# The second use case is for configurations that don't meet the recommended
-# three shards but want to enable cluster mode and scale later. A
-# master outage in a 1 or 2 shard configuration causes a read/write outage to the
-# entire cluster without this option set; with it set there is only a write outage.
-# Without a quorum of masters, slot ownership will not change automatically.
-#
-# cluster-allow-reads-when-down no
-
-# In order to set up your cluster make sure to read the documentation
-# available at the http://redis.io web site.
-
-########################## CLUSTER DOCKER/NAT support ########################
-
-# In certain deployments, Redis Cluster nodes address discovery fails, because
-# addresses are NAT-ted or because ports are forwarded (the typical case is
-# Docker and other containers).
-#
-# In order to make Redis Cluster work in such environments, a static
-# configuration where each node knows its public address is needed. The
-# following options are used for this purpose:
-#
-# * cluster-announce-ip
-# * cluster-announce-port
-# * cluster-announce-bus-port
-#
-# Each instructs the node about its address, client port, and cluster message
-# bus port. The information is then published in the header of the bus packets
-# so that other nodes will be able to correctly map the address of the node
-# publishing the information.
-#
-# If the above options are not used, the normal Redis Cluster auto-detection
-# will be used instead.
-#
-# Note that when remapped, the bus port may not be at the fixed offset of
-# client port + 10000, so you can specify any port and bus-port depending
-# on how they get remapped. If the bus-port is not set, a fixed offset of
-# 10000 will be used as usual.
-#
-# Example:
-#
-# cluster-announce-ip 10.1.1.5
-# cluster-announce-port 6379
-# cluster-announce-bus-port 6380
-
-################################## SLOW LOG ###################################
-
-# The Redis Slow Log is a system to log queries that exceeded a specified
-# execution time. The execution time does not include the I/O operations
-# like talking with the client, sending the reply and so forth,
-# but just the time needed to actually execute the command (this is the only
-# stage of command execution where the thread is blocked and can not serve
-# other requests in the meantime).
-#
-# You can configure the slow log with two parameters: one tells Redis
-# the execution time, in microseconds, that a command must exceed in order
-# to get logged, and the other parameter is the length of the
-# slow log. When a new command is logged the oldest one is removed from the
-# queue of logged commands.
-
-# The following time is expressed in microseconds, so 1000000 is equivalent
-# to one second. Note that a negative number disables the slow log, while
-# a value of zero forces the logging of every command.
-slowlog-log-slower-than 10000
-
-# There is no limit to this length. Just be aware that it will consume memory.
-# You can reclaim memory used by the slow log with SLOWLOG RESET.
-slowlog-max-len 128
-
-################################ LATENCY MONITOR ##############################
-
-# The Redis latency monitoring subsystem samples different operations
-# at runtime in order to collect data related to possible sources of
-# latency of a Redis instance.
-#
-# Via the LATENCY command this information is available to the user, who can
-# print graphs and obtain reports.
-#
-# The system only logs operations that were performed in a time equal to or
-# greater than the amount of milliseconds specified via the
-# latency-monitor-threshold configuration directive. When its value is set
-# to zero, the latency monitor is turned off.
-#
-# By default latency monitoring is disabled since it is mostly not needed
-# if you don't have latency issues, and collecting data has a performance
-# impact that, while very small, can be measured under big load. Latency
-# monitoring can easily be enabled at runtime using the command
-# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
-latency-monitor-threshold 0
-
-############################# EVENT NOTIFICATION ##############################
-
-# Redis can notify Pub/Sub clients about events happening in the key space.
-# This feature is documented at http://redis.io/topics/notifications
-#
-# For instance if keyspace events notification is enabled, and a client
-# performs a DEL operation on key "foo" stored in the Database 0, two
-# messages will be published via Pub/Sub:
-#
-# PUBLISH __keyspace@0__:foo del
-# PUBLISH __keyevent@0__:del foo
-#
-# It is possible to select the events that Redis will notify among a set
-# of classes. Every class is identified by a single character:
-#
-#  K     Keyspace events, published with __keyspace@<db>__ prefix.
-#  E     Keyevent events, published with __keyevent@<db>__ prefix.
-#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
-#  $     String commands
-#  l     List commands
-#  s     Set commands
-#  h     Hash commands
-#  z     Sorted set commands
-#  x     Expired events (events generated every time a key expires)
-#  e     Evicted events (events generated when a key is evicted for maxmemory)
-#  t     Stream commands
-#  m     Key-miss events (Note: It is not included in the 'A' class)
-#  A     Alias for g$lshzxet, so that the "AKE" string means all the events
-#        (Except key-miss events which are excluded from 'A' due to their
-#        unique nature).
-#
-# The "notify-keyspace-events" directive takes as argument a string that is
-# composed of zero or more characters. The empty string means that
-# notifications are disabled.
-#
-# Example: to enable list and generic events, from the point of view of the
-# event name, use:
-#
-# notify-keyspace-events Elg
-#
-# Example 2: to get the stream of the expired keys subscribing to channel
-# name __keyevent@0__:expired use:
-#
-# notify-keyspace-events Ex
-#
-# By default all notifications are disabled because most users don't need
-# this feature and the feature has some overhead. Note that if you don't
-# specify at least one of K or E, no events will be delivered.
-notify-keyspace-events ""
-
-############################### GOPHER SERVER #################################
-
-# Redis contains an implementation of the Gopher protocol, as specified in
-# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
-#
-# The Gopher protocol was very popular in the late '90s. It is an alternative
-# to the web, and the implementation both server and client side is so simple
-# that the Redis server has just 100 lines of code in order to implement this
-# support.
-#
-# What do you do with Gopher nowadays? Well Gopher never *really* died, and
-# lately there is a movement to resurrect Gopher's more hierarchical content,
-# composed of just plain text documents. Some want a simpler
-# internet, others believe that the mainstream internet became too much
-# controlled, and it's cool to create an alternative space for people that
-# want a bit of fresh air.
-#
-# Anyway, for the 10th birthday of Redis, we gave it the Gopher protocol
-# as a gift.
-#
-# --- HOW DOES IT WORK? ---
-#
-# The Redis Gopher support uses the inline protocol of Redis, and specifically
-# two kinds of inline requests that were anyway illegal: an empty request
-# or any request that starts with "/" (there are no Redis commands starting
-# with such a slash). Normal RESP2/RESP3 requests are completely out of the
-# path of the Gopher protocol implementation and are served as usual as well.
-#
-# If you open a connection to Redis when Gopher is enabled and send it
-# a string like "/foo", if there is a key named "/foo" it is served via the
-# Gopher protocol.
-#
-# In order to create a real Gopher "hole" (the name of a Gopher site in Gopher
-# jargon), you likely need a script like the following:
-#
-#   https://github.com/antirez/gopher2redis
-#
-# --- SECURITY WARNING ---
-#
-# If you plan to put Redis on the internet in a publicly accessible address
-# to serve Gopher pages MAKE SURE TO SET A PASSWORD to the instance.
-# Once a password is set:
-#
-#   1. The Gopher server (when enabled, not by default) will still serve
-#      content via Gopher.
-#   2. However other commands cannot be called before the client
-#      authenticates.
-#
-# So use the 'requirepass' option to protect your instance.
-#
-# To enable Gopher support, uncomment the following line and set
-# the option from no (the default) to yes.
-#
-# gopher-enabled no
-
-############################### ADVANCED CONFIG ###############################
-
-# Hashes are encoded using a memory efficient data structure when they have a
-# small number of entries, and the biggest entry does not exceed a given
-# threshold. These thresholds can be configured using the following directives.
-hash-max-ziplist-entries 512
-hash-max-ziplist-value 64
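-
-# For example, with the defaults above a hash holding 300 short fields keeps
-# the compact ziplist encoding, while storing a single value longer than 64
-# bytes, or growing past 512 entries, converts it to a regular hash table.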
-
-# Lists are also encoded in a special way to save a lot of space.
-# The number of entries allowed per internal list node can be specified
-# as a fixed maximum size or a maximum number of elements.
-# For a fixed maximum size, use -5 through -1, meaning:
-# -5: max size: 64 Kb  <-- not recommended for normal workloads
-# -4: max size: 32 Kb  <-- not recommended
-# -3: max size: 16 Kb  <-- probably not recommended
-# -2: max size: 8 Kb   <-- good
-# -1: max size: 4 Kb   <-- good
-# Positive numbers mean store up to _exactly_ that number of elements
-# per list node.
-# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
-# but if your use case is unique, adjust the settings as necessary.
-list-max-ziplist-size -2
-
-# Lists may also be compressed.
-# Compress depth is the number of quicklist ziplist nodes from *each* side of
-# the list to *exclude* from compression. The head and tail of the list
-# are always uncompressed for fast push/pop operations. Settings are:
-# 0: disable all list compression
-# 1: depth 1 means "don't start compressing until after 1 node into the list,
-#    going from either the head or tail"
-#    So: [head]->node->node->...->node->[tail]
-#    [head], [tail] will always be uncompressed; inner nodes will compress.
-# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
-#    2 here means: don't compress head or head->next or tail->prev or tail,
-#    but compress all nodes between them.
-# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
-#    etc.
-list-compress-depth 0
-
-# Sets have a special encoding in just one case: when a set is composed
-# of just strings that happen to be integers in radix 10 in the range
-# of 64 bit signed integers.
-# The following configuration setting sets the limit on the size of the
-# set in order to use this special memory saving encoding.
-set-max-intset-entries 512
-
-# Similarly to hashes and lists, sorted sets are also specially encoded in
-# order to save a lot of space. This encoding is only used when the length and
-# elements of a sorted set are below the following limits:
-zset-max-ziplist-entries 128
-zset-max-ziplist-value 64
-
-# HyperLogLog sparse representation bytes limit. The limit includes the
-# 16 bytes header. When a HyperLogLog using the sparse representation crosses
-# this limit, it is converted into the dense representation.
-#
-# A value greater than 16000 is totally useless, since at that point the
-# dense representation is more memory efficient.
-#
-# The suggested value is ~ 3000 in order to have the benefits of
-# the space efficient encoding without slowing down too much PFADD,
-# which is O(N) with the sparse encoding. The value can be raised to
-# ~ 10000 when CPU is not a concern, but space is, and the data set is
-# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
-hll-sparse-max-bytes 3000
-
-# Streams macro node max size / items. The stream data structure is a radix
-# tree of big nodes that encode multiple items inside. Using this configuration
-# it is possible to configure how big a single node can be in bytes, and the
-# maximum number of items it may contain before switching to a new node when
-# appending new stream entries. If any of the following settings are set to
-# zero, the limit is ignored, so for instance it is possible to set just a
-# max entries limit by setting max-bytes to 0 and max-entries to the desired
-# value.
-stream-node-max-bytes 4096
-stream-node-max-entries 100
-
-# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
-# order to help rehashing the main Redis hash table (the one mapping top-level
-# keys to values).
The hash table implementation Redis uses (see dict.c) -# performs a lazy rehashing: the more operation you run into a hash table -# that is rehashing, the more rehashing "steps" are performed, so if the -# server is idle the rehashing is never complete and some more memory is used -# by the hash table. -# -# The default is to use this millisecond 10 times every second in order to -# actively rehash the main dictionaries, freeing memory when possible. -# -# If unsure: -# use "activerehashing no" if you have hard latency requirements and it is -# not a good thing in your environment that Redis can reply from time to time -# to queries with 2 milliseconds delay. -# -# use "activerehashing yes" if you don't have such hard requirements but -# want to free memory asap when possible. -activerehashing yes - -# The client output buffer limits can be used to force disconnection of clients -# that are not reading data from the server fast enough for some reason (a -# common reason is that a Pub/Sub client can't consume messages as fast as the -# publisher can produce them). -# -# The limit can be set differently for the three different classes of clients: -# -# normal -> normal clients including MONITOR clients -# replica -> replica clients -# pubsub -> clients subscribed to at least one pubsub channel or pattern -# -# The syntax of every client-output-buffer-limit directive is the following: -# -# client-output-buffer-limit -# -# A client is immediately disconnected once the hard limit is reached, or if -# the soft limit is reached and remains reached for the specified number of -# seconds (continuously). -# So for instance if the hard limit is 32 megabytes and the soft limit is -# 16 megabytes / 10 seconds, the client will get disconnected immediately -# if the size of the output buffers reach 32 megabytes, but will also get -# disconnected if the client reaches 16 megabytes and continuously overcomes -# the limit for 10 seconds. -# -# By default normal clients are not limited because they don't receive data -# without asking (in a push way), but just after a request, so only -# asynchronous clients may create a scenario where data is requested faster -# than it can read. -# -# Instead there is a default limit for pubsub and replica clients, since -# subscribers and replicas receive data in a push fashion. -# -# Both the hard or the soft limit can be disabled by setting them to zero. -client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit replica 256mb 64mb 60 -client-output-buffer-limit pubsub 32mb 8mb 60 - -# Client query buffers accumulate new commands. They are limited to a fixed -# amount by default in order to avoid that a protocol desynchronization (for -# instance due to a bug in the client) will lead to unbound memory usage in -# the query buffer. However you can configure it here if you have very special -# needs, such us huge multi/exec requests or alike. -# -# client-query-buffer-limit 1gb - -# In the Redis protocol, bulk requests, that are, elements representing single -# strings, are normally limited ot 512 mb. However you can change this limit -# here, but must be 1mb or greater -# -# proto-max-bulk-len 512mb - -# Redis calls an internal function to perform many background tasks, like -# closing connections of clients in timeout, purging expired keys that are -# never requested, and so forth. -# -# Not all tasks are performed with the same frequency, but Redis checks for -# tasks to perform according to the specified "hz" value. -# -# By default "hz" is set to 10. 
Raising the value will use more CPU when -# Redis is idle, but at the same time will make Redis more responsive when -# there are many keys expiring at the same time, and timeouts may be -# handled with more precision. -# -# The range is between 1 and 500, however a value over 100 is usually not -# a good idea. Most users should use the default of 10 and raise this up to -# 100 only in environments where very low latency is required. -hz 10 - -# Normally it is useful to have an HZ value which is proportional to the -# number of clients connected. This is useful in order, for instance, to -# avoid too many clients are processed for each background task invocation -# in order to avoid latency spikes. -# -# Since the default HZ value by default is conservatively set to 10, Redis -# offers, and enables by default, the ability to use an adaptive HZ value -# which will temporary raise when there are many connected clients. -# -# When dynamic HZ is enabled, the actual configured HZ will be used -# as a baseline, but multiples of the configured HZ value will be actually -# used as needed once more clients are connected. In this way an idle -# instance will use very little CPU time while a busy instance will be -# more responsive. -dynamic-hz yes - -# When a child rewrites the AOF file, if the following option is enabled -# the file will be fsync-ed every 32 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -aof-rewrite-incremental-fsync yes - -# When redis saves RDB file, if the following option is enabled -# the file will be fsync-ed every 32 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -rdb-save-incremental-fsync yes - -# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good -# idea to start with the default settings and only change them after investigating -# how to improve the performances and how the keys LFU change over time, which -# is possible to inspect via the OBJECT FREQ command. -# -# There are two tunable parameters in the Redis LFU implementation: the -# counter logarithm factor and the counter decay time. It is important to -# understand what the two parameters mean before changing them. -# -# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis -# uses a probabilistic increment with logarithmic behavior. Given the value -# of the old counter, when a key is accessed, the counter is incremented in -# this way: -# -# 1. A random number R between 0 and 1 is extracted. -# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). -# 3. The counter is incremented only if R < P. -# -# The default lfu-log-factor is 10. 
This is a table of how the frequency -# counter changes with a different number of accesses with different -# logarithmic factors: -# -# +--------+------------+------------+------------+------------+------------+ -# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | -# +--------+------------+------------+------------+------------+------------+ -# | 0 | 104 | 255 | 255 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 1 | 18 | 49 | 255 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 10 | 10 | 18 | 142 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 100 | 8 | 11 | 49 | 143 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# -# NOTE: The above table was obtained by running the following commands: -# -# redis-benchmark -n 1000000 incr foo -# redis-cli object freq foo -# -# NOTE 2: The counter initial value is 5 in order to give new objects a chance -# to accumulate hits. -# -# The counter decay time is the time, in minutes, that must elapse in order -# for the key counter to be divided by two (or decremented if it has a value -# less <= 10). -# -# The default value for the lfu-decay-time is 1. A Special value of 0 means to -# decay the counter every time it happens to be scanned. -# -# lfu-log-factor 10 -# lfu-decay-time 1 - -########################### ACTIVE DEFRAGMENTATION ####################### -# -# What is active defragmentation? -# ------------------------------- -# -# Active (online) defragmentation allows a Redis server to compact the -# spaces left between small allocations and deallocations of data in memory, -# thus allowing to reclaim back memory. -# -# Fragmentation is a natural process that happens with every allocator (but -# less so with Jemalloc, fortunately) and certain workloads. Normally a server -# restart is needed in order to lower the fragmentation, or at least to flush -# away all the data and create it again. However thanks to this feature -# implemented by Oran Agra for Redis 4.0 this process can happen at runtime -# in an "hot" way, while the server is running. -# -# Basically when the fragmentation is over a certain level (see the -# configuration options below) Redis will start to create new copies of the -# values in contiguous memory regions by exploiting certain specific Jemalloc -# features (in order to understand if an allocation is causing fragmentation -# and to allocate it in a better place), and at the same time, will release the -# old copies of the data. This process, repeated incrementally for all the keys -# will cause the fragmentation to drop back to normal values. -# -# Important things to understand: -# -# 1. This feature is disabled by default, and only works if you compiled Redis -# to use the copy of Jemalloc we ship with the source code of Redis. -# This is the default with Linux builds. -# -# 2. You never need to enable this feature if you don't have fragmentation -# issues. -# -# 3. Once you experience fragmentation, you can enable this feature when -# needed with the command "CONFIG SET activedefrag yes". -# -# The configuration parameters are able to fine tune the behavior of the -# defragmentation process. If you are not sure about what they mean it is -# a good idea to leave the defaults untouched. 
-
-# Enable active defragmentation
-# activedefrag no
-
-# Minimum amount of fragmentation waste to start active defrag
-# active-defrag-ignore-bytes 100mb
-
-# Minimum percentage of fragmentation to start active defrag
-# active-defrag-threshold-lower 10
-
-# Maximum percentage of fragmentation at which we use maximum effort
-# active-defrag-threshold-upper 100
-
-# Minimal effort for defrag in CPU percentage, to be used when the lower
-# threshold is reached
-# active-defrag-cycle-min 1
-
-# Maximal effort for defrag in CPU percentage, to be used when the upper
-# threshold is reached
-# active-defrag-cycle-max 25
-
-# Maximum number of set/hash/zset/list fields that will be processed from
-# the main dictionary scan
-# active-defrag-max-scan-fields 1000
-
-# Jemalloc background thread for purging will be enabled by default
-jemalloc-bg-thread yes
-
-# It is possible to pin different threads and processes of Redis to specific
-# CPUs in your system, in order to maximize the performance of the server.
-# This is useful both in order to pin different Redis threads to different
-# CPUs, but also in order to make sure that multiple Redis instances running
-# on the same host will be pinned to different CPUs.
-#
-# Normally you can do this using the "taskset" command, however it is also
-# possible to do this via Redis configuration directly, both in Linux and FreeBSD.
-#
-# You can pin the server/IO threads, bio threads, aof rewrite child process, and
-# the bgsave child process. The syntax to specify the cpu list is the same as
-# the taskset command:
-#
-# Set redis server/io threads to cpu affinity 0,2,4,6:
-# server_cpulist 0-7:2
-#
-# Set bio threads to cpu affinity 1,3:
-# bio_cpulist 1,3
-#
-# Set aof rewrite child process to cpu affinity 8,9,10,11:
-# aof_rewrite_cpulist 8-11
-#
-# Set bgsave child process to cpu affinity 1,10,11:
-# bgsave_cpulist 1,10-11

From ee9b9d32c562e22417dc87a6698bac4aaf49e6d6 Mon Sep 17 00:00:00 2001
From: faebebin
Date: Thu, 28 Sep 2023 00:16:08 +0200
Subject: [PATCH 18/42] Ensure create_subscription created_by Person and
 overwrite createsuperuser

---
 .../management/commands/createsuperuser.py    | 13 ++++++
 .../core/management/commands/createuser.py    | 42 -------------------
 docker-app/qfieldcloud/settings.py            |  3 +-
 docker-app/qfieldcloud/subscription/models.py |  5 +++
 4 files changed, 20 insertions(+), 43 deletions(-)
 create mode 100644 docker-app/qfieldcloud/core/management/commands/createsuperuser.py
 delete mode 100644 docker-app/qfieldcloud/core/management/commands/createuser.py

diff --git a/docker-app/qfieldcloud/core/management/commands/createsuperuser.py b/docker-app/qfieldcloud/core/management/commands/createsuperuser.py
new file mode 100644
index 000000000..439104d11
--- /dev/null
+++ b/docker-app/qfieldcloud/core/management/commands/createsuperuser.py
@@ -0,0 +1,13 @@
+from django.contrib.auth.management.commands.createsuperuser import Command as SuperUserCommand
+
+from qfieldcloud.core.models import Person
+
+
+class Command(SuperUserCommand):
+    """
+    We overwrite the django createsuperuser command because it uses the wrong User model
+    """
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.UserModel = Person
diff --git a/docker-app/qfieldcloud/core/management/commands/createuser.py b/docker-app/qfieldcloud/core/management/commands/createuser.py
deleted file mode 100644
index 4e0b71bcb..000000000
--- a/docker-app/qfieldcloud/core/management/commands/createuser.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from django.contrib.auth import get_user_model
-from django.core.management.base import BaseCommand
-
-
-class Command(BaseCommand):
-    """
-    Creates a normal or super user using the CLI.
-    Unlike Django's createsuperuser command, here we can pass the password as an argument.
-    This is a utility function that is expected to be used only for testing purposes.
-    """
-
-    help = """
-    Create a user with given username, email and password
-    Usage: python manage.py createuser --username=test --email=test@test.com --password=test --superuser
-    """
-
-    def add_arguments(self, parser):
-        parser.add_argument("--username", type=str, required=True)
-        parser.add_argument("--password", type=str, required=True)
-        parser.add_argument("--email", type=str, required=True)
-        parser.add_argument("--superuser", action="store_true")
-
-    def handle(self, *args, **options):
-        username = options.get("username")
-        password = options.get("password")
-        email = options.get("email")
-        is_superuser = options.get("superuser")
-        try:
-            User = get_user_model()
-            if not User.objects.filter(username=username).exists():
-                User.objects.create_user(
-                    username=username,
-                    email=email,
-                    password=password,
-                    is_superuser=is_superuser,
-                )
-                print(f"User {username} has been successfully created\n")
-            else:
-                print(f"User {username} already exists\n")
-        except Exception as e:
-            print("ERROR: Unable to create user\n%s\n" % e)
-            exit(1)
diff --git a/docker-app/qfieldcloud/settings.py b/docker-app/qfieldcloud/settings.py
index 1107e9eb0..07d6ba4d9 100644
--- a/docker-app/qfieldcloud/settings.py
+++ b/docker-app/qfieldcloud/settings.py
@@ -68,7 +68,6 @@
 INSTALLED_APPS = [
     # django contrib
     "django.contrib.admin",
-    "django.contrib.auth",
     "django.contrib.contenttypes",
     "django.contrib.gis",
     "django.contrib.sessions",
@@ -96,6 +95,8 @@
     "auditlog",
     # Local
     "qfieldcloud.core",
+    # listed after core becuase we overwrite createsuperuser command
+    "django.contrib.auth",
     "qfieldcloud.subscription",
     "qfieldcloud.notifs",
     "qfieldcloud.authentication",
diff --git a/docker-app/qfieldcloud/subscription/models.py b/docker-app/qfieldcloud/subscription/models.py
index 0fa0a2bac..9fcaf8a03 100644
--- a/docker-app/qfieldcloud/subscription/models.py
+++ b/docker-app/qfieldcloud/subscription/models.py
@@ -776,6 +776,11 @@ def create_default_plan_subscription(
         else:
             created_by = account.user
 
+        if not isinstance(created_by, Person):
+            created_by = Person.objects.get(pk=created_by.pk)
+
+
+
         if active_since is None:
             active_since = timezone.now()

From 4e291289b92af51f9c06ea8a30cde19e376e3091 Mon Sep 17 00:00:00 2001
From: faebebin
Date: Thu, 28 Sep 2023 00:25:07 +0200
Subject: [PATCH 19/42] Fix formatting

---
 .../qfieldcloud/core/management/commands/createsuperuser.py | 5 +++--
 docker-app/qfieldcloud/subscription/models.py               | 2 --
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/docker-app/qfieldcloud/core/management/commands/createsuperuser.py b/docker-app/qfieldcloud/core/management/commands/createsuperuser.py
index 439104d11..18252dcd7 100644
--- a/docker-app/qfieldcloud/core/management/commands/createsuperuser.py
+++ b/docker-app/qfieldcloud/core/management/commands/createsuperuser.py
@@ -1,5 +1,6 @@
-from django.contrib.auth.management.commands.createsuperuser import Command as SuperUserCommand
-
+from django.contrib.auth.management.commands.createsuperuser import (
+    Command as SuperUserCommand,
+)
 from qfieldcloud.core.models import Person
 
 
diff --git a/docker-app/qfieldcloud/subscription/models.py b/docker-app/qfieldcloud/subscription/models.py
index 9fcaf8a03..6c937bd94 100644
--- a/docker-app/qfieldcloud/subscription/models.py
+++ b/docker-app/qfieldcloud/subscription/models.py
@@ -779,8 +779,6 @@ def create_default_plan_subscription(
         if not isinstance(created_by, Person):
             created_by = Person.objects.get(pk=created_by.pk)
 
-
-
         if active_since is None:
             active_since = timezone.now()
 

From 713baa197016daa2fd48bc1718d22fcec006943d Mon Sep 17 00:00:00 2001
From: faebebin
Date: Thu, 28 Sep 2023 09:33:55 +0200
Subject: [PATCH 20/42] Track remaining_trial_organizations on org owner

---
 docker-app/qfieldcloud/subscription/models.py | 18 ++++++----
 .../subscription/tests/test_subscription.py   | 35 +++++++++++--------
 2 files changed, 31 insertions(+), 22 deletions(-)

diff --git a/docker-app/qfieldcloud/subscription/models.py b/docker-app/qfieldcloud/subscription/models.py
index 0fa0a2bac..1737cdfd8 100644
--- a/docker-app/qfieldcloud/subscription/models.py
+++ b/docker-app/qfieldcloud/subscription/models.py
@@ -834,9 +834,12 @@ def create_subscription(
         # NOTE to get annotations, mostly `is_active`
         trial_subscription_obj = cls.objects.get(pk=trial_subscription.pk)
 
-        if created_by.remaining_trial_organizations > 0:
-            created_by.remaining_trial_organizations -= 1
-            created_by.save(update_fields=["remaining_trial_organizations"])
+        if (
+            account.user.is_organization
+            and account.user.owner.remaining_trial_organizations > 0
+        ):
+            account.user.owner.remaining_trial_organizations -= 1
+            account.user.owner.save(update_fields=["remaining_trial_organizations"])
 
         # the trial plan should be the default plan
         regular_plan = Plan.objects.get(
@@ -854,10 +857,11 @@ def create_subscription(
         # NOTE in case the user had a custom amount set (e.g. manually set by support) this will
         # be overwritten by a subscription plan change.
         # But taking care of this would add quite some complexity.
- created_by.remaining_trial_organizations = ( - regular_plan.max_trial_organizations - ) - created_by.save(update_fields=["remaining_trial_organizations"]) + if account.user.is_person: + account.user.remaining_trial_organizations = ( + regular_plan.max_trial_organizations + ) + account.user.save(update_fields=["remaining_trial_organizations"]) logger.info(f"Creating regular subscription from {regular_active_since}") regular_subscription = cls.objects.create( diff --git a/docker-app/qfieldcloud/subscription/tests/test_subscription.py b/docker-app/qfieldcloud/subscription/tests/test_subscription.py index 40fbf8311..239a802ac 100644 --- a/docker-app/qfieldcloud/subscription/tests/test_subscription.py +++ b/docker-app/qfieldcloud/subscription/tests/test_subscription.py @@ -731,33 +731,38 @@ def test_remaining_trial_organizations_is_set_and_decremented( user_plan.max_trial_organizations = 2 user_plan.save(update_fields=["max_trial_organizations"]) u1 = Person.objects.create(username="u1") + u2 = Person.objects.create(username="u2") # remaining_trial_organizations is set on create_subscription self.assertEqual(u1.remaining_trial_organizations, 2) + self.assertEqual(u2.remaining_trial_organizations, 2) trial_plan = Plan.objects.get(code="default_org") trial_plan.is_trial = True trial_plan.save(update_fields=["is_trial"]) - def assert_create_org_decrements_count( - org_name, user, remaining_trial_organizations - ): - Organization.objects.create( - username=org_name, organization_owner=user, created_by=user - ) - user.refresh_from_db() - self.assertEqual( - user.remaining_trial_organizations, remaining_trial_organizations - ) - - # remaining_trial_organizations is decremented when creating a trial organization - assert_create_org_decrements_count("org1", u1, 1) + # remaining_trial_organizations is decremented for owner when creating a trial organization + Organization.objects.create( + username="org2", organization_owner=u2, created_by=u1 + ) + u2.refresh_from_db() + self.assertEqual(u2.remaining_trial_organizations, 1) + u1.refresh_from_db() + self.assertEqual(u2.remaining_trial_organizations, 2) # remaining_trial_organizations is decremented down to 0 - assert_create_org_decrements_count("org2", u1, 0) + Organization.objects.create( + username="org3", organization_owner=u2, created_by=u1 + ) + u2.refresh_from_db() + self.assertEqual(u2.remaining_trial_organizations, 0) # It doesn't directly prevent creating more trials, is just a counter that stays at 0 - assert_create_org_decrements_count("org3", u1, 0) + Organization.objects.create( + username="org4", organization_owner=u2, created_by=u1 + ) + u2.refresh_from_db() + self.assertEqual(u2.remaining_trial_organizations, 0) def test_project_lists_duplicates_if_multiple_subscriptions(self): u1 = Person.objects.create(username="u1") From 9c9d067fe16db0f2ff3579e0dce4d7db78756336 Mon Sep 17 00:00:00 2001 From: faebebin Date: Thu, 28 Sep 2023 10:17:06 +0200 Subject: [PATCH 21/42] Fix owner field --- docker-app/qfieldcloud/subscription/models.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/docker-app/qfieldcloud/subscription/models.py b/docker-app/qfieldcloud/subscription/models.py index 1737cdfd8..b113742b2 100644 --- a/docker-app/qfieldcloud/subscription/models.py +++ b/docker-app/qfieldcloud/subscription/models.py @@ -836,10 +836,12 @@ def create_subscription( if ( account.user.is_organization - and account.user.owner.remaining_trial_organizations > 0 + and account.user.organization_owner.remaining_trial_organizations > 0 
): - account.user.owner.remaining_trial_organizations -= 1 - account.user.owner.save(update_fields=["remaining_trial_organizations"]) + account.user.organization_owner.remaining_trial_organizations -= 1 + account.user.organization_owner.save( + update_fields=["remaining_trial_organizations"] + ) # the trial plan should be the default plan regular_plan = Plan.objects.get( From 3cb87d0bc8a810407b3ca9d42ef9c23a1677350a Mon Sep 17 00:00:00 2001 From: faebebin Date: Fri, 29 Sep 2023 16:03:00 +0200 Subject: [PATCH 22/42] Fix test --- docker-app/qfieldcloud/subscription/tests/test_subscription.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-app/qfieldcloud/subscription/tests/test_subscription.py b/docker-app/qfieldcloud/subscription/tests/test_subscription.py index 239a802ac..22f2c8561 100644 --- a/docker-app/qfieldcloud/subscription/tests/test_subscription.py +++ b/docker-app/qfieldcloud/subscription/tests/test_subscription.py @@ -748,7 +748,7 @@ def test_remaining_trial_organizations_is_set_and_decremented( u2.refresh_from_db() self.assertEqual(u2.remaining_trial_organizations, 1) u1.refresh_from_db() - self.assertEqual(u2.remaining_trial_organizations, 2) + self.assertEqual(u1.remaining_trial_organizations, 2) # remaining_trial_organizations is decremented down to 0 Organization.objects.create( From 1b1ff89ed691f9f903512f439a3df14d51b672ae Mon Sep 17 00:00:00 2001 From: faebebin Date: Fri, 29 Sep 2023 16:05:15 +0200 Subject: [PATCH 23/42] Add comment --- docker-app/qfieldcloud/subscription/tests/test_subscription.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker-app/qfieldcloud/subscription/tests/test_subscription.py b/docker-app/qfieldcloud/subscription/tests/test_subscription.py index 22f2c8561..5040ba2a0 100644 --- a/docker-app/qfieldcloud/subscription/tests/test_subscription.py +++ b/docker-app/qfieldcloud/subscription/tests/test_subscription.py @@ -764,6 +764,8 @@ def test_remaining_trial_organizations_is_set_and_decremented( u2.refresh_from_db() self.assertEqual(u2.remaining_trial_organizations, 0) + # NOTE changing ownership does not affect the `remaining_trial_organizations` and is not tested + def test_project_lists_duplicates_if_multiple_subscriptions(self): u1 = Person.objects.create(username="u1") old_subscription = u1.useraccount.current_subscription From 9c330294027161f9864e92301a72c51f4ee9765e Mon Sep 17 00:00:00 2001 From: why-not-try-calmer Date: Mon, 2 Oct 2023 09:23:35 +0200 Subject: [PATCH 24/42] handling exception closer to call site --- docker-app/worker_wrapper/wrapper.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/docker-app/worker_wrapper/wrapper.py b/docker-app/worker_wrapper/wrapper.py index 1307c3365..369e9f5a6 100644 --- a/docker-app/worker_wrapper/wrapper.py +++ b/docker-app/worker_wrapper/wrapper.py @@ -602,9 +602,14 @@ def after_docker_exception(self) -> None: def cancel_orphaned_workers() -> None: client: DockerClient = docker.from_env() - running_workers: list[Container] = client.containers.list( - filters={"label": f"app={settings.ENVIRONMENT}_worker"}, - ) + try: + running_workers: list[Container] = client.containers.list( + filters={"label": f"app={settings.ENVIRONMENT}_worker"}, + ) + except docker.errors.NotFound: + # We don't mind empty references since they mean there is no + # orphan to cancel. 
+        return
 
     if len(running_workers) == 0:
         return

From f3bbeeca5f5f7bc9ceb54d5afb411336adbe2eb3 Mon Sep 17 00:00:00 2001
From: Fabian Binder
Date: Wed, 4 Oct 2023 14:34:23 +0200
Subject: [PATCH 25/42] Fix typo in comment

Co-authored-by: Ivan Ivanov
---
 docker-app/qfieldcloud/settings.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker-app/qfieldcloud/settings.py b/docker-app/qfieldcloud/settings.py
index 07d6ba4d9..00d58f48f 100644
--- a/docker-app/qfieldcloud/settings.py
+++ b/docker-app/qfieldcloud/settings.py
@@ -95,7 +95,7 @@
     "auditlog",
     # Local
     "qfieldcloud.core",
-    # listed after core becuase we overwrite createsuperuser command
+    # listed after core because we overwrite createsuperuser command
     "django.contrib.auth",
     "qfieldcloud.subscription",
     "qfieldcloud.notifs",

From 1805d7a46167120941b2c34be7dad35906ce31cf Mon Sep 17 00:00:00 2001
From: faebebin
Date: Wed, 4 Oct 2023 14:39:34 +0200
Subject: [PATCH 26/42] Recover command createuser.py

---
 .../core/management/commands/createuser.py    | 42 +++++++++++++++++++
 1 file changed, 42 insertions(+)
 create mode 100644 docker-app/qfieldcloud/core/management/commands/createuser.py

diff --git a/docker-app/qfieldcloud/core/management/commands/createuser.py b/docker-app/qfieldcloud/core/management/commands/createuser.py
new file mode 100644
index 000000000..4e0b71bcb
--- /dev/null
+++ b/docker-app/qfieldcloud/core/management/commands/createuser.py
@@ -0,0 +1,42 @@
+from django.contrib.auth import get_user_model
+from django.core.management.base import BaseCommand
+
+
+class Command(BaseCommand):
+    """
+    Creates a normal or super user using the CLI.
+    Unlike Django's createsuperuser command, here we can pass the password as an argument.
+    This is a utility function that is expected to be used only for testing purposes.
+ """ + + help = """ + Create a user with given username, email and password + Usage: python manage.py createuser --username=test --email=test@test.com --password=test --superuser + """ + + def add_arguments(self, parser): + parser.add_argument("--username", type=str, required=True) + parser.add_argument("--password", type=str, required=True) + parser.add_argument("--email", type=str, required=True) + parser.add_argument("--superuser", action="store_true") + + def handle(self, *args, **options): + username = options.get("username") + password = options.get("password") + email = options.get("email") + is_superuser = options.get("superuser") + try: + User = get_user_model() + if not User.objects.filter(username=username).exists(): + User.objects.create_user( + username=username, + email=email, + password=password, + is_superuser=is_superuser, + ) + print(f"User {username} has been successfully created\n") + else: + print(f"User {username} already exists\n") + except Exception as e: + print("ERROR: Unable to create user\n%s\n" % e) + exit(1) From 215ddf51e5bd46b0c8122d417ec62b2e8a5a51a5 Mon Sep 17 00:00:00 2001 From: faebebin Date: Wed, 4 Oct 2023 14:42:10 +0200 Subject: [PATCH 27/42] Fix command createuser.py --- .../qfieldcloud/core/management/commands/createuser.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docker-app/qfieldcloud/core/management/commands/createuser.py b/docker-app/qfieldcloud/core/management/commands/createuser.py index 4e0b71bcb..6aba9f631 100644 --- a/docker-app/qfieldcloud/core/management/commands/createuser.py +++ b/docker-app/qfieldcloud/core/management/commands/createuser.py @@ -1,4 +1,4 @@ -from django.contrib.auth import get_user_model +from qfieldcloud.core.models import Person from django.core.management.base import BaseCommand @@ -26,9 +26,8 @@ def handle(self, *args, **options): email = options.get("email") is_superuser = options.get("superuser") try: - User = get_user_model() - if not User.objects.filter(username=username).exists(): - User.objects.create_user( + if not Person.objects.filter(username=username).exists(): + Person.objects.create_user( username=username, email=email, password=password, From fbbe298fb4cea7c1e808b6e7f5cb5de9ba29a349 Mon Sep 17 00:00:00 2001 From: faebebin Date: Wed, 4 Oct 2023 14:44:58 +0200 Subject: [PATCH 28/42] format --- docker-app/qfieldcloud/core/management/commands/createuser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-app/qfieldcloud/core/management/commands/createuser.py b/docker-app/qfieldcloud/core/management/commands/createuser.py index 6aba9f631..753b31390 100644 --- a/docker-app/qfieldcloud/core/management/commands/createuser.py +++ b/docker-app/qfieldcloud/core/management/commands/createuser.py @@ -1,5 +1,5 @@ -from qfieldcloud.core.models import Person from django.core.management.base import BaseCommand +from qfieldcloud.core.models import Person class Command(BaseCommand): From 2fcfed8c2b31ec3febc3807fcd6d4f9e82b8fbdd Mon Sep 17 00:00:00 2001 From: Ivan Ivanov Date: Wed, 4 Oct 2023 22:31:26 +0300 Subject: [PATCH 29/42] Fix OpenAPI and Swagger URLs and CORS --- docker-app/qfieldcloud/urls.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docker-app/qfieldcloud/urls.py b/docker-app/qfieldcloud/urls.py index a89fd6169..66680429f 100644 --- a/docker-app/qfieldcloud/urls.py +++ b/docker-app/qfieldcloud/urls.py @@ -42,14 +42,19 @@ name="index", ), path( - "schema/", + "swagger.yaml", SpectacularAPIView.as_view(), - 
name="schema", + name="openapi_schema", ), path( "swagger/", - SpectacularSwaggerView.as_view(url_name="schema"), - name="schema-swagger-ui", + SpectacularSwaggerView.as_view(url_name="openapi_schema"), + name="openapi_swaggerui", + ), + path( + "docs/", + SpectacularRedocView.as_view(url_name="openapi_schema"), + name="openapi_redoc", ), path( settings.QFIELDCLOUD_ADMIN_URI + "api/files//", @@ -65,11 +70,6 @@ name="project_file_download", ), path(settings.QFIELDCLOUD_ADMIN_URI, admin.site.urls), - path( - "docs/", - SpectacularRedocView.as_view(url_name="schema"), - name="schema-redoc", - ), path("api/v1/auth/login/", auth_views.LoginView.as_view()), path("api/v1/auth/token/", auth_views.LoginView.as_view()), path("api/v1/auth/user/", auth_views.UserView.as_view()), From 71f1790e96deee129e2df4a1d0196fa8543c4e70 Mon Sep 17 00:00:00 2001 From: Ivan Ivanov Date: Thu, 19 Oct 2023 12:35:35 +0300 Subject: [PATCH 30/42] Add a differentiation between login+pwd vs token errors in the API token related stuff is like this: { "code": "token_authentication_failed", "message": "Token authentication failed" } vs login failed: { "code": "authentication_failed", "message": "Authentication failed" } --- docker-app/qfieldcloud/authentication/authentication.py | 8 ++++---- docker-app/qfieldcloud/core/exceptions.py | 8 ++++++++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/docker-app/qfieldcloud/authentication/authentication.py b/docker-app/qfieldcloud/authentication/authentication.py index 67b6da5bd..224d5e5f7 100644 --- a/docker-app/qfieldcloud/authentication/authentication.py +++ b/docker-app/qfieldcloud/authentication/authentication.py @@ -4,11 +4,11 @@ from django.utils import timezone from django.utils.translation import gettext as _ from qfieldcloud.core.models import User -from rest_framework import exceptions from rest_framework.authentication import ( TokenAuthentication as DjangoRestFrameworkTokenAuthentication, ) +from ..core.exceptions import AuthenticationViaTokenFailedError from .models import AuthToken @@ -54,13 +54,13 @@ def authenticate_credentials(self, key): try: token = model.objects.get(key=key) except model.DoesNotExist: - raise exceptions.AuthenticationFailed(_("Invalid token.")) + raise AuthenticationViaTokenFailedError(_("Invalid token.")) if not token.is_active: - raise exceptions.AuthenticationFailed(_("Token has expired.")) + raise AuthenticationViaTokenFailedError(_("Token has expired.")) if not token.user.is_active: - raise exceptions.AuthenticationFailed(_("User inactive or deleted.")) + raise AuthenticationViaTokenFailedError(_("User inactive or deleted.")) # update the token last used time # NOTE the UPDATE may be performed already on the `token = model.objects.get(key=key)`, but we lose "token has expired" exception. 
diff --git a/docker-app/qfieldcloud/core/exceptions.py b/docker-app/qfieldcloud/core/exceptions.py
index 79b693300..fad060b0b 100644
--- a/docker-app/qfieldcloud/core/exceptions.py
+++ b/docker-app/qfieldcloud/core/exceptions.py
@@ -48,6 +48,14 @@ class AuthenticationFailedError(QFieldCloudException):
     status_code = status.HTTP_401_UNAUTHORIZED
 
 
+class AuthenticationViaTokenFailedError(QFieldCloudException):
+    """Raised when QFieldCloud incoming request includes incorrect authentication token."""
+
+    code = "token_authentication_failed"
+    message = "Token authentication failed"
+    status_code = status.HTTP_401_UNAUTHORIZED
+
+
 class NotAuthenticatedError(QFieldCloudException):
     """Raised when QFieldCloud unauthenticated request fails the permission checks."""
 

From d09048abb69549e8d2df9dc99ad92abda6073cdc Mon Sep 17 00:00:00 2001
From: Ivan Ivanov
Date: Thu, 19 Oct 2023 12:35:45 +0300
Subject: [PATCH 31/42] Fix typo in workflow step name

---
 .github/workflows/test.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 49da6f251..4a2971c4a 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -44,7 +44,7 @@ jobs:
         run: |
           ln -s docker-compose.override.local.yml docker-compose.override.yml
 
-      - name: Check env vars coniguration
+      - name: Check env vars configuration
         run: |
           scripts/check_envvars.sh

From dcb0769ea266825da8f61e62600b5c536cb1940b Mon Sep 17 00:00:00 2001
From: Ivan Ivanov
Date: Thu, 19 Oct 2023 13:56:56 +0300
Subject: [PATCH 32/42] Disable tmate sessions

Did not delete it in case it is really needed by someone

---
 .github/workflows/test.yml | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 49da6f251..b33a82f68 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -83,9 +83,9 @@ jobs:
             Failed job run for branch `${{ github.head_ref || github.ref_name }}`, check ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} .
           gchat_webhook_url: ${{ secrets.GOOGLE_CHAT_WEBHOOK_URL }}
 
-      - name: Setup tmate session
-        if: ${{ failure() }}
-        uses: mxschmitt/action-tmate@v3
-        timeout-minutes: 30
-        with:
-          limit-access-to-actor: true
+      # - name: Setup tmate session
+      #   if: ${{ failure() }}
+      #   uses: mxschmitt/action-tmate@v3
+      #   timeout-minutes: 30
+      #   with:
+      #     limit-access-to-actor: true

From 47c28625844f48094ccde292b182acba52fed70a Mon Sep 17 00:00:00 2001
From: Ivan Ivanov
Date: Wed, 27 Sep 2023 15:58:12 +0300
Subject: [PATCH 33/42] Remove the tmpdir with feedback and thumbnail after job
 is finished

Otherwise we run out of /tmp storage with long uptime.
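
A rough sketch of the intended lifecycle (only the `shared_tempdir`
attribute and the `shutil.rmtree(...)` call come from the actual change;
the class and method names here are illustrative, not the real worker
wrapper code):

    import shutil
    import tempfile
    from pathlib import Path

    class JobRun:
        def __init__(self):
            # per-job scratch space where feedback and the thumbnail are written
            self.shared_tempdir = Path(tempfile.mkdtemp(dir="/tmp"))

        def run(self):
            ...  # run the job, writing its outputs into self.shared_tempdir
            # remove the scratch directory once the job has finished, so that
            # hosts with long uptimes do not slowly fill up /tmp
            shutil.rmtree(str(self.shared_tempdir), ignore_errors=True)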
--- docker-app/worker_wrapper/wrapper.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docker-app/worker_wrapper/wrapper.py b/docker-app/worker_wrapper/wrapper.py index 369e9f5a6..48ca4c315 100644 --- a/docker-app/worker_wrapper/wrapper.py +++ b/docker-app/worker_wrapper/wrapper.py @@ -1,6 +1,7 @@ import json import logging import os +import shutil import sys import tempfile import traceback @@ -223,6 +224,8 @@ def run(self): self.after_docker_run() + shutil.rmtree(str(self.shared_tempdir), ignore_errors=True) + self.job.finished_at = timezone.now() self.job.status = Job.Status.FINISHED self.job.save(update_fields=["status", "finished_at"]) From 8ff0563ea700d7262626147069e7861247140b2b Mon Sep 17 00:00:00 2001 From: Ivan Ivanov Date: Wed, 13 Sep 2023 15:43:44 +0300 Subject: [PATCH 34/42] Bump `qfieldcloud-sdk` dependency --- docker-qgis/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-qgis/requirements.txt b/docker-qgis/requirements.txt index 312de3e5d..1c2dcd36a 100644 --- a/docker-qgis/requirements.txt +++ b/docker-qgis/requirements.txt @@ -3,4 +3,4 @@ typing-extensions>=3.7.4.3,<3.7.5 tabulate==v0.8.9 sentry-sdk requests>=2.28.1 -qfieldcloud-sdk==0.7.0 +qfieldcloud-sdk==0.8.2 From 986b3ebd56bf8f75393a86776923e1480f09b81a Mon Sep 17 00:00:00 2001 From: Ivan Ivanov Date: Wed, 25 Oct 2023 05:00:14 +0300 Subject: [PATCH 35/42] Bump qgis to 3_32_3 --- docker-qgis/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-qgis/Dockerfile b/docker-qgis/Dockerfile index 5e18678ac..07f815806 100644 --- a/docker-qgis/Dockerfile +++ b/docker-qgis/Dockerfile @@ -1,4 +1,4 @@ -FROM qgis/qgis:final-3_32_2 +FROM qgis/qgis:final-3_32_3 RUN apt-get update \ && apt-get upgrade -y \ From 1b26897f8df6eae1935e94c183274a78b10b45b3 Mon Sep 17 00:00:00 2001 From: Ivan Ivanov Date: Wed, 25 Oct 2023 05:11:32 +0300 Subject: [PATCH 36/42] Save some GitHub electricity --- .github/workflows/test.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 944fe5509..86dab2105 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,7 +1,14 @@ name: Test +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + on: push: + branches: + - master + - release pull_request: jobs: From c6d0ebc59faa68c1eb25a72897685420abbb2d73 Mon Sep 17 00:00:00 2001 From: Matthias Kuhn Date: Wed, 25 Oct 2023 12:48:49 +0200 Subject: [PATCH 37/42] Rename tag to docker_tag --- .github/workflows/build_and_push.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build_and_push.yml b/.github/workflows/build_and_push.yml index 1798b50f2..56000c9b5 100644 --- a/.github/workflows/build_and_push.yml +++ b/.github/workflows/build_and_push.yml @@ -21,7 +21,7 @@ jobs: VERSION=${GITHUB_REF#refs/tags/v} fi TAG=${VERSION} - echo ::set-output name=tag::${TAG} + echo ::set-output name=docker_tag::${TAG} git submodule update --init --recursive --depth 1 - name: Set up Docker Buildx @@ -61,7 +61,7 @@ jobs: context: ./docker-app file: ./docker-app/Dockerfile push: ${{ github.event_name != 'pull_request' }} - tags: opengisch/qfieldcloud-app:${{ steps.prepare.outputs.tag }} + tags: opengisch/qfieldcloud-app:${{ steps.prepare.outputs.docker_tag }} - name: Docker Build and Push Worker id: docker_build_and_push_worker @@ -72,7 +72,7 @@ jobs: context: ./docker-app file: ./docker-app/Dockerfile push: ${{ github.event_name != 
'pull_request' }} - tags: opengisch/qfieldcloud-worker-wrapper:${{ steps.prepare.outputs.tag }} + tags: opengisch/qfieldcloud-worker-wrapper:${{ steps.prepare.outputs.docker_tag }} # QGIS - name: Docker Test QGIS @@ -91,7 +91,7 @@ jobs: context: ./docker-qgis file: ./docker-qgis/Dockerfile push: ${{ github.event_name != 'pull_request' }} - tags: opengisch/qfieldcloud-qgis:${{ steps.prepare.outputs.tag }} + tags: opengisch/qfieldcloud-qgis:${{ steps.prepare.outputs.docker_tag }} - name: Trigger deployment on private repository uses: peter-evans/repository-dispatch@v1 @@ -99,4 +99,4 @@ jobs: token: ${{ secrets.GIT_ACCESS_TOKEN }} repository: opengisch/qfieldcloud-private event-type: public_dispatch - client-payload: '{"version": "${{ steps.prepare.outputs.tag }}"}' + client-payload: '{"version": "${{ steps.prepare.outputs.docker_tag }}"}' From 23b63c4e593e2030e8b8626f49f14b46ceb59e3f Mon Sep 17 00:00:00 2001 From: Matthias Kuhn Date: Wed, 25 Oct 2023 12:50:02 +0200 Subject: [PATCH 38/42] Replace deprecated set-output command --- .github/workflows/build_and_push.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_and_push.yml b/.github/workflows/build_and_push.yml index 56000c9b5..631dfb5a7 100644 --- a/.github/workflows/build_and_push.yml +++ b/.github/workflows/build_and_push.yml @@ -21,7 +21,7 @@ jobs: VERSION=${GITHUB_REF#refs/tags/v} fi TAG=${VERSION} - echo ::set-output name=docker_tag::${TAG} + echo "docker_tag=${TAG}" >> $GITHUB_OUTPUT git submodule update --init --recursive --depth 1 - name: Set up Docker Buildx From a77a35c975027ec015fb933cd61fccf98b27cc70 Mon Sep 17 00:00:00 2001 From: Matthias Kuhn Date: Wed, 25 Oct 2023 12:58:26 +0200 Subject: [PATCH 39/42] Push git sha next to latest and version tags --- .github/workflows/build_and_push.yml | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build_and_push.yml b/.github/workflows/build_and_push.yml index 631dfb5a7..4d3d4ba31 100644 --- a/.github/workflows/build_and_push.yml +++ b/.github/workflows/build_and_push.yml @@ -3,6 +3,7 @@ on: push: branches: - master + - docker_tags tags: - "v*.*.*" jobs: @@ -15,13 +16,13 @@ jobs: - name: Prepare id: prepare run: | - VERSION="latest" - TAG="" + TAG="latest" + COMMIT="${GITHUB_SHA}" if [[ $GITHUB_REF == refs/tags/* ]]; then - VERSION=${GITHUB_REF#refs/tags/v} + TAG=${GITHUB_REF#refs/tags/v} fi - TAG=${VERSION} echo "docker_tag=${TAG}" >> $GITHUB_OUTPUT + echo "docker_commit=${COMMIT}" >> $GITHUB_OUTPUT git submodule update --init --recursive --depth 1 - name: Set up Docker Buildx @@ -61,7 +62,9 @@ jobs: context: ./docker-app file: ./docker-app/Dockerfile push: ${{ github.event_name != 'pull_request' }} - tags: opengisch/qfieldcloud-app:${{ steps.prepare.outputs.docker_tag }} + tags: | + opengisch/qfieldcloud-app:${{ steps.prepare.outputs.docker_tag }} + opengisch/qfieldcloud-app:${{ steps.prepare.outputs.docker_commit }} - name: Docker Build and Push Worker id: docker_build_and_push_worker @@ -72,7 +75,9 @@ jobs: context: ./docker-app file: ./docker-app/Dockerfile push: ${{ github.event_name != 'pull_request' }} - tags: opengisch/qfieldcloud-worker-wrapper:${{ steps.prepare.outputs.docker_tag }} + tags: | + opengisch/qfieldcloud-worker-wrapper:${{ steps.prepare.outputs.docker_tag }} + opengisch/qfieldcloud-worker-wrapper:${{ steps.prepare.outputs.docker_commit }} # QGIS - name: Docker Test QGIS @@ -91,7 +96,9 @@ jobs: context: ./docker-qgis file: ./docker-qgis/Dockerfile push: ${{ 
github.event_name != 'pull_request' }} - tags: opengisch/qfieldcloud-qgis:${{ steps.prepare.outputs.docker_tag }} + tags: | + opengisch/qfieldcloud-qgis:${{ steps.prepare.outputs.docker_tag }} + opengisch/qfieldcloud-qgis:${{ steps.prepare.outputs.docker_commit }} - name: Trigger deployment on private repository uses: peter-evans/repository-dispatch@v1 From 3743e7909ffa9d6e1d0e75038a683236da6a81f2 Mon Sep 17 00:00:00 2001 From: Matthias Kuhn Date: Wed, 25 Oct 2023 13:52:13 +0200 Subject: [PATCH 40/42] Use `commit-shortSha` for docker image tags --- .github/workflows/build_and_push.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_and_push.yml b/.github/workflows/build_and_push.yml index 4d3d4ba31..9cb692e3f 100644 --- a/.github/workflows/build_and_push.yml +++ b/.github/workflows/build_and_push.yml @@ -17,7 +17,7 @@ jobs: id: prepare run: | TAG="latest" - COMMIT="${GITHUB_SHA}" + COMMIT="commit-$(git rev-parse --short ${{ github.sha }})" if [[ $GITHUB_REF == refs/tags/* ]]; then TAG=${GITHUB_REF#refs/tags/v} fi From d7bb9a8fb45210531c05de6328bf7db5ddf2cf72 Mon Sep 17 00:00:00 2001 From: Matthias Kuhn Date: Wed, 25 Oct 2023 13:53:08 +0200 Subject: [PATCH 41/42] Do not autodeploy latest public --- .github/workflows/build_and_push.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.github/workflows/build_and_push.yml b/.github/workflows/build_and_push.yml index 9cb692e3f..6ab1f9f70 100644 --- a/.github/workflows/build_and_push.yml +++ b/.github/workflows/build_and_push.yml @@ -99,11 +99,3 @@ jobs: tags: | opengisch/qfieldcloud-qgis:${{ steps.prepare.outputs.docker_tag }} opengisch/qfieldcloud-qgis:${{ steps.prepare.outputs.docker_commit }} - - - name: Trigger deployment on private repository - uses: peter-evans/repository-dispatch@v1 - with: - token: ${{ secrets.GIT_ACCESS_TOKEN }} - repository: opengisch/qfieldcloud-private - event-type: public_dispatch - client-payload: '{"version": "${{ steps.prepare.outputs.docker_tag }}"}' From 648cc98fcad50a216fe52dbd99b2bcf0a4d31386 Mon Sep 17 00:00:00 2001 From: Matthias Kuhn Date: Wed, 25 Oct 2023 13:57:31 +0200 Subject: [PATCH 42/42] Remove test condition --- .github/workflows/build_and_push.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/build_and_push.yml b/.github/workflows/build_and_push.yml index 6ab1f9f70..8cb55558b 100644 --- a/.github/workflows/build_and_push.yml +++ b/.github/workflows/build_and_push.yml @@ -3,7 +3,6 @@ on: push: branches: - master - - docker_tags tags: - "v*.*.*" jobs: