Skip to content

Commit

Permalink
[FR] Support new_terms schema import/export w/custom format (#3890)
Browse files Browse the repository at this point in the history
* [FR] Support new_terms schema import/export w/custom format

* fix formatter for filters

* handle both rule formats when parsing data view

(cherry picked from commit 2110ad5)
  • Loading branch information
Mikaayenson authored and github-actions[bot] committed Jul 12, 2024
1 parent 9581c02 commit e4e36b7
Show file tree
Hide file tree
Showing 3 changed files with 31 additions and 2 deletions.
5 changes: 4 additions & 1 deletion detection_rules/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,10 @@ def import_rules_into_repo(input_file, required_only, directory):
base_path = contents.get('name') or contents.get('rule', {}).get('name')
base_path = rulename_to_filename(base_path) if base_path else base_path
rule_path = os.path.join(RULES_DIR, base_path) if base_path else None
additional = ['index'] if not contents.get('data_view_id') else ['data_view_id']

# handle both rule json formats loaded from kibana and toml
data_view_id = contents.get("data_view_id") or contents.get("rule", {}).get("data_view_id")
additional = ["index"] if not data_view_id else ["data_view_id"]
rule_prompt(rule_path, required_only=required_only, save=True, verbose=True,
additional_required=additional, **contents)

Expand Down
23 changes: 22 additions & 1 deletion detection_rules/rule.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
from semver import Version
from marko.block import Document as MarkoDocument
from marko.ext.gfm import gfm
from marshmallow import ValidationError, validates_schema
from marshmallow import ValidationError, validates_schema, pre_load

import kql

Expand Down Expand Up @@ -768,6 +768,27 @@ class HistoryWindowStart:
type: Literal["new_terms"]
new_terms: NewTermsMapping

@pre_load
def preload_data(self, data: dict, **kwargs) -> dict:
    """Preloads and formats the data to match the required schema.

    Rules exported from Kibana carry the flat keys ``new_terms_fields`` and
    ``history_window_start``; the TOML schema expects them folded into a
    single nested ``new_terms`` mapping. When both flat keys are present,
    build that mapping and drop the originals; otherwise pass data through
    untouched.
    """
    has_flat_keys = "new_terms_fields" in data and "history_window_start" in data
    if has_flat_keys:
        # Pop the flat keys up front so the dict is cleaned up in the same
        # step that captures their values.
        terms_value = data.pop("new_terms_fields")
        window_value = data.pop("history_window_start")
        data["new_terms"] = {
            "field": "new_terms_fields",
            "value": terms_value,
            "history_window_start": [
                {"field": "history_window_start", "value": window_value},
            ],
        }
    return data

def transform(self, obj: dict) -> dict:
"""Transforms new terms data to API format for Kibana."""
obj[obj["new_terms"].get("field")] = obj["new_terms"].get("value")
Expand Down
5 changes: 5 additions & 0 deletions detection_rules/rule_formatter.py
Original file line number Diff line number Diff line change
Expand Up @@ -216,6 +216,11 @@ def _do_write(_data, _contents):
preserved_fields = ["params.message"]
v = [preserve_formatting_for_fields(action, preserved_fields) for action in v]

if k == 'filters':
# explicitly preserve formatting for value field in filters
preserved_fields = ["meta.value"]
v = [preserve_formatting_for_fields(meta, preserved_fields) for meta in v]

if k == 'note' and isinstance(v, str):
# Transform instances of \ to \\ as calling write will convert \\ to \.
# This will ensure that the output file has the correct number of backslashes.
Expand Down

0 comments on commit e4e36b7

Please sign in to comment.