From fd4606e495f3bed5cb14fe903d532baf2efc9ac6 Mon Sep 17 00:00:00 2001
From: Michael Bear <38406045+mjbear@users.noreply.github.com>
Date: Tue, 8 Oct 2024 15:56:39 -0400
Subject: [PATCH] Resolve Fortinet HA Status parsing issues (#1861)

---
 .../fortinet_get_system_ha_status.textfsm     |  6 +--
 .../fortinet_get_system_ha_status_5.6.yml     |  3 +-
 .../fortinet_get_system_ha_status_6.0.yml     |  3 +-
 ...fortinet_get_system_ha_status_6.0_noha.yml |  3 +-
 .../fortinet_get_system_ha_status_6.2.yml     |  3 +-
 .../fortinet_get_system_ha_status_6.4.yml     |  3 +-
 ...ortinet_get_system_ha_status_7.0_hbdev.raw | 43 +++++++++++++++++++
 ...ortinet_get_system_ha_status_7.0_hbdev.yml | 19 ++++++++
 ...rtinet_get_system_ha_status_7.0_mondev.raw | 40 +++++++++++++++++
 ...tinet_get_system_ha_status_7.0_mondev.yml} |  3 +-
 ...net_get_system_ha_status_7.0_unhealthy.raw | 40 -----------------
 11 files changed, 117 insertions(+), 49 deletions(-)
 create mode 100644 tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_hbdev.raw
 create mode 100644 tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_hbdev.yml
 create mode 100644 tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_mondev.raw
 rename tests/fortinet/get_system_ha_status/{fortinet_get_system_ha_status_7.0_unhealthy.yml => fortinet_get_system_ha_status_7.0_mondev.yml} (88%)
 delete mode 100644 tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_unhealthy.raw

diff --git a/ntc_templates/templates/fortinet_get_system_ha_status.textfsm b/ntc_templates/templates/fortinet_get_system_ha_status.textfsm
index 49109e0a66..617036675d 100644
--- a/ntc_templates/templates/fortinet_get_system_ha_status.textfsm
+++ b/ntc_templates/templates/fortinet_get_system_ha_status.textfsm
@@ -2,7 +2,7 @@
 # FG Version: 5.6, 6.0, 6.2, 6.4, 7.0
 # HW : varied
 #
-Value HA_HEALTH (\S+|.*)
+Value List HA_HEALTH (\S+(?:.*))
 Value MODEL (\S+)
 Value HA_MODE ([\S\s]+)
 Value HA_GROUP (\S+)
@@ -20,7 +20,7 @@ Value HA_SLAVE_UNIT_INDEX (\S+)
 
 Start
   ^HA\s+Health\s+Status:\s+${HA_HEALTH}
-  ^HA\s+Health\s+Status:$$ -> UnhealthyStatus
+  ^HA\s+Health\s+Status: -> UnhealthyStatus
   ^Model:\s+${MODEL}
   ^Mode:\s+${HA_MODE}
   ^Group:\s+${HA_GROUP}
@@ -38,7 +38,7 @@ Start
 
 UnhealthyStatus
   # semicolon necessary to anchor
-  ^${HA_HEALTH};$$
+  ^\s+${HA_HEALTH};\s*$$
   ^Model:\s+${MODEL} -> Start
 
 Configuration_Status
diff --git a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_5.6.yml b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_5.6.yml
index fa011d9dee..f345d83721 100644
--- a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_5.6.yml
+++ b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_5.6.yml
@@ -3,7 +3,8 @@ parsed_sample:
   - cluster_state_changed_time: "2020-11-18 20:06:07"
     cluster_uptime: "913 days 6:17:44"
     ha_group: "0"
-    ha_health: "OK"
+    ha_health:
+      - "OK"
     ha_master_unit_index: "0"
     ha_master_unit_name: "fgt_200d_b"
     ha_master_unit_serial: "FG200Dyyyyyyyyyy"
diff --git a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_6.0.yml b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_6.0.yml
index 02b16e163b..0961ddfdaf 100644
--- a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_6.0.yml
+++ b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_6.0.yml
@@ -3,7 +3,8 @@ parsed_sample:
   - cluster_state_changed_time: "2019-08-16 11:08:27"
     cluster_uptime: "764 days 22:45:44"
     ha_group: "0"
-    ha_health: "OK"
+    ha_health:
+      - "OK"
     ha_master_unit_index: "1"
     ha_master_unit_name: "fgt-200d_a"
     ha_master_unit_serial: "FG200Dxxxxxxxxxx"
diff --git a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_6.0_noha.yml b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_6.0_noha.yml
index 1b0ecf8358..a581b4445e 100644
--- a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_6.0_noha.yml
+++ b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_6.0_noha.yml
@@ -3,7 +3,8 @@ parsed_sample:
   - cluster_state_changed_time: "N/A"
     cluster_uptime: "0 days 0:0:0"
     ha_group: "0"
-    ha_health: "OK"
+    ha_health:
+      - "OK"
     ha_master_unit_index: ""
     ha_master_unit_name: ""
     ha_master_unit_serial: ""
diff --git a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_6.2.yml b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_6.2.yml
index 6c5b3a1667..cfb6c3e5bd 100644
--- a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_6.2.yml
+++ b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_6.2.yml
@@ -3,7 +3,8 @@ parsed_sample:
   - cluster_state_changed_time: "2020-12-02 22:40:46"
     cluster_uptime: "36 days 22:20:40"
     ha_group: "5"
-    ha_health: "OK"
+    ha_health:
+      - "OK"
     ha_master_unit_index: "1"
     ha_master_unit_name: "fgt-600e_a"
     ha_master_unit_serial: "FG6H0Exxxxxxxxxx"
diff --git a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_6.4.yml b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_6.4.yml
index b1a87717b5..03b58f4718 100644
--- a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_6.4.yml
+++ b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_6.4.yml
@@ -3,7 +3,8 @@ parsed_sample:
   - cluster_state_changed_time: "2021-10-12 11:24:28"
     cluster_uptime: "40 days 19:16:5"
     ha_group: "0"
-    ha_health: "OK"
+    ha_health:
+      - "OK"
     ha_master_unit_index: "0"
     ha_master_unit_name: "FW"
     ha_master_unit_serial: "FGXXXXXXXXXXXXXX"
diff --git a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_hbdev.raw b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_hbdev.raw
new file mode 100644
index 0000000000..531bac60fa
--- /dev/null
+++ b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_hbdev.raw
@@ -0,0 +1,43 @@
+HA Health Status:
+ WARNING: FGT40XXXXXXXXXXX has hbdev down;
+ WARNING: FGT40YYYYYYYYYYY has hbdev down;
+Model: FortiGate-40F
+Mode: HA A-P
+Group: 28
+Debug: 0
+Cluster Uptime: 23 days 1:31:39
+Cluster state change time: 2024-09-16 13:03:51
+Primary selected using:
+ <2024/09/16 13:03:51> FGT40XXXXXXXXXXX is selected as the primary because its override priority is larger than peer member FGT40YYYYYYYYYYY.
+ <2024/09/11 12:13:30> FGT40XXXXXXXXXXX is selected as the primary because it's the only member in the cluster.
+ <2024/09/09 16:15:43> FGT40XXXXXXXXXXX is selected as the primary because it's the only member in the cluster.
+ses_pickup: enable, ses_pickup_delay=disable
+override: enable
+Configuration Status:
+ FGT40XXXXXXXXXXX(updated 3 seconds ago): in-sync
+ FGT40YYYYYYYYYYY(updated 4 seconds ago): in-sync
+System Usage stats:
+ FGT40XXXXXXXXXXX(updated 3 seconds ago):
+ sessions=196, average-cpu-user/nice/system/idle=0%/0%/0%/100%, memory=34%
+ FGT40YYYYYYYYYYY(updated 4 seconds ago):
+ sessions=43, average-cpu-user/nice/system/idle=0%/0%/0%/99%, memory=31%
+HBDEV stats:
+ FGT40XXXXXXXXXXX(updated 3 seconds ago):
+ lan2: physical/00, down, rx-bytes/packets/dropped/errors=2289976940/6414484/0/0, tx=2334914934/6414604/0/0
+ lan3: physical/1000auto, up, rx-bytes/packets/dropped/errors=2859911090/8634708/0/0, tx=4096246721/11393870/0/0
+ FGT40YYYYYYYYYYY(updated 4 seconds ago):
+ lan2: physical/00, down, rx-bytes/packets/dropped/errors=2334910566/6414592/0/0, tx=2289993719/6414531/0/0
+ lan3: physical/1000auto, up, rx-bytes/packets/dropped/errors=4096363031/11393865/0/0, tx=2859518832/8634703/0/0
+MONDEV stats:
+ FGT40XXXXXXXXXXX(updated 3 seconds ago):
+ lan1: physical/100auto, up, rx-bytes/packets/dropped/errors=81549709010/222074671/0/0, tx=215809491211/241726140/0/0
+ wan: physical/100auto, up, rx-bytes/packets/dropped/errors=235849981176/264545599/0/0, tx=96429336043/236634292/0/0
+ FGT40YYYYYYYYYYY(updated 4 seconds ago):
+ lan1: physical/100auto, up, rx-bytes/packets/dropped/errors=422928973/3422783/0/0, tx=12363136/46749/0/0
+ wan: physical/100auto, up, rx-bytes/packets/dropped/errors=43917368/544179/0/0, tx=86212/874/0/0
+Primary : FGT-fw-a , FGT40XXXXXXXXXXX, HA cluster index = 1
+Secondary : FGT-fw-b , FGT40YYYYYYYYYYY, HA cluster index = 0
+number of vcluster: 1
+vcluster 1: work 169.254.0.2
+Primary: FGT40XXXXXXXXXXX, HA operating index = 0
+Secondary: FGT40YYYYYYYYYYY, HA operating index = 1
\ No newline at end of file
diff --git a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_hbdev.yml b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_hbdev.yml
new file mode 100644
index 0000000000..6c67d9ceed
--- /dev/null
+++ b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_hbdev.yml
@@ -0,0 +1,19 @@
+---
+parsed_sample:
+  - cluster_state_changed_time: "2024-09-16 13:03:51"
+    cluster_uptime: "23 days 1:31:39"
+    ha_group: "28"
+    ha_health:
+      - "WARNING: FGT40XXXXXXXXXXX has hbdev down"
+      - "WARNING: FGT40YYYYYYYYYYY has hbdev down"
+    ha_master_unit_index: "1"
+    ha_master_unit_name: "FGT-fw-a"
+    ha_master_unit_serial: "FGT40XXXXXXXXXXX"
+    ha_mode: "HA A-P"
+    ha_override_status: "enable"
+    ha_session_pickup_delay: "disable"
+    ha_session_pickup_status: "enable"
+    ha_slave_unit_index: "0"
+    ha_slave_unit_name: "FGT-fw-b"
+    ha_slave_unit_serial: "FGT40YYYYYYYYYYY"
+    model: "FortiGate-40F"
diff --git a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_mondev.raw b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_mondev.raw
new file mode 100644
index 0000000000..f52061b9d6
--- /dev/null
+++ b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_mondev.raw
@@ -0,0 +1,40 @@
+HA Health Status:
+ WARNING: FGT40FYYYYYYYYYY has mondev down;
+Model: FortiGate-40F
+Mode: HA A-P
+Group: 172
+Debug: 0
+Cluster Uptime: 63 days 22:15:42
+Cluster state change time: 2024-02-11 15:25:27
+Primary selected using:
+ <2024/02/11 15:25:27> FGT40FXXXXXXXXXX is selected as the primary because the value 0 of link-failure + pingsvr-failure is less than peer member FGT40FYYYYYYYYYY.
+ses_pickup: enable, ses_pickup_delay=disable
+override: enable
+Configuration Status:
+ FGT40FXXXXXXXXXX(updated 0 seconds ago): in-sync
+ FGT40FYYYYYYYYYY(updated 0 seconds ago): in-sync
+System Usage stats:
+ FGT40FXXXXXXXXXX(updated 0 seconds ago):
+ sessions=768, average-cpu-user/nice/system/idle=0%/0%/0%/99%, memory=35%
+ FGT40FYYYYYYYYYY(updated 0 seconds ago):
+ sessions=634, average-cpu-user/nice/system/idle=0%/0%/0%/100%, memory=31%
+HBDEV stats:
+ FGT40FXXXXXXXXXX(updated 0 seconds ago):
+ lan2: physical/1000auto, up, rx-bytes/packets/dropped/errors=9997131732/27616386/0/0, tx=10080077920/27616652/0/0
+ lan3: physical/1000auto, up, rx-bytes/packets/dropped/errors=11772621099/36693306/0/0, tx=26151306122/60128423/0/0
+ FGT40FYYYYYYYYYY(updated 0 seconds ago):
+ lan2: physical/1000auto, up, rx-bytes/packets/dropped/errors=10080077920/27616652/0/0, tx=9997131732/27616386/0/0
+ lan3: physical/1000auto, up, rx-bytes/packets/dropped/errors=26151777728/60128423/0/0, tx=11771044717/36693306/0/0
+MONDEV stats:
+ FGT40FXXXXXXXXXX(updated 0 seconds ago):
+ lan1: physical/100auto, up, rx-bytes/packets/dropped/errors=535463275509/3388288017/0/0, tx=3023591767050/4114831127/0/0
+ wan: physical/100auto, up, rx-bytes/packets/dropped/errors=3314385262333/4439875482/0/0, tx=768352772861/3445252569/0/0
+ FGT40FYYYYYYYYYY(updated 0 seconds ago):
+ lan1: physical/00, down, rx-bytes/packets/dropped/errors=0/0/0/0, tx=0/0/0/0
+ wan: physical/100auto, up, rx-bytes/packets/dropped/errors=15792718293/245544650/0/0, tx=0/0/0/0
+Primary : FGT-fw-a, FGT40FXXXXXXXXXX, HA cluster index = 1
+Secondary : FGT-fw-b, FGT40FYYYYYYYYYY, HA cluster index = 0
+number of vcluster: 1
+vcluster 1: work 169.254.0.2
+Primary: FGT40FXXXXXXXXXX, HA operating index = 0
+Secondary: FGT40FYYYYYYYYYY, HA operating index = 1
diff --git a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_unhealthy.yml b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_mondev.yml
similarity index 88%
rename from tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_unhealthy.yml
rename to tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_mondev.yml
index c0c2527872..b195cebe15 100644
--- a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_unhealthy.yml
+++ b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_mondev.yml
@@ -3,7 +3,8 @@ parsed_sample:
   - cluster_state_changed_time: "2024-02-11 15:25:27"
     cluster_uptime: "63 days 22:15:42"
     ha_group: "172"
-    ha_health: "WARNING: FGT40FYYYYYYYYYY has mondev down"
+    ha_health:
+      - "WARNING: FGT40FYYYYYYYYYY has mondev down"
     ha_master_unit_index: "1"
     ha_master_unit_name: "FGT-fw-a"
     ha_master_unit_serial: "FGT40FXXXXXXXXXX"
diff --git a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_unhealthy.raw b/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_unhealthy.raw
deleted file mode 100644
index 23bfd534c8..0000000000
--- a/tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_unhealthy.raw
+++ /dev/null
@@ -1,40 +0,0 @@
-HA Health Status:
-WARNING: FGT40FYYYYYYYYYY has mondev down;
-Model: FortiGate-40F
-Mode: HA A-P
-Group: 172
-Debug: 0
-Cluster Uptime: 63 days 22:15:42
-Cluster state change time: 2024-02-11 15:25:27
-Primary selected using:
-<2024/02/11 15:25:27> FGT40FXXXXXXXXXX is selected as the primary because the value 0 of link-failure + pingsvr-failure is less than peer member FGT40FYYYYYYYYYY.
-ses_pickup: enable, ses_pickup_delay=disable
-override: enable
-Configuration Status:
-FGT40FXXXXXXXXXX(updated 0 seconds ago): in-sync
-FGT40FYYYYYYYYYY(updated 0 seconds ago): in-sync
-System Usage stats:
-FGT40FXXXXXXXXXX(updated 0 seconds ago):
-sessions=768, average-cpu-user/nice/system/idle=0%/0%/0%/99%, memory=35%
-FGT40FYYYYYYYYYY(updated 0 seconds ago):
-sessions=634, average-cpu-user/nice/system/idle=0%/0%/0%/100%, memory=31%
-HBDEV stats:
-FGT40FXXXXXXXXXX(updated 0 seconds ago):
-lan2: physical/1000auto, up, rx-bytes/packets/dropped/errors=9997131732/27616386/0/0, tx=10080077920/27616652/0/0
-lan3: physical/1000auto, up, rx-bytes/packets/dropped/errors=11772621099/36693306/0/0, tx=26151306122/60128423/0/0
-FGT40FYYYYYYYYYY(updated 0 seconds ago):
-lan2: physical/1000auto, up, rx-bytes/packets/dropped/errors=10080077920/27616652/0/0, tx=9997131732/27616386/0/0
-lan3: physical/1000auto, up, rx-bytes/packets/dropped/errors=26151777728/60128423/0/0, tx=11771044717/36693306/0/0
-MONDEV stats:
-FGT40FXXXXXXXXXX(updated 0 seconds ago):
-lan1: physical/100auto, up, rx-bytes/packets/dropped/errors=535463275509/3388288017/0/0, tx=3023591767050/4114831127/0/0
-wan: physical/100auto, up, rx-bytes/packets/dropped/errors=3314385262333/4439875482/0/0, tx=768352772861/3445252569/0/0
-FGT40FYYYYYYYYYY(updated 0 seconds ago):
-lan1: physical/00, down, rx-bytes/packets/dropped/errors=0/0/0/0, tx=0/0/0/0
-wan: physical/100auto, up, rx-bytes/packets/dropped/errors=15792718293/245544650/0/0, tx=0/0/0/0
-Primary : FGT-fw-a, FGT40FXXXXXXXXXX, HA cluster index = 1
-Secondary : FGT-fw-b, FGT40FYYYYYYYYYY, HA cluster index = 0
-number of vcluster: 1
-vcluster 1: work 169.254.0.2
-Primary: FGT40FXXXXXXXXXX, HA operating index = 0
-Secondary: FGT40FYYYYYYYYYY, HA operating index = 1
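
A quick way to exercise the updated template outside the test suite is ntc-templates' parse_output() helper. The sketch below is illustrative only, not part of the patch; it assumes the usual platform/command mapping in the template index ("fortinet" / "get system ha status") and that it is run from the repository root so the fixture path added above resolves.

    from ntc_templates.parse import parse_output

    # Fixture added in this patch; the relative path assumes the repository
    # root as the current working directory.
    raw_path = "tests/fortinet/get_system_ha_status/fortinet_get_system_ha_status_7.0_hbdev.raw"
    with open(raw_path) as fh:
        raw_output = fh.read()

    # Map the raw CLI output through the fortinet get_system_ha_status template.
    parsed = parse_output(
        platform="fortinet",
        command="get system ha status",
        data=raw_output,
    )

    # HA_HEALTH is now a List value, so both "hbdev down" WARNING lines should
    # come back as a two-element list (per the hbdev .yml fixture above),
    # rather than only the first match as a single string.
    print(parsed[0]["ha_health"])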