From 4c3b144009c36a708e7cd444045b3fcc1774bac3 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Thu, 9 Feb 2017 11:27:34 +0000 Subject: [PATCH 01/24] Apply existing patches to make all networks common --- f5lbaasdriver/v2/bigip/service_builder.py | 48 +++++++++++------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/f5lbaasdriver/v2/bigip/service_builder.py b/f5lbaasdriver/v2/bigip/service_builder.py index 25abdc40..509471c3 100644 --- a/f5lbaasdriver/v2/bigip/service_builder.py +++ b/f5lbaasdriver/v2/bigip/service_builder.py @@ -94,11 +94,10 @@ def build(self, context, loadbalancer, agent): segment_data = self.disconnected_service.get_network_segment( context, agent_config, network) if segment_data: - network['provider:segmentation_id'] = \ - segment_data.get('segmentation_id', None) - if 'provider:network_type' in network: - network['provider:network_type'] = \ - segment_data.get('network_type', None) + network['provider:segmentation_id'] = segment_data.get('segmentation_id', None) + network['provider:network_type'] = segment_data.get('network_type', None) + network['provider:physical_network'] = segment_data.get('physical_network', None) + network_map[network_id] = network # Check if the tenant can create a loadbalancer on the network. @@ -324,25 +323,26 @@ def deserialize_agent_configurations(self, configurations): @log_helpers.log_method_call def _is_common_network(self, network, agent): - common_external_networks = False - common_networks = {} - - if agent and "configurations" in agent: - agent_configs = self.deserialize_agent_configurations( - agent['configurations']) - - if 'common_networks' in agent_configs: - common_networks = agent_configs['common_networks'] - - if 'f5_common_external_networks' in agent_configs: - common_external_networks = ( - agent_configs['f5_common_external_networks']) - - return (network['shared'] or - (network['id'] in common_networks) or - ('router:external' in network and - network['router:external'] and - common_external_networks)) + return True + # common_external_networks = False + # common_networks = {} + # + # if agent and "configurations" in agent: + # agent_configs = self.deserialize_agent_configurations( + # agent['configurations']) + # + # if 'common_networks' in agent_configs: + # common_networks = agent_configs['common_networks'] + # + # if 'f5_common_external_networks' in agent_configs: + # common_external_networks = ( + # agent_configs['f5_common_external_networks']) + # + # return (network['shared'] or + # (network['id'] in common_networks) or + # ('router:external' in network and + # network['router:external'] and + # common_external_networks)) def _valid_tenant_ids(self, network, lb_tenant_id, agent): if (network['tenant_id'] == lb_tenant_id): From f770bc3ca16570606e1b37f270c6a2c2b342e6ce Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Wed, 1 Mar 2017 09:28:15 +0000 Subject: [PATCH 02/24] Add RPC call for getting LBs per network --- f5lbaasdriver/v2/bigip/plugin_rpc.py | 44 ++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/f5lbaasdriver/v2/bigip/plugin_rpc.py b/f5lbaasdriver/v2/bigip/plugin_rpc.py index e20f797e..d2beb363 100644 --- a/f5lbaasdriver/v2/bigip/plugin_rpc.py +++ b/f5lbaasdriver/v2/bigip/plugin_rpc.py @@ -221,6 +221,50 @@ def get_pending_loadbalancers(self, context, env, group=None, host=None): else: return loadbalancers + @log_helpers.log_method_call + def get_loadbalancers_by_network(self, context, env, network_id, group=None, host=None,): + """Get all loadbalancers for 
this group in this env.""" + loadbalancers = [] + plugin = self.driver.plugin + + + network = self.driver.plugin.db._core_plugin.get_network( + context, + network_id) + + subnets = network['subnets'] + + + # get subnets on network and then filter based on the vip subnet. + + with context.session.begin(subtransactions=True): + agents = self.driver.scheduler.get_agents_in_env( + context, + self.driver.plugin, + env, + group) + + for agent in agents: + agent_lbs = plugin.db.list_loadbalancers_on_lbaas_agent( + context, + agent.id + ) + for lb in agent_lbs: + if lb.vip_subnet_id in subnets : + loadbalancers.append( + { + 'agent_host': agent['host'], + 'lb_id': lb.id, + 'tenant_id': lb.tenant_id, + 'network_id': network_id + } + ) + if host: + return [lb for lb in loadbalancers if lb['agent_host'] == host] + else: + return loadbalancers + + @log_helpers.log_method_call def update_loadbalancer_stats(self, context, From d94b5694b978ce5017d08a20fb6136cb5ca053fd Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 2 May 2017 13:28:05 +0100 Subject: [PATCH 03/24] Prevent ports being created for external members --- f5lbaasdriver/v2/bigip/service_builder.py | 39 ++++++++++++----------- 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/f5lbaasdriver/v2/bigip/service_builder.py b/f5lbaasdriver/v2/bigip/service_builder.py index 28d8e410..92e89e98 100644 --- a/f5lbaasdriver/v2/bigip/service_builder.py +++ b/f5lbaasdriver/v2/bigip/service_builder.py @@ -175,24 +175,27 @@ def _get_extended_member(self, context, member): if len(ports) == 1: member_dict['port'] = ports[0] self._populate_member_network(context, member_dict, network) - else: - if not ports: - cidr = IPNetwork(subnet['cidr']) - member_ip = IPNetwork("%s/%d" % - (member.address, cidr.prefixlen)) - if cidr == member_ip: - LOG.debug("Create port for member") - member_dict['port'] = \ - self.q_client.create_port_for_member( - context, member.address, - subnet_id=subnet_id) - self._populate_member_network( - context, member_dict, network) - else: - LOG.error("Member IP %s is not in subnet %s" % - (member.address, subnet['cidr'])) - else: - LOG.error("Multiple ports found: %s" % ports) + + # Do not manage neutron ports for external members + + # else: + # if not ports: + # cidr = IPNetwork(subnet['cidr']) + # member_ip = IPNetwork("%s/%d" % + # (member.address, cidr.prefixlen)) + # if cidr == member_ip: + # LOG.debug("Create port for member") + # member_dict['port'] = \ + # self.q_client.create_port_for_member( + # context, member.address, + # subnet_id=subnet_id) + # self._populate_member_network( + # context, member_dict, network) + # else: + # LOG.error("Member IP %s is not in subnet %s" % + # (member.address, subnet['cidr'])) + # else: + # LOG.error("Multiple ports found: %s" % ports) return (member_dict, subnet, network) From 237ae5259204111ffd6a13e11bb7be97b53c9350 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 30 May 2017 18:15:58 +0100 Subject: [PATCH 04/24] All networks are common --- f5lbaasdriver/v2/bigip/service_builder.py | 44 ++++++++++++----------- 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/f5lbaasdriver/v2/bigip/service_builder.py b/f5lbaasdriver/v2/bigip/service_builder.py index 92e89e98..a20dac05 100644 --- a/f5lbaasdriver/v2/bigip/service_builder.py +++ b/f5lbaasdriver/v2/bigip/service_builder.py @@ -109,7 +109,7 @@ def build(self, context, loadbalancer, agent): if (agent and not self._valid_tenant_ids(network, loadbalancer.tenant_id, agent)): - LOG.error("Creating a loadbalancer 
%s for tenant %s on a" + LOG.debug("Creating a loadbalancer %s for tenant %s on a" " non-shared network %s owned by %s." % ( loadbalancer.id, loadbalancer.tenant_id, @@ -350,25 +350,29 @@ def deserialize_agent_configurations(self, configurations): @log_helpers.log_method_call def _is_common_network(self, network, agent): - common_external_networks = False - common_networks = {} - - if agent and "configurations" in agent: - agent_configs = self.deserialize_agent_configurations( - agent['configurations']) - - if 'common_networks' in agent_configs: - common_networks = agent_configs['common_networks'] - - if 'f5_common_external_networks' in agent_configs: - common_external_networks = ( - agent_configs['f5_common_external_networks']) - - return (network['shared'] or - (network['id'] in common_networks) or - ('router:external' in network and - network['router:external'] and - common_external_networks)) + # all networks are common + return True + + + # common_external_networks = False + # common_networks = {} + # + # if agent and "configurations" in agent: + # agent_configs = self.deserialize_agent_configurations( + # agent['configurations']) + # + # if 'common_networks' in agent_configs: + # common_networks = agent_configs['common_networks'] + # + # if 'f5_common_external_networks' in agent_configs: + # common_external_networks = ( + # agent_configs['f5_common_external_networks']) + # + # return (network['shared'] or + # (network['id'] in common_networks) or + # ('router:external' in network and + # network['router:external'] and + # common_external_networks)) def _valid_tenant_ids(self, network, lb_tenant_id, agent): if (network['tenant_id'] == lb_tenant_id): From d9b33a1cbbddb96d78663b84aae3594501256021 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 13 Jun 2017 13:39:45 +0100 Subject: [PATCH 05/24] Update with previous patch --- f5lbaasdriver/v2/bigip/service_builder.py | 39 ++++++++++++----------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/f5lbaasdriver/v2/bigip/service_builder.py b/f5lbaasdriver/v2/bigip/service_builder.py index 2efb92f6..b90137f9 100644 --- a/f5lbaasdriver/v2/bigip/service_builder.py +++ b/f5lbaasdriver/v2/bigip/service_builder.py @@ -333,25 +333,26 @@ def deserialize_agent_configurations(self, configurations): @log_helpers.log_method_call def _is_common_network(self, network, agent): - common_external_networks = False - common_networks = {} - - if agent and "configurations" in agent: - agent_configs = self.deserialize_agent_configurations( - agent['configurations']) - - if 'common_networks' in agent_configs: - common_networks = agent_configs['common_networks'] - - if 'f5_common_external_networks' in agent_configs: - common_external_networks = ( - agent_configs['f5_common_external_networks']) - - return (network['shared'] or - (network['id'] in common_networks) or - ('router:external' in network and - network['router:external'] and - common_external_networks)) + return True + # common_external_networks = False + # common_networks = {} + # + # if agent and "configurations" in agent: + # agent_configs = self.deserialize_agent_configurations( + # agent['configurations']) + # + # if 'common_networks' in agent_configs: + # common_networks = agent_configs['common_networks'] + # + # if 'f5_common_external_networks' in agent_configs: + # common_external_networks = ( + # agent_configs['f5_common_external_networks']) + # + # return (network['shared'] or + # (network['id'] in common_networks) or + # ('router:external' in network and + # 
network['router:external'] and + # common_external_networks)) def _valid_tenant_ids(self, network, lb_tenant_id, agent): if (network['tenant_id'] == lb_tenant_id): From aa12dad1df09fa800cae3a8564d3bd69e6b4a71d Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Wed, 13 Sep 2017 10:01:56 +0100 Subject: [PATCH 06/24] Fetch ESD name as part of service definition --- f5lbaasdriver/v2/bigip/driver_v2.py | 11 ++++++++++ f5lbaasdriver/v2/bigip/service_builder.py | 25 +++-------------------- 2 files changed, 14 insertions(+), 22 deletions(-) diff --git a/f5lbaasdriver/v2/bigip/driver_v2.py b/f5lbaasdriver/v2/bigip/driver_v2.py index c8e4b28d..861e133d 100644 --- a/f5lbaasdriver/v2/bigip/driver_v2.py +++ b/f5lbaasdriver/v2/bigip/driver_v2.py @@ -38,6 +38,15 @@ from f5lbaasdriver.v2.bigip import neutron_client from f5lbaasdriver.v2.bigip import plugin_rpc +import urllib3 +import requests + +from requests.packages.urllib3.exceptions import InsecureRequestWarning + + +requests.packages.urllib3.disable_warnings(InsecureRequestWarning) +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + LOG = logging.getLogger(__name__) OPTS = [ @@ -75,6 +84,8 @@ class F5DriverV2(object): def __init__(self, plugin=None, env=None): """Driver initialization.""" + LOG.debug('F5 LBAAS driver initializing') + if not plugin: LOG.error('Required LBaaS Driver and Core Driver Missing') sys.exit(1) diff --git a/f5lbaasdriver/v2/bigip/service_builder.py b/f5lbaasdriver/v2/bigip/service_builder.py index b90137f9..6784abf5 100644 --- a/f5lbaasdriver/v2/bigip/service_builder.py +++ b/f5lbaasdriver/v2/bigip/service_builder.py @@ -331,28 +331,9 @@ def deserialize_agent_configurations(self, configurations): agent_conf = {} return agent_conf - @log_helpers.log_method_call + def _is_common_network(self, network, agent): return True - # common_external_networks = False - # common_networks = {} - # - # if agent and "configurations" in agent: - # agent_configs = self.deserialize_agent_configurations( - # agent['configurations']) - # - # if 'common_networks' in agent_configs: - # common_networks = agent_configs['common_networks'] - # - # if 'f5_common_external_networks' in agent_configs: - # common_external_networks = ( - # agent_configs['f5_common_external_networks']) - # - # return (network['shared'] or - # (network['id'] in common_networks) or - # ('router:external' in network and - # network['router:external'] and - # common_external_networks)) def _valid_tenant_ids(self, network, lb_tenant_id, agent): if (network['tenant_id'] == lb_tenant_id): @@ -435,7 +416,7 @@ def _get_listeners(self, context, loadbalancer): l7_policies=False ) listener_dict['l7_policies'] = \ - [{'id': l7_policy.id} for l7_policy in listener.l7_policies] + [{'id': l7_policy.id,'name':l7_policy.name} for l7_policy in listener.l7_policies] if listener.default_pool: listener_dict['default_pool_id'] = listener.default_pool.id @@ -510,7 +491,7 @@ def _pool_to_dict(self, pool): pool_dict['members'] = [{'id': member.id} for member in pool.members] pool_dict['listeners'] = [{'id': listener.id} for listener in pool.listeners] - pool_dict['l7_policies'] = [{'id': l7_policy.id} + pool_dict['l7_policies'] = [{'id': l7_policy.id,'name':l7_policy.name} for l7_policy in pool.l7_policies] if pool.session_persistence: pool_dict['session_persistence'] = ( From 507b63dd5aa43be0ad39153160d797e5579bed96 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 19 Sep 2017 10:23:43 +0100 Subject: [PATCH 07/24] Try to improve performance of get_all* queries --- 
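Note (below the scissors line, so git am keeps it out of the commit message): this patch drops the per-agent plugin.db.list_loadbalancers_on_lbaas_agent calls in favour of a local _list_loadbalancers_on_lbaas_agent helper that reads the agent-binding table directly. The helper is reworked by PATCH 10 ("Fix query") and PATCH 11-12, briefly reverted in PATCH 13, and restored in PATCH 14. The form the series settles on is sketched below for reference only; it assumes nothing beyond the names already used in these patches (agent_scheduler, models, self.driver.plugin.db._get_resources), and the applied hunks remain authoritative.

    def _list_loadbalancers_on_lbaas_agent(self, context, agent_id):
        # Ids of the loadbalancers scheduled to this agent.
        bindings = context.session.query(
            agent_scheduler.LoadbalancerAgentBinding.loadbalancer_id)
        bindings = bindings.filter_by(agent_id=agent_id)
        loadbalancer_ids = [item[0] for item in bindings]
        if not loadbalancer_ids:
            return []
        # Fetch all matching rows in one filtered query rather than
        # one query per loadbalancer.
        lbs = self.driver.plugin.db._get_resources(
            context, models.LoadBalancer,
            filters={'id': loadbalancer_ids})
        return [lb_db for lb_db in lbs]
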
f5lbaasdriver/v2/bigip/plugin_rpc.py | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/f5lbaasdriver/v2/bigip/plugin_rpc.py b/f5lbaasdriver/v2/bigip/plugin_rpc.py index 99e46614..f8099161 100644 --- a/f5lbaasdriver/v2/bigip/plugin_rpc.py +++ b/f5lbaasdriver/v2/bigip/plugin_rpc.py @@ -26,6 +26,9 @@ from neutron.extensions import portbindings from neutron.plugins.common import constants as plugin_constants from neutron_lbaas.db.loadbalancer import models +from neutron_lbaas import agent_scheduler +from neutron_lbaas.services.loadbalancer import data_models + from f5lbaasdriver.v2.bigip import constants_v2 as constants @@ -67,7 +70,7 @@ def get_active_loadbalancers_for_agent(self, context, host=None): return [] elif len(agents) > 1: LOG.warning('Multiple lbaas agents found on host %s' % host) - lbs = self.driver.plugin.db.list_loadbalancers_on_lbaas_agent( + lbs = self. self._list_loadbalancers_on_lbaas_agent( context, agents[0].id ) @@ -132,7 +135,7 @@ def get_all_loadbalancers(self, context, env, group=None, host=None): group) for agent in agents: - agent_lbs = plugin.db.list_loadbalancers_on_lbaas_agent( + agent_lbs = self._list_loadbalancers_on_lbaas_agent( context, agent.id ) @@ -165,7 +168,7 @@ def get_active_loadbalancers(self, context, env, group=None, host=None): ) for agent in agents: - agent_lbs = plugin.db.list_loadbalancers_on_lbaas_agent( + agent_lbs = self._list_loadbalancers_on_lbaas_agent( context, agent.id ) @@ -199,7 +202,7 @@ def get_pending_loadbalancers(self, context, env, group=None, host=None): group) for agent in agents: - agent_lbs = plugin.db.list_loadbalancers_on_lbaas_agent( + agent_lbs = self._list_loadbalancers_on_lbaas_agent( context, agent.id ) @@ -235,7 +238,6 @@ def get_loadbalancers_by_network(self, context, env, network_id, group=None, hos # get subnets on network and then filter based on the vip subnet. - with context.session.begin(subtransactions=True): agents = self.driver.scheduler.get_agents_in_env( context, @@ -244,7 +246,7 @@ def get_loadbalancers_by_network(self, context, env, network_id, group=None, hos group) for agent in agents: - agent_lbs = plugin.db.list_loadbalancers_on_lbaas_agent( + agent_lbs = self._list_loadbalancers_on_lbaas_agent( context, agent.id ) @@ -264,6 +266,20 @@ def get_loadbalancers_by_network(self, context, env, network_id, group=None, hos return loadbalancers + + def _list_loadbalancers_on_lbaas_agent(self, context, id): + query = context.session.query(models.LoadBalancer) + + query.outerjoin(agent_scheduler.LoadbalancerAgentBinding,models.LoadBalancer==agent_scheduler.LoadbalancerAgentBinding.loadbalancer_id) + + query = query.filter(agent_scheduler.LoadbalancerAgentBinding.agent_id == id) + + lbs = [lb_db for lb_db in query] + + return lbs + + + @log_helpers.log_method_call def update_loadbalancer_stats(self, context, From 8e6e2f44cfec743ed044d91012a85c095334c0b2 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 19 Sep 2017 14:16:58 +0100 Subject: [PATCH 08/24] Block deletion of pools in case they attached to L7 policies. Not ideal, because its not handled well in the API but prevents redundant device config. 
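The check looks for any neutron_lbaas L7 policy whose redirect_pool_id still
points at the pool and refuses the delete if one exists; a later patch in this
series additionally resets the pool status to ACTIVE when the delete is
rejected, since neutron-lbaas has already moved it to PENDING_DELETE. A
minimal sketch of the guard, equivalent to the hunk below:

    def _attached_to_policy(self, context, pool):
        # Any L7 policy that still redirects to this pool blocks deletion.
        query = context.session.query(models.L7Policy)
        query = query.filter(models.L7Policy.redirect_pool_id == pool.id)
        return query.count() > 0
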
--- f5lbaasdriver/v2/bigip/driver_v2.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/f5lbaasdriver/v2/bigip/driver_v2.py b/f5lbaasdriver/v2/bigip/driver_v2.py index 861e133d..bb8719f0 100644 --- a/f5lbaasdriver/v2/bigip/driver_v2.py +++ b/f5lbaasdriver/v2/bigip/driver_v2.py @@ -394,10 +394,24 @@ def update(self, context, old_pool, pool): def delete(self, context, pool): """Delete a pool.""" + if self._attached_to_policy(context,pool): + raise Exception("Cannot delete pool, attached to policy") + + + self.loadbalancer = pool.loadbalancer self.api_dict = self._get_pool_dict(pool) self._call_rpc(context, pool, 'delete_pool') + def _attached_to_policy(self, context, pool): + query = context.session.query(models.L7Policy) + query = query.filter((models.L7Policy).redirect_pool_id==pool.id) + + if query.count() > 0: + return True + + return False + class MemberManager(EntityManager): """MemberManager class handles Neutron LBaaS pool member CRUD.""" From 34334d8cff55b5801a469ed94942cd5a46b922a5 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 19 Sep 2017 15:46:39 +0100 Subject: [PATCH 09/24] Reset status if delete fails, unfortunately we have to assume ACTIVE, neutron_lbaas has already updated to PENDING DLEETE --- f5lbaasdriver/v2/bigip/driver_v2.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/f5lbaasdriver/v2/bigip/driver_v2.py b/f5lbaasdriver/v2/bigip/driver_v2.py index bb8719f0..f0d159cd 100644 --- a/f5lbaasdriver/v2/bigip/driver_v2.py +++ b/f5lbaasdriver/v2/bigip/driver_v2.py @@ -395,6 +395,8 @@ def delete(self, context, pool): """Delete a pool.""" if self._attached_to_policy(context,pool): + self.driver.plugin.db.update_status(context, models.PoolV2, pool.id, + plugin_constants.ACTIVE) raise Exception("Cannot delete pool, attached to policy") From 53347b80f3b289901b441738693763b3b6c93369 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Wed, 20 Sep 2017 12:37:36 +0100 Subject: [PATCH 10/24] Fix query --- f5lbaasdriver/v2/bigip/plugin_rpc.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/f5lbaasdriver/v2/bigip/plugin_rpc.py b/f5lbaasdriver/v2/bigip/plugin_rpc.py index f8099161..2f75b286 100644 --- a/f5lbaasdriver/v2/bigip/plugin_rpc.py +++ b/f5lbaasdriver/v2/bigip/plugin_rpc.py @@ -270,13 +270,16 @@ def get_loadbalancers_by_network(self, context, env, network_id, group=None, hos def _list_loadbalancers_on_lbaas_agent(self, context, id): query = context.session.query(models.LoadBalancer) - query.outerjoin(agent_scheduler.LoadbalancerAgentBinding,models.LoadBalancer==agent_scheduler.LoadbalancerAgentBinding.loadbalancer_id) - query = query.filter(agent_scheduler.LoadbalancerAgentBinding.agent_id == id) + query = context.session.query(agent_scheduler.LoadbalancerAgentBinding.loadbalancer_id) + query = query.filter_by(agent_id=id) + loadbalancer_ids = [item[0] for item in query] + if loadbalancer_ids: + lbs = self.get_loadbalancers(context, + filters={'id': loadbalancer_ids}) + return [lb_db for lb_db in lbs] - lbs = [lb_db for lb_db in query] - - return lbs + return [] From 929676ff358862b146c1e480d34c05f8e9327e3e Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Wed, 20 Sep 2017 13:07:21 +0100 Subject: [PATCH 11/24] Fix call to DB --- f5lbaasdriver/v2/bigip/plugin_rpc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/f5lbaasdriver/v2/bigip/plugin_rpc.py b/f5lbaasdriver/v2/bigip/plugin_rpc.py index 2f75b286..6a207e1f 100644 --- a/f5lbaasdriver/v2/bigip/plugin_rpc.py +++ 
b/f5lbaasdriver/v2/bigip/plugin_rpc.py @@ -275,7 +275,7 @@ def _list_loadbalancers_on_lbaas_agent(self, context, id): query = query.filter_by(agent_id=id) loadbalancer_ids = [item[0] for item in query] if loadbalancer_ids: - lbs = self.get_loadbalancers(context, + lbs = self.driver.plugin.db.get_loadbalancers(context, filters={'id': loadbalancer_ids}) return [lb_db for lb_db in lbs] From c1f63065ffa8452752afe596571e518cb009219c Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Wed, 20 Sep 2017 14:17:55 +0100 Subject: [PATCH 12/24] Avoid the n+1 in neutron lbaas --- f5lbaasdriver/v2/bigip/plugin_rpc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/f5lbaasdriver/v2/bigip/plugin_rpc.py b/f5lbaasdriver/v2/bigip/plugin_rpc.py index 6a207e1f..fcac9171 100644 --- a/f5lbaasdriver/v2/bigip/plugin_rpc.py +++ b/f5lbaasdriver/v2/bigip/plugin_rpc.py @@ -275,7 +275,7 @@ def _list_loadbalancers_on_lbaas_agent(self, context, id): query = query.filter_by(agent_id=id) loadbalancer_ids = [item[0] for item in query] if loadbalancer_ids: - lbs = self.driver.plugin.db.get_loadbalancers(context, + lbs = self.driver.plugin.db._get_resources(context, models.LoadBalancer, filters={'id': loadbalancer_ids}) return [lb_db for lb_db in lbs] From 7bdfcc8d1518afb0424f3aaa9cf18925d8db8fdc Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Mon, 25 Sep 2017 14:04:55 +0100 Subject: [PATCH 13/24] revert, patched core neutron-lbaas to fix performance issue --- f5lbaasdriver/v2/bigip/plugin_rpc.py | 28 ++++++---------------------- 1 file changed, 6 insertions(+), 22 deletions(-) diff --git a/f5lbaasdriver/v2/bigip/plugin_rpc.py b/f5lbaasdriver/v2/bigip/plugin_rpc.py index fcac9171..652bad65 100644 --- a/f5lbaasdriver/v2/bigip/plugin_rpc.py +++ b/f5lbaasdriver/v2/bigip/plugin_rpc.py @@ -25,9 +25,8 @@ from neutron.db import agents_db from neutron.extensions import portbindings from neutron.plugins.common import constants as plugin_constants -from neutron_lbaas.db.loadbalancer import models from neutron_lbaas import agent_scheduler -from neutron_lbaas.services.loadbalancer import data_models + from f5lbaasdriver.v2.bigip import constants_v2 as constants @@ -70,7 +69,7 @@ def get_active_loadbalancers_for_agent(self, context, host=None): return [] elif len(agents) > 1: LOG.warning('Multiple lbaas agents found on host %s' % host) - lbs = self. self._list_loadbalancers_on_lbaas_agent( + lbs = self. 
self.driver.plugin.db.list_loadbalancers_on_lbaas_agent( context, agents[0].id ) @@ -135,7 +134,7 @@ def get_all_loadbalancers(self, context, env, group=None, host=None): group) for agent in agents: - agent_lbs = self._list_loadbalancers_on_lbaas_agent( + agent_lbs = self.driver.plugin.db.list_loadbalancers_on_lbaas_agent( context, agent.id ) @@ -168,7 +167,7 @@ def get_active_loadbalancers(self, context, env, group=None, host=None): ) for agent in agents: - agent_lbs = self._list_loadbalancers_on_lbaas_agent( + agent_lbs = self.driver.plugin.db.list_loadbalancers_on_lbaas_agent( context, agent.id ) @@ -202,7 +201,7 @@ def get_pending_loadbalancers(self, context, env, group=None, host=None): group) for agent in agents: - agent_lbs = self._list_loadbalancers_on_lbaas_agent( + agent_lbs = self.driver.plugin.db.list_loadbalancers_on_lbaas_agent( context, agent.id ) @@ -246,7 +245,7 @@ def get_loadbalancers_by_network(self, context, env, network_id, group=None, hos group) for agent in agents: - agent_lbs = self._list_loadbalancers_on_lbaas_agent( + agent_lbs = self.driver.plugin.db.list_loadbalancers_on_lbaas_agent( context, agent.id ) @@ -267,21 +266,6 @@ def get_loadbalancers_by_network(self, context, env, network_id, group=None, hos - def _list_loadbalancers_on_lbaas_agent(self, context, id): - query = context.session.query(models.LoadBalancer) - - - query = context.session.query(agent_scheduler.LoadbalancerAgentBinding.loadbalancer_id) - query = query.filter_by(agent_id=id) - loadbalancer_ids = [item[0] for item in query] - if loadbalancer_ids: - lbs = self.driver.plugin.db._get_resources(context, models.LoadBalancer, - filters={'id': loadbalancer_ids}) - return [lb_db for lb_db in lbs] - - return [] - - @log_helpers.log_method_call def update_loadbalancer_stats(self, From 3f10de63437323726dc0f2449584d9e8d8b701da Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Mon, 25 Sep 2017 14:41:50 +0100 Subject: [PATCH 14/24] Revert "revert, patched core neutron-lbaas to fix performance issue" This reverts commit 7bdfcc8d1518afb0424f3aaa9cf18925d8db8fdc. --- f5lbaasdriver/v2/bigip/plugin_rpc.py | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/f5lbaasdriver/v2/bigip/plugin_rpc.py b/f5lbaasdriver/v2/bigip/plugin_rpc.py index 652bad65..fcac9171 100644 --- a/f5lbaasdriver/v2/bigip/plugin_rpc.py +++ b/f5lbaasdriver/v2/bigip/plugin_rpc.py @@ -25,8 +25,9 @@ from neutron.db import agents_db from neutron.extensions import portbindings from neutron.plugins.common import constants as plugin_constants +from neutron_lbaas.db.loadbalancer import models from neutron_lbaas import agent_scheduler - +from neutron_lbaas.services.loadbalancer import data_models from f5lbaasdriver.v2.bigip import constants_v2 as constants @@ -69,7 +70,7 @@ def get_active_loadbalancers_for_agent(self, context, host=None): return [] elif len(agents) > 1: LOG.warning('Multiple lbaas agents found on host %s' % host) - lbs = self. self.driver.plugin.db.list_loadbalancers_on_lbaas_agent( + lbs = self. 
self._list_loadbalancers_on_lbaas_agent( context, agents[0].id ) @@ -134,7 +135,7 @@ def get_all_loadbalancers(self, context, env, group=None, host=None): group) for agent in agents: - agent_lbs = self.driver.plugin.db.list_loadbalancers_on_lbaas_agent( + agent_lbs = self._list_loadbalancers_on_lbaas_agent( context, agent.id ) @@ -167,7 +168,7 @@ def get_active_loadbalancers(self, context, env, group=None, host=None): ) for agent in agents: - agent_lbs = self.driver.plugin.db.list_loadbalancers_on_lbaas_agent( + agent_lbs = self._list_loadbalancers_on_lbaas_agent( context, agent.id ) @@ -201,7 +202,7 @@ def get_pending_loadbalancers(self, context, env, group=None, host=None): group) for agent in agents: - agent_lbs = self.driver.plugin.db.list_loadbalancers_on_lbaas_agent( + agent_lbs = self._list_loadbalancers_on_lbaas_agent( context, agent.id ) @@ -245,7 +246,7 @@ def get_loadbalancers_by_network(self, context, env, network_id, group=None, hos group) for agent in agents: - agent_lbs = self.driver.plugin.db.list_loadbalancers_on_lbaas_agent( + agent_lbs = self._list_loadbalancers_on_lbaas_agent( context, agent.id ) @@ -266,6 +267,21 @@ def get_loadbalancers_by_network(self, context, env, network_id, group=None, hos + def _list_loadbalancers_on_lbaas_agent(self, context, id): + query = context.session.query(models.LoadBalancer) + + + query = context.session.query(agent_scheduler.LoadbalancerAgentBinding.loadbalancer_id) + query = query.filter_by(agent_id=id) + loadbalancer_ids = [item[0] for item in query] + if loadbalancer_ids: + lbs = self.driver.plugin.db._get_resources(context, models.LoadBalancer, + filters={'id': loadbalancer_ids}) + return [lb_db for lb_db in lbs] + + return [] + + @log_helpers.log_method_call def update_loadbalancer_stats(self, From dc19bbc7d06ee5ec5eb46cc5e4b2ab2bcbaaf86a Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Fri, 29 Sep 2017 16:41:31 +0100 Subject: [PATCH 15/24] Add provisioning status to l7 policy diict --- f5lbaasdriver/v2/bigip/service_builder.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/f5lbaasdriver/v2/bigip/service_builder.py b/f5lbaasdriver/v2/bigip/service_builder.py index 6784abf5..bb54f11e 100644 --- a/f5lbaasdriver/v2/bigip/service_builder.py +++ b/f5lbaasdriver/v2/bigip/service_builder.py @@ -416,7 +416,7 @@ def _get_listeners(self, context, loadbalancer): l7_policies=False ) listener_dict['l7_policies'] = \ - [{'id': l7_policy.id,'name':l7_policy.name} for l7_policy in listener.l7_policies] + [{'id': l7_policy.id,'name':l7_policy.name,'provisioning_status':l7_policy.provisioning_status} for l7_policy in listener.l7_policies] if listener.default_pool: listener_dict['default_pool_id'] = listener.default_pool.id @@ -491,7 +491,7 @@ def _pool_to_dict(self, pool): pool_dict['members'] = [{'id': member.id} for member in pool.members] pool_dict['listeners'] = [{'id': listener.id} for listener in pool.listeners] - pool_dict['l7_policies'] = [{'id': l7_policy.id,'name':l7_policy.name} + pool_dict['l7_policies'] = [{'id': l7_policy.id,'name':l7_policy.name,'provisioning_status':l7_policy.provisioning_status} for l7_policy in pool.l7_policies] if pool.session_persistence: pool_dict['session_persistence'] = ( From ce65619defb3488f8e4d3e3cc3e1a4718599a413 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Wed, 11 Oct 2017 09:30:34 +0100 Subject: [PATCH 16/24] =?UTF-8?q?Remove=20=C2=AE=20non=20ascii=20char?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CONTRIBUTING.md | 
4 +- dev_install | 4 ++ docs/_static/f5-openstack-agent.gre.ini | 70 +++++++++---------- docs/_static/f5-openstack-agent.grm.ini | 70 +++++++++---------- docs/_static/f5-openstack-agent.vlan.ini | 70 +++++++++---------- docs/_static/f5-openstack-agent.vxlan.ini | 70 +++++++++---------- docs/coding-example-lbaasv2.rst | 6 +- docs/includes/ref_agent-config-file.rst | 6 +- .../ref_lbaasv2-version-compatibility.rst | 2 +- .../ref_neutron-to-bigip-configs-table.rst | 2 +- docs/includes/ref_prerequisites.rst | 2 +- .../topic_agent-redundancy-scaleout.rst | 6 +- .../includes/topic_basic-environment-reqs.rst | 4 +- .../topic_capacity-based-scaleout.rst | 2 +- docs/includes/topic_cert-manager.rst | 8 +-- docs/includes/topic_clustering.rst | 4 +- .../topic_configure-neutron-lbaasv2.rst | 6 +- .../includes/topic_device-driver-settings.rst | 14 ++-- .../topic_differentiated-services.rst | 4 +- docs/includes/topic_environment-generator.rst | 2 +- .../topic_f5lbaas-l7_content_switching.rst | 6 +- docs/includes/topic_f5lbaas-vcmp.rst | 16 ++--- docs/includes/topic_global-routed-mode.rst | 14 ++-- docs/includes/topic_ha-modes.rst | 4 +- .../topic_hierarchical-port-binding.rst | 2 +- .../topic_l2-l3-segmentation-modes.rst | 22 +++--- .../topic_lbaasv2-plugin-overview.rst | 2 +- docs/includes/topic_multi-tenancy.rst | 2 +- .../topic_neutron-bigip-command-mapping.rst | 2 +- .../topic_supported-features-intro.rst | 2 +- .../topic_upgrading-f5-lbaasv2-plugin.rst | 2 +- docs/map_before-you-begin.rst | 4 +- docs/map_f5-lbaasv2-user-guide.rst | 2 +- .../map_multi-agents-in-diff-environments.rst | 4 +- docs/troubleshooting.rst | 4 +- .../tempest/services/clients/bigip_client.py | 2 +- .../services/clients/l7policy_client.py | 2 +- .../tempest/services/clients/l7rule_client.py | 2 +- .../services/clients/plugin_rpc_client.py | 2 +- .../test/tempest/tests/api/test_esd.py | 2 +- .../test/tempest/tests/api/test_l7policy.py | 2 +- .../tempest/tests/api/test_l7policy_rules.py | 2 +- .../tempest/tests/api/test_l7policy_update.py | 2 +- f5lbaasdriver/v2/bigip/agent_rpc.py | 2 +- f5lbaasdriver/v2/bigip/agent_scheduler.py | 2 +- f5lbaasdriver/v2/bigip/constants_v2.py | 2 +- f5lbaasdriver/v2/bigip/driver_v2.py | 4 +- f5lbaasdriver/v2/bigip/exceptions.py | 2 +- f5lbaasdriver/v2/bigip/neutron_client.py | 2 +- f5lbaasdriver/v2/bigip/plugin_rpc.py | 2 +- f5lbaasdriver/v2/bigip/service_builder.py | 2 +- 51 files changed, 241 insertions(+), 237 deletions(-) create mode 100755 dev_install diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c52ae28d..45d4fc00 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -53,7 +53,7 @@ $ py.test --cov ./ --cov-report=html $ open htmlcov/index.html ``` -If you are running our functional tests you will need a real BIG-IP® to run +If you are running our functional tests you will need a real BIG-IP to run them against, but you can get one of those pretty easily in [Amazon EC2](https://aws.amazon.com/marketplace/pp/B00JL3UASY/ref=srh_res_product_title?ie=UTF8&sr=0-10&qid=1449332167461). ## License @@ -72,5 +72,5 @@ See the License for the specific language governing permissions and limitations under the License. ### Contributor License Agreement -Individuals or business entities who contribute to this project must have completed and submitted the [F5® Contributor License Agreement](http://f5-openstack-docs.readthedocs.org/en/latest/cla_landing.html) to Openstack_CLA@f5.com prior to their code submission being included in this project. 
+Individuals or business entities who contribute to this project must have completed and submitted the [F5 Contributor License Agreement](http://f5-openstack-docs.readthedocs.org/en/latest/cla_landing.html) to Openstack_CLA@f5.com prior to their code submission being included in this project. diff --git a/dev_install b/dev_install new file mode 100755 index 00000000..e68d2cb6 --- /dev/null +++ b/dev_install @@ -0,0 +1,4 @@ +git init +python setup.py install + +/usr/local/bin/dumb-init /var/lib/kolla/venv/bin/neutron-server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/neutron_lbaas.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_f5.ini --config-file /etc/neutron/plugins/ml2/ml2-conf-aci.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_asr.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_manila.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_arista.ini --config-file /etc/neutron/plugins/cisco/cisco_device_manager_plugin.ini --config-file /etc/neutron/plugins/cisco/cisco_router_plugin.ini \ No newline at end of file diff --git a/docs/_static/f5-openstack-agent.gre.ini b/docs/_static/f5-openstack-agent.gre.ini index 50685500..d3443258 100644 --- a/docs/_static/f5-openstack-agent.gre.ini +++ b/docs/_static/f5-openstack-agent.gre.ini @@ -46,21 +46,21 @@ periodic_interval = 10 # # service_resync_interval = 500 # -# Objects created on the BIG-IP® by this agent will have their names prefixed +# Objects created on the BIG-IP by this agent will have their names prefixed # by an environment string. This allows you set this string. The default is # 'project'. # # WARNING - you should only set this before creating any objects. If you change # it with established objects, the objects created with an alternative prefix, # will no longer be associated with this agent and all objects in neutron -# and on the the BIG-IP® associated with the old environment will need to be managed +# and on the the BIG-IP associated with the old environment will need to be managed # manually. # ############################################################################### # Environment Settings ############################################################################### # -# Since many TMOS® object names must start with an alpha character +# Since many TMOS object names must start with an alpha character # the environment_prefix is used to prefix all service objects. # # environment_prefix = 'Project' @@ -132,10 +132,10 @@ f5_external_physical_mappings = default:1.1:True # Some systems require the need to bind and prune VLANs ids # allowed to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # tagged VLANs. When a VLAN tagged network is added to a -# specific BIG-IP® device, the facing switch port will need -# to allow traffic for that VLAN tag through to the BIG-IP®'s +# specific BIG-IP device, the facing switch port will need +# to allow traffic for that VLAN tag through to the BIG-IP's # port for traffic to flow. # # What is required is a software hook which allows the binding. @@ -150,12 +150,12 @@ f5_external_physical_mappings = default:1.1:True # any string which is meaningful to a vlan_binding_driver. It can be a # switch_id and port, or it might be a neutron port_id. 
# -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM interfaces will be collect # for each device and neutron will be queried to see if which # device port_ids correspond to known neutron ports. If they do, # automatic entries for all mapped port_ids will be made referencing -# the BIG-IP® device name and interface and the neutron port_ids. +# the BIG-IP device name and interface and the neutron port_ids. # # interface_port_static_mappings = {"device_name_1":{"interface_ida":"port_ida","interface_idb":"port_idb"}, {"device_name_2":{"interface_ida":"port_ida","interface_idb":"port_idb"}} # @@ -166,7 +166,7 @@ f5_external_physical_mappings = default:1.1:True # Device Tunneling (VTEP) selfips # # This is a single entry or comma separated list of cidr (h/m) format -# selfip addresses, one per BIG-IP® device, to use for VTEP addresses. +# selfip addresses, one per BIG-IP device, to use for VTEP addresses. # # If no gre or vxlan tunneling is required, these settings should be # commented out or set to None. @@ -210,10 +210,10 @@ advertised_tunnel_types = gre # # Device Tunneling (VTEP) selfips # -# This is a boolean entry which determines if they BIG-IP® will use +# This is a boolean entry which determines if they BIG-IP will use # L2 Population service to update its fdb tunnel entries. This needs # to be setup in accordance with the way the other tunnel agents are -# setup. If the BIG-IP® agent and other tunnel agents don't match +# setup. If the BIG-IP agent and other tunnel agents don't match # the tunnel setup will not work properly. # l2_population = True @@ -222,13 +222,13 @@ l2_population = True # L3 Segmentation Mode Settings ############################################################################### # -# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP® +# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP # # This setting will cause the agent to assume that all VIPs # and pool members will be reachable via global device -# L3 routes, which must be already provisioned on the BIG-IP®s. +# L3 routes, which must be already provisioned on the BIG-IPs. # -# In f5_global_routed_mode, BIG-IP® will not assume L2 +# In f5_global_routed_mode, BIG-IP will not assume L2 # adjacentcy to any neutron network, therefore no # L2 segementation between tenant services in the data plane # will be provisioned by the agent. Because the routing @@ -239,22 +239,22 @@ l2_population = True # # WARNING: setting this mode to True will override # the use_namespaces, setting it to False, because only -# one global routing space will used on the BIG-IP®. This +# one global routing space will used on the BIG-IP. This # means overlapping IP addresses between tenants is no # longer supported. # # WARNING: setting this mode to True will override # the f5_snat_mode, setting it to True, because pool members -# will never be considered L2 adjacent to the BIG-IP® by +# will never be considered L2 adjacent to the BIG-IP by # the agent. All member access will be via L3 routing, which -# will need to be set up on the BIG-IP® before LBaaS provisions +# will need to be set up on the BIG-IP before LBaaS provisions # resources on behalf of tenants. # # WARNING: setting this mode to True will override the # f5_snat_addresses_per_subnet, setting it to 0 (zero). 
# This will force all VIPs to use AutoMap SNAT for which # enough Self IP will need to be pre-provisioned on the -# BIG-IP® to handle all pool member connections. The SNAT, +# BIG-IP to handle all pool member connections. The SNAT, # an L3 mechanism, will all be global without reference # to any specific tenant SNAT pool. # @@ -263,12 +263,12 @@ l2_population = True # because no L2 information will be taken from # neutron, thus making the assumption that all VIP # L3 addresses will be globally routable without -# segmentation at L2 on the BIG-IP®. +# segmentation at L2 on the BIG-IP. # f5_global_routed_mode = False # # Allow overlapping IP subnets across multiple tenants. -# This creates route domains on BIG-IP® in order to +# This creates route domains on BIG-IP in order to # separate the tenant networks. # # This setting is forced to False if @@ -318,14 +318,14 @@ f5_route_domain_strictness = False # This setting will force the use of SNATs. # # If this is set to False, a SNAT will not -# be created (routed mode) and the BIG-IP® +# be created (routed mode) and the BIG-IP # will attempt to set up a floating self IP # as the subnet's default gateway address. # and a wild card IP forwarding virtual # server will be set up on member's network. # Setting this to False will mean Neutron # floating self IPs will not longer work -# if the same BIG-IP® device is not being used +# if the same BIG-IP device is not being used # as the Neutron Router implementation. # # This setting will be forced to True if @@ -363,16 +363,16 @@ f5_common_external_networks = True # separated list where if the name is a neutron # network id used for a vip or a pool member, # the network should not be created or deleted -# on the BIG-IP®, but rather assumed that the value +# on the BIG-IP, but rather assumed that the value # is the name of the network already created in # the Common partition with all L3 addresses # assigned to route domain 0. This is useful # for shared networks which are already defined -# on the BIG-IP® prior to LBaaS configuration. The +# on the BIG-IP prior to LBaaS configuration. The # network should not be managed by the LBaaS agent, # but can be used for VIPs or pool members # -# If your Internet VLAN on your BIG-IP® is named +# If your Internet VLAN on your BIG-IP is named # /Common/external, and that corresponds to # Neutron uuid: 71718972-78e2-449e-bb56-ce47cc9d2680 # then the entry would look like: @@ -391,7 +391,7 @@ f5_common_external_networks = True # Some systems require the need to bind L3 addresses # to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # untagged VLANs and is a nova guest instance. By # default, neutron will attempt to apply security rule # for anti-spoofing which will not allow just any L3 @@ -411,7 +411,7 @@ f5_common_external_networks = True # vary between providers. They may look like a neutron port id # and a nova guest instance id. # -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM MAC addresses will be collected # and neutron will be queried to see if the MAC addresses # correspond to known neutron ports. 
If they do, automatic entries @@ -430,7 +430,7 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # # ############################################################################### -# Device Driver - iControl® Driver Setting +# Device Driver - iControl Driver Setting ############################################################################### # # icontrol_hostname is valid for external device type only. @@ -443,17 +443,17 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # is not standalone, all devices in the sync failover # device group for the hostname specified must have # their management IP address reachable to the agent. -# If order to access devices' iControl® interfaces via +# If order to access devices' iControl interfaces via # self IPs, you should specify them as a comma # separated list below. # icontrol_hostname = 10.190.0.0 # -# If you are using vCMP® with VLANs, you will need to configure -# your vCMP® host addresses, in addition to the guests addresses. -# vCMP® Host access is necessary for provisioning VLANs to a guest. -# Use icontrol_hostname for vCMP® guests and icontrol_vcmp_hostname -# for vCMP® hosts. The plug-in will automatically determine +# If you are using vCMP with VLANs, you will need to configure +# your vCMP host addresses, in addition to the guests addresses. +# vCMP Host access is necessary for provisioning VLANs to a guest. +# Use icontrol_hostname for vCMP guests and icontrol_vcmp_hostname +# for vCMP hosts. The plug-in will automatically determine # which host corresponds to each guest. # # icontrol_vcmp_hostname = 192.168.1.245 @@ -503,7 +503,7 @@ icontrol_password = admin # protocol. You can define the parent profile for this profile by setting # f5_parent_ssl_profile. The profile created to support TERMINATTED_HTTPS will # inherit settings from the parent you define. This must be an existing profile, -# and if it does not exist on your BIG-IP® system the agent will use the default +# and if it does not exist on your BIG-IP system the agent will use the default # profile, clientssl. #f5_parent_ssl_profile = clientssl # diff --git a/docs/_static/f5-openstack-agent.grm.ini b/docs/_static/f5-openstack-agent.grm.ini index 45b878c8..4d4cfeda 100644 --- a/docs/_static/f5-openstack-agent.grm.ini +++ b/docs/_static/f5-openstack-agent.grm.ini @@ -46,21 +46,21 @@ periodic_interval = 10 # # service_resync_interval = 500 # -# Objects created on the BIG-IP® by this agent will have their names prefixed +# Objects created on the BIG-IP by this agent will have their names prefixed # by an environment string. This allows you set this string. The default is # 'Project'. # # WARNING - you should only set this before creating any objects. If you change # it with established objects, the objects created with an alternative prefix, # will no longer be associated with this agent and all objects in neutron -# and on the the BIG-IP® associated with the old environment will need to be managed +# and on the the BIG-IP associated with the old environment will need to be managed # manually. # ############################################################################### # Environment Settings ############################################################################### # -# Since many TMOS® object names must start with an alpha character +# Since many TMOS object names must start with an alpha character # the environment_prefix is used to prefix all service objects. 
# # environment_prefix = 'Project' @@ -132,10 +132,10 @@ f5_external_physical_mappings = default:1.1:True # Some systems require the need to bind and prune VLANs ids # allowed to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # tagged VLANs. When a VLAN tagged network is added to a -# specific BIG-IP® device, the facing switch port will need -# to allow traffic for that VLAN tag through to the BIG-IP®'s +# specific BIG-IP device, the facing switch port will need +# to allow traffic for that VLAN tag through to the BIG-IP's # port for traffic to flow. # # What is required is a software hook which allows the binding. @@ -150,12 +150,12 @@ f5_external_physical_mappings = default:1.1:True # any string which is meaningful to a vlan_binding_driver. It can be a # switch_id and port, or it might be a neutron port_id. # -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM interfaces will be collect # for each device and neutron will be queried to see if which # device port_ids correspond to known neutron ports. If they do, # automatic entries for all mapped port_ids will be made referencing -# the BIG-IP® device name and interface and the neutron port_ids. +# the BIG-IP device name and interface and the neutron port_ids. # # interface_port_static_mappings = {"device_name_1":{"interface_ida":"port_ida","interface_idb":"port_idb"}, {"device_name_2":{"interface_ida":"port_ida","interface_idb":"port_idb"}} # @@ -165,7 +165,7 @@ f5_external_physical_mappings = default:1.1:True # # Device Tunneling (VTEP) Self IPs # -# This is the name of a BIG-IP® self IP address to use for VTEP addresses. +# This is the name of a BIG-IP self IP address to use for VTEP addresses. # # If no gre or vxlan tunneling is required, these settings should be # commented out or set to None. @@ -209,10 +209,10 @@ f5_external_physical_mappings = default:1.1:True # # Device Tunneling (VTEP) selfips # -# This is a boolean entry which determines if they BIG-IP® will use +# This is a boolean entry which determines if they BIG-IP will use # L2 Population service to update its fdb tunnel entries. This needs # to be setup in accordance with the way the other tunnel agents are -# setup. If the BIG-IP® agent and other tunnel agents don't match +# setup. If the BIG-IP agent and other tunnel agents don't match # the tunnel setup will not work properly. # l2_population = True @@ -221,13 +221,13 @@ l2_population = True # L3 Segmentation Mode Settings ############################################################################### # -# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP® +# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP # # This setting will cause the agent to assume that all VIPs # and pool members will be reachable via global device -# L3 routes, which must be already provisioned on the BIG-IP®s. +# L3 routes, which must be already provisioned on the BIG-IPs. # -# In f5_global_routed_mode, BIG-IP® will not assume L2 +# In f5_global_routed_mode, BIG-IP will not assume L2 # adjacentcy to any neutron network, therefore no # L2 segementation between tenant services in the data plane # will be provisioned by the agent. 
Because the routing @@ -238,22 +238,22 @@ l2_population = True # # WARNING: setting this mode to True will override # the use_namespaces, setting it to False, because only -# one global routing space will used on the BIG-IP®. This +# one global routing space will used on the BIG-IP. This # means overlapping IP addresses between tenants is no # longer supported. # # WARNING: setting this mode to True will override # the f5_snat_mode, setting it to True, because pool members -# will never be considered L2 adjacent to the BIG-IP® by +# will never be considered L2 adjacent to the BIG-IP by # the agent. All member access will be via L3 routing, which -# will need to be set up on the BIG-IP® before LBaaS provisions +# will need to be set up on the BIG-IP before LBaaS provisions # resources on behalf of tenants. # # WARNING: setting this mode to True will override the # f5_snat_addresses_per_subnet, setting it to 0 (zero). # This will force all VIPs to use AutoMap SNAT for which # enough Self IP will need to be pre-provisioned on the -# BIG-IP® to handle all pool member connections. The SNAT, +# BIG-IP to handle all pool member connections. The SNAT, # an L3 mechanism, will all be global without reference # to any specific tenant SNAT pool. # @@ -262,12 +262,12 @@ l2_population = True # because no L2 information will be taken from # neutron, thus making the assumption that all VIP # L3 addresses will be globally routable without -# segmentation at L2 on the BIG-IP®. +# segmentation at L2 on the BIG-IP. # f5_global_routed_mode = True # # Allow overlapping IP subnets across multiple tenants. -# This creates route domains on BIG-IP® in order to +# This creates route domains on BIG-IP in order to # separate the tenant networks. # # This setting is forced to False if @@ -317,14 +317,14 @@ f5_route_domain_strictness = False # This setting will force the use of SNATs. # # If this is set to False, a SNAT will not -# be created (routed mode) and the BIG-IP® +# be created (routed mode) and the BIG-IP # will attempt to set up a floating self IP # as the subnet's default gateway address. # and a wild card IP forwarding virtual # server will be set up on member's network. # Setting this to False will mean Neutron # floating self IPs will not longer work -# if the same BIG-IP® device is not being used +# if the same BIG-IP device is not being used # as the Neutron Router implementation. # # This setting will be forced to True if @@ -362,16 +362,16 @@ f5_common_external_networks = True # separated list where if the name is a neutron # network id used for a vip or a pool member, # the network should not be created or deleted -# on the BIG-IP®, but rather assumed that the value +# on the BIG-IP, but rather assumed that the value # is the name of the network already created in # the Common partition with all L3 addresses # assigned to route domain 0. This is useful # for shared networks which are already defined -# on the BIG-IP® prior to LBaaS configuration. The +# on the BIG-IP prior to LBaaS configuration. The # network should not be managed by the LBaaS agent, # but can be used for VIPs or pool members # -# If your Internet VLAN on your BIG-IP® is named +# If your Internet VLAN on your BIG-IP is named # /Common/external, and that corresponds to # Neutron uuid: 71718972-78e2-449e-bb56-ce47cc9d2680 # then the entry would look like: @@ -390,7 +390,7 @@ f5_common_external_networks = True # Some systems require the need to bind L3 addresses # to specific ports, often for security. 
# -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # untagged VLANs and is a nova guest instance. By # default, neutron will attempt to apply security rule # for anti-spoofing which will not allow just any L3 @@ -410,7 +410,7 @@ f5_common_external_networks = True # vary between providers. They may look like a neutron port id # and a nova guest instance id. # -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM MAC addresses will be collected # and neutron will be queried to see if the MAC addresses # correspond to known neutron ports. If they do, automatic entries @@ -429,7 +429,7 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # # ############################################################################### -# Device Driver - iControl® Driver Setting +# Device Driver - iControl Driver Setting ############################################################################### # # icontrol_hostname is valid for external device type only. @@ -442,17 +442,17 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # is not standalone, all devices in the sync failover # device group for the hostname specified must have # their management IP address reachable to the agent. -# If order to access devices' iControl® interfaces via +# If order to access devices' iControl interfaces via # self IPs, you should specify them as a comma # separated list below. # icontrol_hostname = 10.190.0.0 # -# If you are using vCMP® with VLANs, you will need to configure -# your vCMP® host addresses, in addition to the guests addresses. -# vCMP® Host access is necessary for provisioning VLANs to a guest. -# Use icontrol_hostname for vCMP® guests and icontrol_vcmp_hostname -# for vCMP® hosts. The plug-in will automatically determine +# If you are using vCMP with VLANs, you will need to configure +# your vCMP host addresses, in addition to the guests addresses. +# vCMP Host access is necessary for provisioning VLANs to a guest. +# Use icontrol_hostname for vCMP guests and icontrol_vcmp_hostname +# for vCMP hosts. The plug-in will automatically determine # which host corresponds to each guest. # # icontrol_vcmp_hostname = 192.168.1.245 @@ -502,7 +502,7 @@ icontrol_password = admin # protocol. You can define the parent profile for this profile by setting # f5_parent_ssl_profile. The profile created to support TERMINATTED_HTTPS will # inherit settings from the parent you define. This must be an existing profile, -# and if it does not exist on your BIG-IP® system the agent will use the default +# and if it does not exist on your BIG-IP system the agent will use the default # profile, clientssl. #f5_parent_ssl_profile = clientssl # \ No newline at end of file diff --git a/docs/_static/f5-openstack-agent.vlan.ini b/docs/_static/f5-openstack-agent.vlan.ini index 3f2aa4d1..fb863bb8 100644 --- a/docs/_static/f5-openstack-agent.vlan.ini +++ b/docs/_static/f5-openstack-agent.vlan.ini @@ -46,21 +46,21 @@ periodic_interval = 10 # # service_resync_interval = 500 # -# Objects created on the BIG-IP® by this agent will have their names prefixed +# Objects created on the BIG-IP by this agent will have their names prefixed # by an environment string. This allows you set this string. The default is # 'project'. # # WARNING - you should only set this before creating any objects. 
If you change # it with established objects, the objects created with an alternative prefix, # will no longer be associated with this agent and all objects in neutron -# and on the the BIG-IP® associated with the old environment will need to be managed +# and on the the BIG-IP associated with the old environment will need to be managed # manually. # ############################################################################### # Environment Settings ############################################################################### # -# Since many TMOS® object names must start with an alpha character +# Since many TMOS object names must start with an alpha character # the environment_prefix is used to prefix all service objects. # # environment_prefix = 'Project' @@ -132,10 +132,10 @@ f5_external_physical_mappings = default:1.1:True # Some systems require the need to bind and prune VLANs ids # allowed to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # tagged VLANs. When a VLAN tagged network is added to a -# specific BIG-IP® device, the facing switch port will need -# to allow traffic for that VLAN tag through to the BIG-IP®'s +# specific BIG-IP device, the facing switch port will need +# to allow traffic for that VLAN tag through to the BIG-IP's # port for traffic to flow. # # What is required is a software hook which allows the binding. @@ -150,12 +150,12 @@ f5_external_physical_mappings = default:1.1:True # any string which is meaningful to a vlan_binding_driver. It can be a # switch_id and port, or it might be a neutron port_id. # -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM interfaces will be collect # for each device and neutron will be queried to see if which # device port_ids correspond to known neutron ports. If they do, # automatic entries for all mapped port_ids will be made referencing -# the BIG-IP® device name and interface and the neutron port_ids. +# the BIG-IP device name and interface and the neutron port_ids. # # interface_port_static_mappings = {"device_name_1":{"interface_ida":"port_ida","interface_idb":"port_idb"}, {"device_name_2":{"interface_ida":"port_ida","interface_idb":"port_idb"}} # @@ -166,7 +166,7 @@ f5_external_physical_mappings = default:1.1:True # Device Tunneling (VTEP) selfips # # This is a single entry or comma separated list of cidr (h/m) format -# selfip addresses, one per BIG-IP® device, to use for VTEP addresses. +# selfip addresses, one per BIG-IP device, to use for VTEP addresses. # # If no gre or vxlan tunneling is required, these settings should be # commented out or set to None. @@ -210,10 +210,10 @@ advertised_tunnel_types = # # Device Tunneling (VTEP) selfips # -# This is a boolean entry which determines if they BIG-IP® will use +# This is a boolean entry which determines if they BIG-IP will use # L2 Population service to update its fdb tunnel entries. This needs # to be setup in accordance with the way the other tunnel agents are -# setup. If the BIG-IP® agent and other tunnel agents don't match +# setup. If the BIG-IP agent and other tunnel agents don't match # the tunnel setup will not work properly. 
# l2_population = True @@ -222,13 +222,13 @@ l2_population = True # L3 Segmentation Mode Settings ############################################################################### # -# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP® +# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP # # This setting will cause the agent to assume that all VIPs # and pool members will be reachable via global device -# L3 routes, which must be already provisioned on the BIG-IP®s. +# L3 routes, which must be already provisioned on the BIG-IPs. # -# In f5_global_routed_mode, BIG-IP® will not assume L2 +# In f5_global_routed_mode, BIG-IP will not assume L2 # adjacentcy to any neutron network, therefore no # L2 segementation between tenant services in the data plane # will be provisioned by the agent. Because the routing @@ -239,22 +239,22 @@ l2_population = True # # WARNING: setting this mode to True will override # the use_namespaces, setting it to False, because only -# one global routing space will used on the BIG-IP®. This +# one global routing space will used on the BIG-IP. This # means overlapping IP addresses between tenants is no # longer supported. # # WARNING: setting this mode to True will override # the f5_snat_mode, setting it to True, because pool members -# will never be considered L2 adjacent to the BIG-IP® by +# will never be considered L2 adjacent to the BIG-IP by # the agent. All member access will be via L3 routing, which -# will need to be set up on the BIG-IP® before LBaaS provisions +# will need to be set up on the BIG-IP before LBaaS provisions # resources on behalf of tenants. # # WARNING: setting this mode to True will override the # f5_snat_addresses_per_subnet, setting it to 0 (zero). # This will force all VIPs to use AutoMap SNAT for which # enough Self IP will need to be pre-provisioned on the -# BIG-IP® to handle all pool member connections. The SNAT, +# BIG-IP to handle all pool member connections. The SNAT, # an L3 mechanism, will all be global without reference # to any specific tenant SNAT pool. # @@ -263,12 +263,12 @@ l2_population = True # because no L2 information will be taken from # neutron, thus making the assumption that all VIP # L3 addresses will be globally routable without -# segmentation at L2 on the BIG-IP®. +# segmentation at L2 on the BIG-IP. # f5_global_routed_mode = False # # Allow overlapping IP subnets across multiple tenants. -# This creates route domains on BIG-IP® in order to +# This creates route domains on BIG-IP in order to # separate the tenant networks. # # This setting is forced to False if @@ -318,14 +318,14 @@ f5_route_domain_strictness = False # This setting will force the use of SNATs. # # If this is set to False, a SNAT will not -# be created (routed mode) and the BIG-IP® +# be created (routed mode) and the BIG-IP # will attempt to set up a floating self IP # as the subnet's default gateway address. # and a wild card IP forwarding virtual # server will be set up on member's network. # Setting this to False will mean Neutron # floating self IPs will not longer work -# if the same BIG-IP® device is not being used +# if the same BIG-IP device is not being used # as the Neutron Router implementation. 
# # This setting will be forced to True if @@ -363,16 +363,16 @@ f5_common_external_networks = True # separated list where if the name is a neutron # network id used for a vip or a pool member, # the network should not be created or deleted -# on the BIG-IP®, but rather assumed that the value +# on the BIG-IP, but rather assumed that the value # is the name of the network already created in # the Common partition with all L3 addresses # assigned to route domain 0. This is useful # for shared networks which are already defined -# on the BIG-IP® prior to LBaaS configuration. The +# on the BIG-IP prior to LBaaS configuration. The # network should not be managed by the LBaaS agent, # but can be used for VIPs or pool members # -# If your Internet VLAN on your BIG-IP® is named +# If your Internet VLAN on your BIG-IP is named # /Common/external, and that corresponds to # Neutron uuid: 71718972-78e2-449e-bb56-ce47cc9d2680 # then the entry would look like: @@ -391,7 +391,7 @@ f5_common_external_networks = True # Some systems require the need to bind L3 addresses # to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # untagged VLANs and is a nova guest instance. By # default, neutron will attempt to apply security rule # for anti-spoofing which will not allow just any L3 @@ -411,7 +411,7 @@ f5_common_external_networks = True # vary between providers. They may look like a neutron port id # and a nova guest instance id. # -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM MAC addresses will be collected # and neutron will be queried to see if the MAC addresses # correspond to known neutron ports. If they do, automatic entries @@ -430,7 +430,7 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # # ############################################################################### -# Device Driver - iControl® Driver Setting +# Device Driver - iControl Driver Setting ############################################################################### # # icontrol_hostname is valid for external device type only. @@ -443,17 +443,17 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # is not standalone, all devices in the sync failover # device group for the hostname specified must have # their management IP address reachable to the agent. -# If order to access devices' iControl® interfaces via +# If order to access devices' iControl interfaces via # self IPs, you should specify them as a comma # separated list below. # icontrol_hostname = 10.190.0.0 # -# If you are using vCMP® with VLANs, you will need to configure -# your vCMP® host addresses, in addition to the guests addresses. -# vCMP® Host access is necessary for provisioning VLANs to a guest. -# Use icontrol_hostname for vCMP® guests and icontrol_vcmp_hostname -# for vCMP® hosts. The plug-in will automatically determine +# If you are using vCMP with VLANs, you will need to configure +# your vCMP host addresses, in addition to the guests addresses. +# vCMP Host access is necessary for provisioning VLANs to a guest. +# Use icontrol_hostname for vCMP guests and icontrol_vcmp_hostname +# for vCMP hosts. The plug-in will automatically determine # which host corresponds to each guest. 
# # icontrol_vcmp_hostname = 192.168.1.245 @@ -503,7 +503,7 @@ icontrol_password = admin # protocol. You can define the parent profile for this profile by setting # f5_parent_ssl_profile. The profile created to support TERMINATTED_HTTPS will # inherit settings from the parent you define. This must be an existing profile, -# and if it does not exist on your BIG-IP® system the agent will use the default +# and if it does not exist on your BIG-IP system the agent will use the default # profile, clientssl. #f5_parent_ssl_profile = clientssl # diff --git a/docs/_static/f5-openstack-agent.vxlan.ini b/docs/_static/f5-openstack-agent.vxlan.ini index 7bfd0815..9c1d252e 100644 --- a/docs/_static/f5-openstack-agent.vxlan.ini +++ b/docs/_static/f5-openstack-agent.vxlan.ini @@ -46,21 +46,21 @@ periodic_interval = 10 # # service_resync_interval = 500 # -# Objects created on the BIG-IP® by this agent will have their names prefixed +# Objects created on the BIG-IP by this agent will have their names prefixed # by an environment string. This allows you set this string. The default is # 'project'. # # WARNING - you should only set this before creating any objects. If you change # it with established objects, the objects created with an alternative prefix, # will no longer be associated with this agent and all objects in neutron -# and on the the BIG-IP® associated with the old environment will need to be managed +# and on the the BIG-IP associated with the old environment will need to be managed # manually. # ############################################################################### # Environment Settings ############################################################################### # -# Since many TMOS® object names must start with an alpha character +# Since many TMOS object names must start with an alpha character # the environment_prefix is used to prefix all service objects. # # environment_prefix = 'Project' @@ -132,10 +132,10 @@ f5_external_physical_mappings = default:1.1:True # Some systems require the need to bind and prune VLANs ids # allowed to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # tagged VLANs. When a VLAN tagged network is added to a -# specific BIG-IP® device, the facing switch port will need -# to allow traffic for that VLAN tag through to the BIG-IP®'s +# specific BIG-IP device, the facing switch port will need +# to allow traffic for that VLAN tag through to the BIG-IP's # port for traffic to flow. # # What is required is a software hook which allows the binding. @@ -150,12 +150,12 @@ f5_external_physical_mappings = default:1.1:True # any string which is meaningful to a vlan_binding_driver. It can be a # switch_id and port, or it might be a neutron port_id. # -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM interfaces will be collect # for each device and neutron will be queried to see if which # device port_ids correspond to known neutron ports. If they do, # automatic entries for all mapped port_ids will be made referencing -# the BIG-IP® device name and interface and the neutron port_ids. +# the BIG-IP device name and interface and the neutron port_ids. 
# # interface_port_static_mappings = {"device_name_1":{"interface_ida":"port_ida","interface_idb":"port_idb"}, {"device_name_2":{"interface_ida":"port_ida","interface_idb":"port_idb"}} # @@ -166,7 +166,7 @@ f5_external_physical_mappings = default:1.1:True # Device Tunneling (VTEP) selfips # # This is a single entry or comma separated list of cidr (h/m) format -# selfip addresses, one per BIG-IP® device, to use for VTEP addresses. +# selfip addresses, one per BIG-IP device, to use for VTEP addresses. # # If no gre or vxlan tunneling is required, these settings should be # commented out or set to None. @@ -211,10 +211,10 @@ advertised_tunnel_types = vxlan # # Device Tunneling (VTEP) selfips # -# This is a boolean entry which determines if they BIG-IP® will use +# This is a boolean entry which determines if they BIG-IP will use # L2 Population service to update its fdb tunnel entries. This needs # to be setup in accordance with the way the other tunnel agents are -# setup. If the BIG-IP® agent and other tunnel agents don't match +# setup. If the BIG-IP agent and other tunnel agents don't match # the tunnel setup will not work properly. # l2_population = True @@ -223,13 +223,13 @@ l2_population = True # L3 Segmentation Mode Settings ############################################################################### # -# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP® +# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP # # This setting will cause the agent to assume that all VIPs # and pool members will be reachable via global device -# L3 routes, which must be already provisioned on the BIG-IP®s. +# L3 routes, which must be already provisioned on the BIG-IPs. # -# In f5_global_routed_mode, BIG-IP® will not assume L2 +# In f5_global_routed_mode, BIG-IP will not assume L2 # adjacentcy to any neutron network, therefore no # L2 segementation between tenant services in the data plane # will be provisioned by the agent. Because the routing @@ -240,22 +240,22 @@ l2_population = True # # WARNING: setting this mode to True will override # the use_namespaces, setting it to False, because only -# one global routing space will used on the BIG-IP®. This +# one global routing space will used on the BIG-IP. This # means overlapping IP addresses between tenants is no # longer supported. # # WARNING: setting this mode to True will override # the f5_snat_mode, setting it to True, because pool members -# will never be considered L2 adjacent to the BIG-IP® by +# will never be considered L2 adjacent to the BIG-IP by # the agent. All member access will be via L3 routing, which -# will need to be set up on the BIG-IP® before LBaaS provisions +# will need to be set up on the BIG-IP before LBaaS provisions # resources on behalf of tenants. # # WARNING: setting this mode to True will override the # f5_snat_addresses_per_subnet, setting it to 0 (zero). # This will force all VIPs to use AutoMap SNAT for which # enough Self IP will need to be pre-provisioned on the -# BIG-IP® to handle all pool member connections. The SNAT, +# BIG-IP to handle all pool member connections. The SNAT, # an L3 mechanism, will all be global without reference # to any specific tenant SNAT pool. # @@ -264,12 +264,12 @@ l2_population = True # because no L2 information will be taken from # neutron, thus making the assumption that all VIP # L3 addresses will be globally routable without -# segmentation at L2 on the BIG-IP®. +# segmentation at L2 on the BIG-IP. 
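#
# As an illustrative example only: with global routed mode enabled, the
# effective combination of settings described in the warnings above is
# equivalent to configuring:
#
# f5_global_routed_mode = True
# use_namespaces = False              # forced, single global routing space
# f5_snat_mode = True                 # forced, members reached via L3 routes
# f5_snat_addresses_per_subnet = 0    # forced, VIPs use AutoMap SNAT
#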
# f5_global_routed_mode = False # # Allow overlapping IP subnets across multiple tenants. -# This creates route domains on BIG-IP® in order to +# This creates route domains on BIG-IP in order to # separate the tenant networks. # # This setting is forced to False if @@ -319,14 +319,14 @@ f5_route_domain_strictness = False # This setting will force the use of SNATs. # # If this is set to False, a SNAT will not -# be created (routed mode) and the BIG-IP® +# be created (routed mode) and the BIG-IP # will attempt to set up a floating self IP # as the subnet's default gateway address. # and a wild card IP forwarding virtual # server will be set up on member's network. # Setting this to False will mean Neutron # floating self IPs will not longer work -# if the same BIG-IP® device is not being used +# if the same BIG-IP device is not being used # as the Neutron Router implementation. # # This setting will be forced to True if @@ -364,16 +364,16 @@ f5_common_external_networks = True # separated list where if the name is a neutron # network id used for a vip or a pool member, # the network should not be created or deleted -# on the BIG-IP®, but rather assumed that the value +# on the BIG-IP, but rather assumed that the value # is the name of the network already created in # the Common partition with all L3 addresses # assigned to route domain 0. This is useful # for shared networks which are already defined -# on the BIG-IP® prior to LBaaS configuration. The +# on the BIG-IP prior to LBaaS configuration. The # network should not be managed by the LBaaS agent, # but can be used for VIPs or pool members # -# If your Internet VLAN on your BIG-IP® is named +# If your Internet VLAN on your BIG-IP is named # /Common/external, and that corresponds to # Neutron uuid: 71718972-78e2-449e-bb56-ce47cc9d2680 # then the entry would look like: @@ -392,7 +392,7 @@ f5_common_external_networks = True # Some systems require the need to bind L3 addresses # to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # untagged VLANs and is a nova guest instance. By # default, neutron will attempt to apply security rule # for anti-spoofing which will not allow just any L3 @@ -412,7 +412,7 @@ f5_common_external_networks = True # vary between providers. They may look like a neutron port id # and a nova guest instance id. # -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM MAC addresses will be collected # and neutron will be queried to see if the MAC addresses # correspond to known neutron ports. If they do, automatic entries @@ -431,7 +431,7 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # # ############################################################################### -# Device Driver - iControl® Driver Setting +# Device Driver - iControl Driver Setting ############################################################################### # # icontrol_hostname is valid for external device type only. @@ -444,17 +444,17 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # is not standalone, all devices in the sync failover # device group for the hostname specified must have # their management IP address reachable to the agent. 
-# If order to access devices' iControl® interfaces via +# If order to access devices' iControl interfaces via # self IPs, you should specify them as a comma # separated list below. # icontrol_hostname = 10.190.0.0 # -# If you are using vCMP® with VLANs, you will need to configure -# your vCMP® host addresses, in addition to the guests addresses. -# vCMP® Host access is necessary for provisioning VLANs to a guest. -# Use icontrol_hostname for vCMP® guests and icontrol_vcmp_hostname -# for vCMP® hosts. The plug-in will automatically determine +# If you are using vCMP with VLANs, you will need to configure +# your vCMP host addresses, in addition to the guests addresses. +# vCMP Host access is necessary for provisioning VLANs to a guest. +# Use icontrol_hostname for vCMP guests and icontrol_vcmp_hostname +# for vCMP hosts. The plug-in will automatically determine # which host corresponds to each guest. # # icontrol_vcmp_hostname = 192.168.1.245 @@ -504,7 +504,7 @@ icontrol_password = admin # protocol. You can define the parent profile for this profile by setting # f5_parent_ssl_profile. The profile created to support TERMINATTED_HTTPS will # inherit settings from the parent you define. This must be an existing profile, -# and if it does not exist on your BIG-IP® system the agent will use the default +# and if it does not exist on your BIG-IP system the agent will use the default # profile, clientssl. #f5_parent_ssl_profile = clientssl # diff --git a/docs/coding-example-lbaasv2.rst b/docs/coding-example-lbaasv2.rst index b22218fb..b8bce849 100644 --- a/docs/coding-example-lbaasv2.rst +++ b/docs/coding-example-lbaasv2.rst @@ -3,7 +3,7 @@ Coding Example ============== -We've provided some code examples below to help you get started with the F5® OpenStack LBaaSv2 agent and driver. This series demonstrates how to configure basic load balancing via the Neutron CLI. To access the full Neutron LBaaS command set, please see the `OpenStack CLI Documentation `_. LBaaSv2 commands all begin with ``lbaas``. +We've provided some code examples below to help you get started with the F5 OpenStack LBaaSv2 agent and driver. This series demonstrates how to configure basic load balancing via the Neutron CLI. To access the full Neutron LBaaS command set, please see the `OpenStack CLI Documentation `_. LBaaSv2 commands all begin with ``lbaas``. Create a load balancer @@ -70,9 +70,9 @@ The example command below shows how to create a listener that uses the ``TERMINA .. important:: - You must configure Barbican, Keystone, Neutron, and the F5® agent before you can create a tls load balancer. + You must configure Barbican, Keystone, Neutron, and the F5 agent before you can create a tls load balancer. See the `OpenStack LBaaS documentation `_ for further information and configuration instructions for the OpenStack pieces. - The necessary F5® agent configurations are described in :ref:`Certificate Manager / SSL Offloading`. + The necessary F5 agent configurations are described in :ref:`Certificate Manager / SSL Offloading`. diff --git a/docs/includes/ref_agent-config-file.rst b/docs/includes/ref_agent-config-file.rst index 248cb365..3ad233e4 100644 --- a/docs/includes/ref_agent-config-file.rst +++ b/docs/includes/ref_agent-config-file.rst @@ -3,14 +3,14 @@ Agent Configuration File ======================== -The agent configuration file -- :file:`/etc/neutron/services/f5/f5-openstack-agent.ini` -- controls how the agent interacts with your BIG-IP®(s). 
The file contains detailed descriptions of each available configuration option. +The agent configuration file -- :file:`/etc/neutron/services/f5/f5-openstack-agent.ini` -- controls how the agent interacts with your BIG-IP(s). The file contains detailed descriptions of each available configuration option. For reference, we've provided here a set of 'pre-configured' agent config files. These examples can help guide you in setting up the F5 agent to work with your specific environment. :ref:`Global Routed Mode` ------------------------- -Can be used for :term:`standalone`, :term:`overcloud` BIG-IP® VE deployments. +Can be used for :term:`standalone`, :term:`overcloud` BIG-IP VE deployments. * :download:`f5-openstack-agent.grm.ini <../_static/f5-openstack-agent.grm.ini>` @@ -18,7 +18,7 @@ Can be used for :term:`standalone`, :term:`overcloud` BIG-IP® VE deployments. :ref:`L2 Adjacent Mode` ------------------------------- -Can be used for :term:`standalone` or :term:`clustered` :term:`undercloud` BIG-IP® hardware or VE deployments. +Can be used for :term:`standalone` or :term:`clustered` :term:`undercloud` BIG-IP hardware or VE deployments. * :download:`f5-openstack-agent.gre.ini <../_static/f5-openstack-agent.gre.ini>` diff --git a/docs/includes/ref_lbaasv2-version-compatibility.rst b/docs/includes/ref_lbaasv2-version-compatibility.rst index 3460e01d..700be639 100644 --- a/docs/includes/ref_lbaasv2-version-compatibility.rst +++ b/docs/includes/ref_lbaasv2-version-compatibility.rst @@ -3,5 +3,5 @@ Release ------- -Release |version| is compatible with OpenStack |openstack|. For more information, please see the F5® OpenStack `Releases, Versioning, and Support Matrix `_. +Release |version| is compatible with OpenStack |openstack|. For more information, please see the F5 OpenStack `Releases, Versioning, and Support Matrix `_. diff --git a/docs/includes/ref_neutron-to-bigip-configs-table.rst b/docs/includes/ref_neutron-to-bigip-configs-table.rst index b1cfb5cf..3e4bdcc1 100644 --- a/docs/includes/ref_neutron-to-bigip-configs-table.rst +++ b/docs/includes/ref_neutron-to-bigip-configs-table.rst @@ -3,7 +3,7 @@ Neutron Command to BIG-IP Configuration Mapping Table ===================================================== -F5 LBaaSv2 uses the `f5-sdk `_ to communicate with BIG-IP via the iControl® REST API. The table below shows the corresponding iControl endpoint and BIG-IP object for each neutron lbaas- ‘create’ command. +F5 LBaaSv2 uses the `f5-sdk `_ to communicate with BIG-IP via the iControl REST API. The table below shows the corresponding iControl endpoint and BIG-IP object for each neutron lbaas- ‘create’ command. +----------------------------------------+-----------------------------------------------------------------------------------------+-----------------------------------+ | Command | URI | BIG-IP Configurations Applied | diff --git a/docs/includes/ref_prerequisites.rst b/docs/includes/ref_prerequisites.rst index b75e9ec5..66dec60e 100644 --- a/docs/includes/ref_prerequisites.rst +++ b/docs/includes/ref_prerequisites.rst @@ -56,7 +56,7 @@ - Three (3) VLANs :ref:`configured in Neutron ` -- 'mgmt', 'control', and 'data' -- to be used for system management, high availability (if desired), and data traffic, respectively. -- At least two (2) VLANs :ref:`configured in Neutron ` -- 'mgmt' and 'data' - to be used for BIG-IP® system management and client-server data traffic, respectively. 
+- At least two (2) VLANs :ref:`configured in Neutron ` -- 'mgmt' and 'data' - to be used for BIG-IP system management and client-server data traffic, respectively. - VLANs :ref:`configured in Neutron ` or `on the BIG-IP `_, as appropriate for your environment. diff --git a/docs/includes/topic_agent-redundancy-scaleout.rst b/docs/includes/topic_agent-redundancy-scaleout.rst index 4ac397b6..f128c6cb 100644 --- a/docs/includes/topic_agent-redundancy-scaleout.rst +++ b/docs/includes/topic_agent-redundancy-scaleout.rst @@ -10,7 +10,7 @@ Overview We refer to 'hosts' a lot in this document. A 'host' could be a Neutron controller, a compute node, a container, etc.; the important takeaway is that in order to run multiple agents in one environment, **each agent must have a unique** ``hostname``. [#]_ -When the Neutron LBaaS plugin loads the F5® LBaaSv2 driver, it creates a global messaging queue to be used for all callbacks and status update requests from F5 LBaaSv2 agents. Requests are passed from the global messaging queue to F5 LBaaSv2 drivers in a round-robin fashion, then passed on to an F5 agent as described in the :ref:`Agent-Tenant Affinity` section. +When the Neutron LBaaS plugin loads the F5 LBaaSv2 driver, it creates a global messaging queue to be used for all callbacks and status update requests from F5 LBaaSv2 agents. Requests are passed from the global messaging queue to F5 LBaaSv2 drivers in a round-robin fashion, then passed on to an F5 agent as described in the :ref:`Agent-Tenant Affinity` section. Agent-Tenant Affinity ````````````````````` @@ -83,7 +83,7 @@ To manage one BIG-IP device or device service group with multiple F5 agents, dep .. tip:: - * Be sure to provide the iControl® endpoints for all BIG-IP devices you'd like the agents to manage. + * Be sure to provide the iControl endpoints for all BIG-IP devices you'd like the agents to manage. * You can configure the F5 agent once, on the Neutron controller, then copy the agent config file (:file:`/etc/neutron/services/f5/f5-openstack-agent.ini`) over to the other hosts. #. :ref:`Start the F5 agent` on each host. @@ -102,5 +102,5 @@ Further Reading * :ref:`Multiple Agents and Differentiated Service Environments` -.. [#] **F5 Networks® does not provide support for container service deployments.** If you are already well versed in containerized environments, you can run one F5 agent per container. The neutron.conf file must be present in the container. The service provider driver does not need to run in the container; rather, it only needs to be in the container's build context. +.. [#] **F5 Networks does not provide support for container service deployments.** If you are already well versed in containerized environments, you can run one F5 agent per container. The neutron.conf file must be present in the container. The service provider driver does not need to run in the container; rather, it only needs to be in the container's build context. diff --git a/docs/includes/topic_basic-environment-reqs.rst b/docs/includes/topic_basic-environment-reqs.rst index 1a494b44..bbbd66ee 100644 --- a/docs/includes/topic_basic-environment-reqs.rst +++ b/docs/includes/topic_basic-environment-reqs.rst @@ -4,7 +4,7 @@ Basic Environment Requirements for F5 LBaaSv2 ============================================= -This document provides the minimum basic requirements for using F5® LBaaSv2 in OpenStack |openstack|. +This document provides the minimum basic requirements for using F5 LBaaSv2 in OpenStack |openstack|. 
OpenStack Requirements ---------------------- @@ -41,7 +41,7 @@ BIG-IP Requirements .. important:: - - You must have the appropriate `license`_ for the BIG-IP® features you wish to use. + - You must have the appropriate `license`_ for the BIG-IP features you wish to use. - All numbers shown in the table below are per BIG-IP device. diff --git a/docs/includes/topic_capacity-based-scaleout.rst b/docs/includes/topic_capacity-based-scaleout.rst index 47010a2c..dd741398 100644 --- a/docs/includes/topic_capacity-based-scaleout.rst +++ b/docs/includes/topic_capacity-based-scaleout.rst @@ -6,7 +6,7 @@ Capacity-Based Scale Out Overview -------- -When using :ref:`differentiated service environments `, you can configure capacity metrics for the F5® agent to provide scale out across multiple BIG-IP device groups. The F5 agent :ref:`configuration parameters ` ``environment_group_number`` and ``environment_capacity_score`` allow the F5 LBaaSv2 agent scheduler to assign requests to the group that has the lowest capacity score. +When using :ref:`differentiated service environments `, you can configure capacity metrics for the F5 agent to provide scale out across multiple BIG-IP device groups. The F5 agent :ref:`configuration parameters ` ``environment_group_number`` and ``environment_capacity_score`` allow the F5 LBaaSv2 agent scheduler to assign requests to the group that has the lowest capacity score. Each F5 agent expected to manage a specific :term:`device group` must be configured with the same ``icontrol_endpoints``. They must also be configured with the same ``environment_group_number``; this is used by the F5 LBaaSv2 driver to map the agents to the BIG-IP device group. The ``environment_group_number`` provides a convenient way for the F5 driver to identify agents that are available to handle requests for any of the devices in a given group. diff --git a/docs/includes/topic_cert-manager.rst b/docs/includes/topic_cert-manager.rst index 15782032..2208bbec 100644 --- a/docs/includes/topic_cert-manager.rst +++ b/docs/includes/topic_cert-manager.rst @@ -10,9 +10,9 @@ Overview OpenStack's 'Barbican' certificate manager provides a secure location where users can store sensitive information, such as SSH keys, private keys, certificates, and user passwords (referred to as "`secrets`_ " in OpenStack lingo). -The F5® agent uses Barbican certificates to perform :term:`SSL offloading` on BIG-IP®. It allows users to either create a new SSL profile, or to designate an existing `BIG-IP SSL profile`_ as the parent from which client profiles created for LBaaS objects will inherit settings. +The F5 agent uses Barbican certificates to perform :term:`SSL offloading` on BIG-IP. It allows users to either create a new SSL profile, or to designate an existing `BIG-IP SSL profile`_ as the parent from which client profiles created for LBaaS objects will inherit settings. -In general, SSL offloading frees up server and application capacity for handling traffic by shifting authentication processing from the target server to a designated authentication server. As shown in the diagram, once an admin user has added `secrets`_ to a Barbican container, he can use it to create a :ref:`TLS load balancer `. After the certificate data is validated, the F5® agent configures the load balancer on the BIG-IP. +In general, SSL offloading frees up server and application capacity for handling traffic by shifting authentication processing from the target server to a designated authentication server. 
As shown in the diagram, once an admin user has added `secrets`_ to a Barbican container, he can use it to create a :ref:`TLS load balancer `. After the certificate data is validated, the F5 agent configures the load balancer on the BIG-IP. .. figure:: ../media/LBaaS_cert-mgr_with-legend.jpg :alt: SSL Offloading with OpenStack Barbican, Neutron LBaaSv2, and BIG-IP @@ -107,7 +107,7 @@ Configuration 4. Set the BIG-IP parent SSL profile. - - ``f5_parent_ssl_profile``: The parent SSL profile on the BIG-IP® from which the agent SSL profile should inherit settings + - ``f5_parent_ssl_profile``: The parent SSL profile on the BIG-IP from which the agent SSL profile should inherit settings .. topic:: Example @@ -120,7 +120,7 @@ Configuration # protocol. You can define the parent profile for this profile by setting # f5_parent_ssl_profile. The profile created to support TERMINATED_HTTPS will # inherit settings from the parent you define. This must be an existing profile, - # and if it does not exist on your BIG-IP® system the agent will use the default + # and if it does not exist on your BIG-IP system the agent will use the default # profile, clientssl. #f5_parent_ssl_profile = clientssl # diff --git a/docs/includes/topic_clustering.rst b/docs/includes/topic_clustering.rst index 0d6f5a86..8ccb4d9e 100644 --- a/docs/includes/topic_clustering.rst +++ b/docs/includes/topic_clustering.rst @@ -6,7 +6,7 @@ Manage BIG-IP Clusters with F5 LBaaSv2 Overview -------- -The F5® LBaaSv2 agent and driver can manage BIG-IP® :term:`device service clusters`, providing :term:`high availability`, :term:`mirroring`, and :term:`failover` services within your OpenStack cloud. +The F5 LBaaSv2 agent and driver can manage BIG-IP :term:`device service clusters`, providing :term:`high availability`, :term:`mirroring`, and :term:`failover` services within your OpenStack cloud. The F5 agent applies LBaaS configuration changes to each BIG-IP :term:`device` in a cluster at the same time, in real time. It is unnecessary to use BIG-IP's '`configuration synchronization`_ mode' to sync LBaaS objects managed by the agent across the devices in a cluster. @@ -100,7 +100,7 @@ Configuration # # -#. Add the IP address for each BIG-IP device, the admin username, and the admin password to the :ref:`Device Driver - iControl® Driver Setting ` section of the config file. Values must be comma-separated. +#. Add the IP address for each BIG-IP device, the admin username, and the admin password to the :ref:`Device Driver - iControl Driver Setting ` section of the config file. Values must be comma-separated. .. code-block:: text :emphasize-lines: 10 diff --git a/docs/includes/topic_configure-neutron-lbaasv2.rst b/docs/includes/topic_configure-neutron-lbaasv2.rst index 32eadecd..03d03547 100644 --- a/docs/includes/topic_configure-neutron-lbaasv2.rst +++ b/docs/includes/topic_configure-neutron-lbaasv2.rst @@ -5,9 +5,9 @@ Configure Neutron for LBaaSv2 ============================= -You will need to make a few configurations in your Neutron environment in order to use the F5® OpenStack LBaasv2 driver and agent. +You will need to make a few configurations in your Neutron environment in order to use the F5 OpenStack LBaasv2 driver and agent. -First, you'll need to set F5 Networks® as the Neutron LBaaSv2 service provider driver. Then, add the LBaaSv2 plugin to the list of service plugins in the Neutron configuration file. +First, you'll need to set F5 Networks as the Neutron LBaaSv2 service provider driver. 
Then, add the LBaaSv2 plugin to the list of service plugins in the Neutron configuration file. Set 'F5Networks' as the LBaaSv2 Service Provider ------------------------------------------------ @@ -25,7 +25,7 @@ Edit the ``service_providers`` section of :file:`/etc/neutron/neutron_lbaas.conf .. note:: - If there is an active entry for the F5® LBaaSv1 service provider driver, comment (#) it out. + If there is an active entry for the F5 LBaaSv1 service provider driver, comment (#) it out. Add the Neutron LBaaSv2 Service Plugin -------------------------------------- diff --git a/docs/includes/topic_device-driver-settings.rst b/docs/includes/topic_device-driver-settings.rst index a1bd5daa..c7e3cae7 100644 --- a/docs/includes/topic_device-driver-settings.rst +++ b/docs/includes/topic_device-driver-settings.rst @@ -8,9 +8,9 @@ Device Driver Settings / iControl Driver Settings Overview -------- -The Device Driver Settings in the :ref:`Agent Configuration File` provide the means of communication between the F5® agent and BIG-IP® device(s). **Do not change this setting**. +The Device Driver Settings in the :ref:`Agent Configuration File` provide the means of communication between the F5 agent and BIG-IP device(s). **Do not change this setting**. -The iControl® Driver Settings identify the BIG-IP device(s) that you want the F5 agent to manage and record the login information the agent will use to communicate with the BIG-IP(s). +The iControl Driver Settings identify the BIG-IP device(s) that you want the F5 agent to manage and record the login information the agent will use to communicate with the BIG-IP(s). Use Case -------- @@ -29,7 +29,7 @@ Prerequisites - Administrator access to both BIG-IP device(s) and OpenStack cloud. -- Basic understanding of `BIG-IP® system configuration `_. +- Basic understanding of `BIG-IP system configuration `_. - F5 :ref:`agent ` and :ref:`service provider driver ` installed on the Neutron controller and all other hosts for which you want to provision LBaaS services. @@ -37,7 +37,7 @@ Prerequisites Caveats ------- -- vCMP® is unsupported in this release (v |release|). +- vCMP is unsupported in this release (v |release|). Configuration @@ -62,7 +62,7 @@ Configuration :emphasize-lines: 17, 31, 36 ############################################################################### - # Device Driver - iControl® Driver Setting + # Device Driver - iControl Driver Setting ############################################################################### # # This setting can be either a single IP address or a @@ -73,13 +73,13 @@ Configuration # is not standalone, all devices in the sync failover # device group for the hostname specified must have # their management IP address reachable to the agent. - # In order to access devices' iControl® interfaces via + # In order to access devices' iControl interfaces via # self IPs, you should specify them as a comma # separated list below. # icontrol_hostname = 10.190.7.232 \\ replace with the IP address(es) of your BIG-IP(s) # - # If you are using vCMP® with VLANs, you will need to configure + # If you are using vCMP with VLANs, you will need to configure # your vCMP host addresses, in addition to the guests addresses. # vCMP Host access is necessary for provisioning VLANs to a guest. 
# Use icontrol_hostname for vCMP guests and icontrol_vcmp_hostname diff --git a/docs/includes/topic_differentiated-services.rst b/docs/includes/topic_differentiated-services.rst index 0ea09620..af2454c6 100644 --- a/docs/includes/topic_differentiated-services.rst +++ b/docs/includes/topic_differentiated-services.rst @@ -6,9 +6,9 @@ Differentiated Service Environments Overview -------- -The F5® LBaaSv2 driver and F5 agent can manage multiple BIG-IP environments. In a :dfn:`differentiated service environment` -- a uniquely-named environment for which dedicated F5 LBaaS services are required -- the F5 driver has its own, uniquely-named messaging queue. The F5 LBaaS agent scheduler for a differentiated service environment can only assign tasks to agents running in that environment. +The F5 LBaaSv2 driver and F5 agent can manage multiple BIG-IP environments. In a :dfn:`differentiated service environment` -- a uniquely-named environment for which dedicated F5 LBaaS services are required -- the F5 driver has its own, uniquely-named messaging queue. The F5 LBaaS agent scheduler for a differentiated service environment can only assign tasks to agents running in that environment. -The service environment corresponds to the ``environment_prefix`` parameter in the :ref:`agent configuration file`. when you create a new ``lbaas-loadbalancer`` in OpenStack, this prefix is prepended to the OpenStack tenant id and used to create a new partition on your BIG-IP® device(s). The default ``environment_prefix`` parameter is ``Project``. +The service environment corresponds to the ``environment_prefix`` parameter in the :ref:`agent configuration file`. when you create a new ``lbaas-loadbalancer`` in OpenStack, this prefix is prepended to the OpenStack tenant id and used to create a new partition on your BIG-IP device(s). The default ``environment_prefix`` parameter is ``Project``. Differentiated service environments can be used in conjunction with :ref:`capacity-based scale out` to provide agent redundancy and scale out across BIG-IP device groups. diff --git a/docs/includes/topic_environment-generator.rst b/docs/includes/topic_environment-generator.rst index 11b12c82..bc88c589 100644 --- a/docs/includes/topic_environment-generator.rst +++ b/docs/includes/topic_environment-generator.rst @@ -6,7 +6,7 @@ F5 Environment Generator Overview -------- -The F5® environment generator is a Python utility that creates new service provider drivers and adds them to the Neutron LBaaS configuration file (:file:`/etc/neutron/neutron_lbaas.conf`). +The F5 environment generator is a Python utility that creates new service provider drivers and adds them to the Neutron LBaaS configuration file (:file:`/etc/neutron/neutron_lbaas.conf`). Use Case -------- diff --git a/docs/includes/topic_f5lbaas-l7_content_switching.rst b/docs/includes/topic_f5lbaas-l7_content_switching.rst index ce1261d1..9edb2842 100644 --- a/docs/includes/topic_f5lbaas-l7_content_switching.rst +++ b/docs/includes/topic_f5lbaas-l7_content_switching.rst @@ -73,13 +73,13 @@ L7 policies are ranked by a position value and are evaluated according to their Send request to default pool -OpenStack Policy/Rules Definition Versus BIG-IP® Policy/Rules: +OpenStack Policy/Rules Definition Versus BIG-IP Policy/Rules: `````````````````````````````````````````````````````````````` The Neutron L7 terminology does not directly align with the common vocabulary of BIG-IP Local Traffic Manager. 
In the BIG-IP LTM, policies also have a set of rules, but it is the rules that specify actions and not the policy. Also, policies attached to a virtual server on the BIG-IP are all evaluated regardless of the truth of the associated rules. In addition to this difference the BIG-IP policies have no ordinal, it is the BIG-IP rules that have this attribute. Because of these confusing differences it is useful to attempt to define the terms as they apply to each domain. +------------------+-------------------------------+ - | Neutron LBaaS L7 | BIG-IP® Local Traffic Manager | + | Neutron LBaaS L7 | BIG-IP Local Traffic Manager | +==================+===============================+ | Policy | Policy Rules (wrapper_policy) | +------------------+-------------------------------+ @@ -197,7 +197,7 @@ Configuration .. code-block:: text - # The resulting BIG-IP® LTM Policy configuration from the steps above. + # The resulting BIG-IP LTM Policy configuration from the steps above. ltm policy wrapper_policy { controls { forwarding } last-modified 2016-12-05:09:19:05 diff --git a/docs/includes/topic_f5lbaas-vcmp.rst b/docs/includes/topic_f5lbaas-vcmp.rst index 5b33df84..da3a0d6a 100644 --- a/docs/includes/topic_f5lbaas-vcmp.rst +++ b/docs/includes/topic_f5lbaas-vcmp.rst @@ -4,7 +4,7 @@ F5 LBaaSv2 and vCMP Overview -------- -Virtual Clustered Multiprocessing™ (vCMP®) is a feature of the BIG-IP® system that allows you to run multiple instances of BIG-IP software on a single hardware platform. vCMP allocates a specific share of the hardware resources to each BIG-IP® instance, or :term:`vCMP guest`. +Virtual Clustered Multiprocessing™ (vCMP) is a feature of the BIG-IP system that allows you to run multiple instances of BIG-IP software on a single hardware platform. vCMP allocates a specific share of the hardware resources to each BIG-IP instance, or :term:`vCMP guest`. A vCMP guest consists of a TMOS instance and one or more BIG-IP modules. The :term:`vCMP host` allocates a share of the hardware resources to each guest; each guest also has its own management IP address, self IP addresses, virtual servers, and so on. In this way, each guest can effectively receive and process application traffic with no knowledge of other guests on the system. @@ -42,11 +42,11 @@ Configuration .. code-block:: text :emphasize-lines: 8 - # If you are using vCMP® with VLANs, you will need to configure - # your vCMP® host addresses, in addition to the guests addresses. - # vCMP® Host access is necessary for provisioning VLANs to a guest. - # Use icontrol_hostname for vCMP® guests and icontrol_vcmp_hostname - # for vCMP® hosts. The plug-in will automatically determine + # If you are using vCMP with VLANs, you will need to configure + # your vCMP host addresses, in addition to the guests addresses. + # vCMP Host access is necessary for provisioning VLANs to a guest. + # Use icontrol_hostname for vCMP guests and icontrol_vcmp_hostname + # for vCMP hosts. The plug-in will automatically determine # which host corresponds to each guest. # icontrol_vcmp_hostname = 192.168.1.245 @@ -58,7 +58,7 @@ Configuration :emphasize-lines: 19 ############################################################################### - # Device Driver - iControl® Driver Setting + # Device Driver - iControl Driver Setting ############################################################################### # # icontrol_hostname is valid for external device type only. 
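#
# As an illustrative example only (addresses are placeholders): a deployment
# with two vCMP guests managed by this agent, each running on its own vCMP
# host, could pair the two settings like this; the plug-in then matches each
# guest to its host automatically:
#
# icontrol_hostname = 10.190.7.232, 10.190.7.233
# icontrol_vcmp_hostname = 192.168.1.245, 192.168.1.246
#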
@@ -71,7 +71,7 @@ Configuration # is not standalone, all devices in the sync failover # device group for the hostname specified must have # their management IP address reachable to the agent. - # If order to access devices' iControl® interfaces via + # If order to access devices' iControl interfaces via # self IPs, you should specify them as a comma # separated list below. # diff --git a/docs/includes/topic_global-routed-mode.rst b/docs/includes/topic_global-routed-mode.rst index 634bcf4e..ff3e3198 100644 --- a/docs/includes/topic_global-routed-mode.rst +++ b/docs/includes/topic_global-routed-mode.rst @@ -8,7 +8,7 @@ Global Routed Mode Overview -------- -The F5® agent determines BIG-IP® devices' L2 and L3 network configurations based on the settings provided in the :ref:`L2/L3 segmentation modes ` settings in the :ref:`agent configuration file`. When configured to use global routed mode, the F5 agent makes the following assumptions: +The F5 agent determines BIG-IP devices' L2 and L3 network configurations based on the settings provided in the :ref:`L2/L3 segmentation modes ` settings in the :ref:`agent configuration file`. When configured to use global routed mode, the F5 agent makes the following assumptions: #. LBaaS objects are accessible via global L3 routes; #. All virtual IPs are routable from clients; @@ -33,7 +33,7 @@ Global routed mode is generally used for :term:`undercloud` BIG-IP hardware depl Example BIG-IP 'undercloud' deployment -Global routed mode uses BIG-IP `secure network address translation`_ (SNAT) 'automapping' to map one or more origin IP addresses to a pool of translation addresses. The pool is created by the BIG-IP Local Traffic Manager® (LTM) from existing `self IP`_ addresses. This means that *before* you configure the F5 agent to use global routed mode, you should create enough `self IP`_ addresses on the BIG-IP(s) to handle anticipated connection loads. [#]_ You do not need to configure a SNAT pool, as one will be created automatically. +Global routed mode uses BIG-IP `secure network address translation`_ (SNAT) 'automapping' to map one or more origin IP addresses to a pool of translation addresses. The pool is created by the BIG-IP Local Traffic Manager (LTM) from existing `self IP`_ addresses. This means that *before* you configure the F5 agent to use global routed mode, you should create enough `self IP`_ addresses on the BIG-IP(s) to handle anticipated connection loads. [#]_ You do not need to configure a SNAT pool, as one will be created automatically. Prerequisites ------------- @@ -87,18 +87,18 @@ Configuration # L3 Segmentation Mode Settings ############################################################################### # - # Global Routed Mode - No L2 or L3 Segmentation on BIG-IP® + # Global Routed Mode - No L2 or L3 Segmentation on BIG-IP # # This setting will cause the agent to assume that all VIPs # and pool members will be reachable via global device - # L3 routes, which must be already provisioned on the BIG-IP®s. + # L3 routes, which must be already provisioned on the BIG-IPs. # ... # f5_global_routed_mode = True # # Allow overlapping IP subnets across multiple tenants. - # This creates route domains on BIG-IP® in order to + # This creates route domains on BIG-IP in order to # separate the tenant networks. # # This setting is forced to False if @@ -115,14 +115,14 @@ Configuration # This setting will force the use of SNATs. 
# # If this is set to False, a SNAT will not - # be created (routed mode) and the BIG-IP® + # be created (routed mode) and the BIG-IP # will attempt to set up a floating self IP # as the subnet's default gateway address. # and a wild card IP forwarding virtual # server will be set up on member's network. # Setting this to False will mean Neutron # floating self IPs will no longer work - # if the same BIG-IP® device is not being used + # if the same BIG-IP device is not being used # as the Neutron Router implementation. # # This setting will be forced to True if diff --git a/docs/includes/topic_ha-modes.rst b/docs/includes/topic_ha-modes.rst index edb5590a..ea84f9d3 100644 --- a/docs/includes/topic_ha-modes.rst +++ b/docs/includes/topic_ha-modes.rst @@ -6,7 +6,7 @@ HA mode Overview -------- -:term:`HA`, or, 'high availability', mode refers to high availability of the BIG-IP® device(s). The F5® agent can configure BIG-IP to operate in :term:`standalone`, :term:`pair`, or :term:`scalen` mode. The F5 agent configures LBaaS objects on HA BIG-IP devices in real time. +:term:`HA`, or, 'high availability', mode refers to high availability of the BIG-IP device(s). The F5 agent can configure BIG-IP to operate in :term:`standalone`, :term:`pair`, or :term:`scalen` mode. The F5 agent configures LBaaS objects on HA BIG-IP devices in real time. Use Case -------- @@ -41,7 +41,7 @@ Prerequisites - Basic understanding of OpenStack networking concepts. See the `OpenStack docs `_ for more information. -- Basic understanding of `BIG-IP® Local Traffic Management `_ +- Basic understanding of `BIG-IP Local Traffic Management `_ - F5 :ref:`agent ` and :ref:`service provider driver ` installed on the Neutron controller and all other hosts from which you want to provision LBaaS services. diff --git a/docs/includes/topic_hierarchical-port-binding.rst b/docs/includes/topic_hierarchical-port-binding.rst index 5e3cb561..a7daee87 100644 --- a/docs/includes/topic_hierarchical-port-binding.rst +++ b/docs/includes/topic_hierarchical-port-binding.rst @@ -4,7 +4,7 @@ Hierarchical Port Binding Overview -------- -Neutron `hierarchical port binding`_ [#]_ allows software-defined networking (SDN) users to dynamically configure VLANs and VLAN tags for a physical BIG-IP® :term:`device` or :term:`device service cluster` connected to a 'top of rack' L3 switch (a network 'segment'). Telling the F5® agent what physical switch and port the BIG-IPs are connected to allows the agent to configure the BIG-IPs to process traffic for networks that are dynamically created in that segment. +Neutron `hierarchical port binding`_ [#]_ allows software-defined networking (SDN) users to dynamically configure VLANs and VLAN tags for a physical BIG-IP :term:`device` or :term:`device service cluster` connected to a 'top of rack' L3 switch (a network 'segment'). Telling the F5 agent what physical switch and port the BIG-IPs are connected to allows the agent to configure the BIG-IPs to process traffic for networks that are dynamically created in that segment. Disconnected Services ````````````````````` diff --git a/docs/includes/topic_l2-l3-segmentation-modes.rst b/docs/includes/topic_l2-l3-segmentation-modes.rst index 82baece8..87404229 100644 --- a/docs/includes/topic_l2-l3-segmentation-modes.rst +++ b/docs/includes/topic_l2-l3-segmentation-modes.rst @@ -6,7 +6,7 @@ L2 Adjacent Mode Overview -------- -The F5® agent uses the L2/L3 segmentation mode settings to determine the L2/L3 network configurations for your BIG-IP® device(s). 
+The F5 agent uses the L2/L3 segmentation mode settings to determine the L2/L3 network configurations for your BIG-IP device(s). .. warning:: @@ -142,7 +142,7 @@ Device VLAN to interface and tag mapping VLAN device and interface to port mappings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- ``vlan_binding_driver``: Binds tagged VLANs to specific BIG-IP ports; it should be configured using a valid subclass of the iControl® :class:`VLANBindingBase` class. [#]_ **To use this feature, uncomment the line in the :ref:`agent configuration file`.** +- ``vlan_binding_driver``: Binds tagged VLANs to specific BIG-IP ports; it should be configured using a valid subclass of the iControl :class:`VLANBindingBase` class. [#]_ **To use this feature, uncomment the line in the :ref:`agent configuration file`.** Device Tunneling (VTEP) selfips @@ -160,7 +160,7 @@ Device Tunneling (VTEP) selfips # Device Tunneling (VTEP) selfips # # This is a single entry or comma separated list of cidr (h/m) format - # selfip addresses, one per BIG-IP® device, to use for VTEP addresses. + # selfip addresses, one per BIG-IP device, to use for VTEP addresses. # # If no gre or vxlan tunneling is required, these settings should be # commented out or set to None. @@ -228,10 +228,10 @@ Static ARP population for members on tunnel networks # f5_populate_static_arp = True # ... - # This is a boolean entry which determines if the BIG-IP® will use + # This is a boolean entry which determines if the BIG-IP will use # L2 Population service to update its fdb tunnel entries. This needs # to be setup in accordance with the way the other tunnel agents are - # setup. If the BIG-IP® agent and other tunnel agents don't match + # setup. If the BIG-IP agent and other tunnel agents don't match # the tunnel setup will not work properly. # l2_population = True @@ -256,7 +256,7 @@ Namespaces and Routing :emphasize-lines: 8 # Allow overlapping IP subnets across multiple tenants. - # This creates route domains on BIG-IP® in order to + # This creates route domains on BIG-IP in order to # separate the tenant networks. # # This setting is forced to False if @@ -340,14 +340,14 @@ SNAT Mode and SNAT Address Counts # This setting will force the use of SNATs. # # If this is set to False, a SNAT will not - # be created (routed mode) and the BIG-IP® + # be created (routed mode) and the BIG-IP # will attempt to set up a floating self IP # as the subnet's default gateway address. # and a wild card IP forwarding virtual # server will be set up on member's network. # Setting this to False will mean Neutron # floating self IPs will no longer work - # if the same BIG-IP® device is not being used + # if the same BIG-IP device is not being used # as the Neutron Router implementation. # # This setting will be forced to True if @@ -396,16 +396,16 @@ Common Networks # separated list where if the name is a neutron # network id used for a vip or a pool member, # the network should not be created or deleted - # on the BIG-IP®, but rather assumed that the value + # on the BIG-IP, but rather assumed that the value # is the name of the network already created in # the Common partition with all L3 addresses # assigned to route domain 0. This is useful # for shared networks which are already defined - # on the BIG-IP® prior to LBaaS configuration. The + # on the BIG-IP prior to LBaaS configuration. 
The # network should not be managed by the LBaaS agent, # but can be used for VIPs or pool members # - # If your Internet VLAN on your BIG-IP® is named + # If your Internet VLAN on your BIG-IP is named # /Common/external, and that corresponds to # Neutron uuid: 71718972-78e2-449e-bb56-ce47cc9d2680 # then the entry would look like: diff --git a/docs/includes/topic_lbaasv2-plugin-overview.rst b/docs/includes/topic_lbaasv2-plugin-overview.rst index b0feb9da..e15b63fd 100644 --- a/docs/includes/topic_lbaasv2-plugin-overview.rst +++ b/docs/includes/topic_lbaasv2-plugin-overview.rst @@ -3,7 +3,7 @@ Overview -------- -The F5® OpenStack LBaaSv2 service provider driver and agent (also called, simply, 'F5 LBaaSv2') make it possible to provision F5 BIG-IP® `Local Traffic Manager `_ (LTM®) services in an OpenStack cloud. +The F5 OpenStack LBaaSv2 service provider driver and agent (also called, simply, 'F5 LBaaSv2') make it possible to provision F5 BIG-IP `Local Traffic Manager `_ (LTM) services in an OpenStack cloud. How the plugin works diff --git a/docs/includes/topic_multi-tenancy.rst b/docs/includes/topic_multi-tenancy.rst index 8ff2bc36..2a386e33 100644 --- a/docs/includes/topic_multi-tenancy.rst +++ b/docs/includes/topic_multi-tenancy.rst @@ -6,7 +6,7 @@ Manage Multi-Tenant BIG-IP Devices with F5 LBaaSv2 Overview -------- -BIG-IP® devices allow users to create and customize partitions for which specific features that meet a tenant's needs can be enabled. This type of configuration, called multi-tenancy, allows a greater degree of flexibility in allocating network resources to multiple individual projects. [#]_ +BIG-IP devices allow users to create and customize partitions for which specific features that meet a tenant's needs can be enabled. This type of configuration, called multi-tenancy, allows a greater degree of flexibility in allocating network resources to multiple individual projects. [#]_ .. figure:: ../media/f5-lbaas-multi-tenancy.png :alt: Multi-tenant BIG-IP and F5 LBaaS diff --git a/docs/includes/topic_neutron-bigip-command-mapping.rst b/docs/includes/topic_neutron-bigip-command-mapping.rst index dfa5047e..8de834af 100644 --- a/docs/includes/topic_neutron-bigip-command-mapping.rst +++ b/docs/includes/topic_neutron-bigip-command-mapping.rst @@ -6,7 +6,7 @@ F5 LBaaSv2 to BIG-IP Configuration Mapping Overview -------- -When you issue ``neutron lbaas`` commands on your OpenStack Neutron controller or host, the F5® LBaaSv2 driver and F5 agent configure objects on your BIG-IP® device(s). Here, we've provided some insight into what exactly happens behind the scenes to configure BIG-IP objects. You can also view the actual calls made by setting the F5 agent's DEBUG level to 'True' in the :ref:`agent configuration file` and viewing the logs (:file:`/var/log/neutron/f5-openstack-agent.log`). +When you issue ``neutron lbaas`` commands on your OpenStack Neutron controller or host, the F5 LBaaSv2 driver and F5 agent configure objects on your BIG-IP device(s). Here, we've provided some insight into what exactly happens behind the scenes to configure BIG-IP objects. You can also view the actual calls made by setting the F5 agent's DEBUG level to 'True' in the :ref:`agent configuration file` and viewing the logs (:file:`/var/log/neutron/f5-openstack-agent.log`). .. 
include:: ref_neutron-to-bigip-configs-table.rst :start-line: 5 diff --git a/docs/includes/topic_supported-features-intro.rst b/docs/includes/topic_supported-features-intro.rst index 57d49588..686fa93a 100644 --- a/docs/includes/topic_supported-features-intro.rst +++ b/docs/includes/topic_supported-features-intro.rst @@ -1,4 +1,4 @@ :orphan: true -The :ref:`agent configuration file` -- :file:`/etc/neutron/services/f5/f5-openstack-agent.ini` -- provides the mechanism for identifying your BIG-IP® device(s) to F5 LBaaSv2 and allowing the agent to discover and configure BIG-IP network (``/net``) and Local Traffic Manager® (``/ltm``) objects. +The :ref:`agent configuration file` -- :file:`/etc/neutron/services/f5/f5-openstack-agent.ini` -- provides the mechanism for identifying your BIG-IP device(s) to F5 LBaaSv2 and allowing the agent to discover and configure BIG-IP network (``/net``) and Local Traffic Manager (``/ltm``) objects. diff --git a/docs/includes/topic_upgrading-f5-lbaasv2-plugin.rst b/docs/includes/topic_upgrading-f5-lbaasv2-plugin.rst index 7075c37f..23866aef 100644 --- a/docs/includes/topic_upgrading-f5-lbaasv2-plugin.rst +++ b/docs/includes/topic_upgrading-f5-lbaasv2-plugin.rst @@ -3,7 +3,7 @@ Upgrading the F5 LBaaSv2 Components =================================== -If you are upgrading from an earlier version, F5® recommends that you uninstall the current version, then install the new version. +If you are upgrading from an earlier version, F5 recommends that you uninstall the current version, then install the new version. .. warning:: diff --git a/docs/map_before-you-begin.rst b/docs/map_before-you-begin.rst index c5ceb84d..5d5b0e1e 100644 --- a/docs/map_before-you-begin.rst +++ b/docs/map_before-you-begin.rst @@ -1,11 +1,11 @@ Before You Begin ================ -In order to use F5® LBaaSv2 services, you will need the following: +In order to use F5 LBaaSv2 services, you will need the following: - Operational OpenStack cloud (|openstack| release). -- Licensed, operational BIG-IP® :term:`device` or :term:`device cluster`; can be deployed either as an OpenStack instance (BIG-IP VE) or external to the cloud (VE or hardware). +- Licensed, operational BIG-IP :term:`device` or :term:`device cluster`; can be deployed either as an OpenStack instance (BIG-IP VE) or external to the cloud (VE or hardware). .. important:: diff --git a/docs/map_f5-lbaasv2-user-guide.rst b/docs/map_f5-lbaasv2-user-guide.rst index b7440a97..8ecfdecb 100644 --- a/docs/map_f5-lbaasv2-user-guide.rst +++ b/docs/map_f5-lbaasv2-user-guide.rst @@ -3,7 +3,7 @@ F5 OpenStack LBaaSv2 User Guide ############################### -This guide provides instructions for installing and using the F5® OpenStack LBaaSv2 service provider driver and agent (also called, collectively, 'F5 LBaaSv2'). +This guide provides instructions for installing and using the F5 OpenStack LBaaSv2 service provider driver and agent (also called, collectively, 'F5 LBaaSv2'). .. include:: includes/ref_lbaasv2-version-compatibility.rst :start-line: 5 diff --git a/docs/map_multi-agents-in-diff-environments.rst b/docs/map_multi-agents-in-diff-environments.rst index 5126611a..bd2f83d6 100644 --- a/docs/map_multi-agents-in-diff-environments.rst +++ b/docs/map_multi-agents-in-diff-environments.rst @@ -4,7 +4,7 @@ Multiple Agents and Differentiated Service Environments Overview -------- -You can run :ref:`multiple F5® agents ` on separate hosts in OpenStack to provide agent redundancy and scale out. 
Additionally, you can set up custom :ref:`service environments ` in your OpenStack cloud to manage environments with different requirements and/or configurations. +You can run :ref:`multiple F5 agents ` on separate hosts in OpenStack to provide agent redundancy and scale out. Additionally, you can set up custom :ref:`service environments ` in your OpenStack cloud to manage environments with different requirements and/or configurations. Use Case -------- @@ -38,7 +38,7 @@ Configuration #. :ref:`Configure the F5 agents `. - * Each agent must be configured with the same iControl® endpoint(s). + * Each agent must be configured with the same iControl endpoint(s). * Each agent must be configured with the same ``environment_prefix``; this is the name you assigned to the new custom environment. * Each agent must run on a separate host (in other words, the hostname must be unique). diff --git a/docs/troubleshooting.rst b/docs/troubleshooting.rst index 456ab63e..3f760ef4 100644 --- a/docs/troubleshooting.rst +++ b/docs/troubleshooting.rst @@ -12,7 +12,7 @@ Troubleshooting Set Logging Level to DEBUG -------------------------- -To troubleshoot general problems, set the Neutron and the F5® agent ``debug`` setting to ``True``. +To troubleshoot general problems, set the Neutron and the F5 agent ``debug`` setting to ``True``. Extensive logging will then appear in the ``neutron-server`` and ``f5-oslbaasv1-agent`` log files on their respective hosts. @@ -61,7 +61,7 @@ Here are a few things you can try: $ sudo service f5-oslbaasv2-agent status \\ Ubuntu -3. Make sure you can connect to the BIG-IP® and that the iControl® hostname, username, and password in the :ref:`agent configuration file` are correct. +3. Make sure you can connect to the BIG-IP and that the iControl hostname, username, and password in the :ref:`agent configuration file` are correct. 4. If you're using ``global_routed_mode``, comment out (#) the ``vtep`` lines (shown below) in the :ref:`agent configuration file`. diff --git a/f5lbaasdriver/test/tempest/services/clients/bigip_client.py b/f5lbaasdriver/test/tempest/services/clients/bigip_client.py index 50e5c482..1191393c 100644 --- a/f5lbaasdriver/test/tempest/services/clients/bigip_client.py +++ b/f5lbaasdriver/test/tempest/services/clients/bigip_client.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 L7 rules client for tempest tests.""" +u"""F5 Networks LBaaSv2 L7 rules client for tempest tests.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/test/tempest/services/clients/l7policy_client.py b/f5lbaasdriver/test/tempest/services/clients/l7policy_client.py index 44d7bf8f..1a7b276b 100644 --- a/f5lbaasdriver/test/tempest/services/clients/l7policy_client.py +++ b/f5lbaasdriver/test/tempest/services/clients/l7policy_client.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 L7 rules client for tempest tests.""" +u"""F5 Networks LBaaSv2 L7 rules client for tempest tests.""" # Copyright 2016 F5 Networks Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/test/tempest/services/clients/l7rule_client.py b/f5lbaasdriver/test/tempest/services/clients/l7rule_client.py index 7fd47280..fe039a7b 100644 --- a/f5lbaasdriver/test/tempest/services/clients/l7rule_client.py +++ b/f5lbaasdriver/test/tempest/services/clients/l7rule_client.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 L7 rules client for tempest tests.""" +u"""F5 Networks LBaaSv2 L7 rules client for tempest tests.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/test/tempest/services/clients/plugin_rpc_client.py b/f5lbaasdriver/test/tempest/services/clients/plugin_rpc_client.py index b4acb187..8e9c177f 100644 --- a/f5lbaasdriver/test/tempest/services/clients/plugin_rpc_client.py +++ b/f5lbaasdriver/test/tempest/services/clients/plugin_rpc_client.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 plugin_rpc client for tempest tests.""" +u"""F5 Networks LBaaSv2 plugin_rpc client for tempest tests.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/test/tempest/tests/api/test_esd.py b/f5lbaasdriver/test/tempest/tests/api/test_esd.py index e01b86ae..e27c092f 100644 --- a/f5lbaasdriver/test/tempest/tests/api/test_esd.py +++ b/f5lbaasdriver/test/tempest/tests/api/test_esd.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 L7 policy tempest tests.""" +u"""F5 Networks LBaaSv2 L7 policy tempest tests.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/test/tempest/tests/api/test_l7policy.py b/f5lbaasdriver/test/tempest/tests/api/test_l7policy.py index 5a877d78..b6548b21 100644 --- a/f5lbaasdriver/test/tempest/tests/api/test_l7policy.py +++ b/f5lbaasdriver/test/tempest/tests/api/test_l7policy.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 L7 policy tempest tests.""" +u"""F5 Networks LBaaSv2 L7 policy tempest tests.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/test/tempest/tests/api/test_l7policy_rules.py b/f5lbaasdriver/test/tempest/tests/api/test_l7policy_rules.py index 0b761553..b77dd81e 100644 --- a/f5lbaasdriver/test/tempest/tests/api/test_l7policy_rules.py +++ b/f5lbaasdriver/test/tempest/tests/api/test_l7policy_rules.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 L7 policy tempest tests.""" +u"""F5 Networks LBaaSv2 L7 policy tempest tests.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/test/tempest/tests/api/test_l7policy_update.py b/f5lbaasdriver/test/tempest/tests/api/test_l7policy_update.py index f547494d..fe68f3a8 100644 --- a/f5lbaasdriver/test/tempest/tests/api/test_l7policy_update.py +++ b/f5lbaasdriver/test/tempest/tests/api/test_l7policy_update.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 L7 policy rules tempest tests.""" +u"""F5 Networks LBaaSv2 L7 policy rules tempest tests.""" # Copyright 2016 F5 Networks Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/v2/bigip/agent_rpc.py b/f5lbaasdriver/v2/bigip/agent_rpc.py index 4870ad34..3e6d11a6 100644 --- a/f5lbaasdriver/v2/bigip/agent_rpc.py +++ b/f5lbaasdriver/v2/bigip/agent_rpc.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""RPC Calls to Agents for F5® LBaaSv2.""" +u"""RPC Calls to Agents for F5 LBaaSv2.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/v2/bigip/agent_scheduler.py b/f5lbaasdriver/v2/bigip/agent_scheduler.py index 712e629f..9f572bf6 100644 --- a/f5lbaasdriver/v2/bigip/agent_scheduler.py +++ b/f5lbaasdriver/v2/bigip/agent_scheduler.py @@ -150,7 +150,7 @@ def schedule(self, plugin, context, loadbalancer_id, env=None): # There is no existing loadbalancer agent binding. # Find all active agent candidates in this env. - # We use environment_prefix to find F5® agents + # We use environment_prefix to find F5 agents # rather then map to the agent binary name. candidates = self.get_agents_in_env( context, diff --git a/f5lbaasdriver/v2/bigip/constants_v2.py b/f5lbaasdriver/v2/bigip/constants_v2.py index aaa57d99..6bdf4a91 100644 --- a/f5lbaasdriver/v2/bigip/constants_v2.py +++ b/f5lbaasdriver/v2/bigip/constants_v2.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""Constants for F5® LBaaSv2 Driver.""" +u"""Constants for F5 LBaaSv2 Driver.""" # coding=utf-8 # Copyright 2016 F5 Networks Inc. # diff --git a/f5lbaasdriver/v2/bigip/driver_v2.py b/f5lbaasdriver/v2/bigip/driver_v2.py index f0d159cd..60ac2f1b 100644 --- a/f5lbaasdriver/v2/bigip/driver_v2.py +++ b/f5lbaasdriver/v2/bigip/driver_v2.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 Driver Implementation.""" +u"""F5 Networks LBaaSv2 Driver Implementation.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -80,7 +80,7 @@ def __str__(self): class F5DriverV2(object): - u"""F5 Networks® LBaaSv2 Driver.""" + u"""F5 Networks LBaaSv2 Driver.""" def __init__(self, plugin=None, env=None): """Driver initialization.""" diff --git a/f5lbaasdriver/v2/bigip/exceptions.py b/f5lbaasdriver/v2/bigip/exceptions.py index ad574f25..04f2d26a 100644 --- a/f5lbaasdriver/v2/bigip/exceptions.py +++ b/f5lbaasdriver/v2/bigip/exceptions.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 Exceptions.""" +u"""F5 Networks LBaaSv2 Exceptions.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/v2/bigip/neutron_client.py b/f5lbaasdriver/v2/bigip/neutron_client.py index c64bc591..23ba172d 100644 --- a/f5lbaasdriver/v2/bigip/neutron_client.py +++ b/f5lbaasdriver/v2/bigip/neutron_client.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""Service Module for F5® LBaaSv2.""" +u"""Service Module for F5 LBaaSv2.""" # Copyright 2017 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/v2/bigip/plugin_rpc.py b/f5lbaasdriver/v2/bigip/plugin_rpc.py index fcac9171..3fb30a08 100644 --- a/f5lbaasdriver/v2/bigip/plugin_rpc.py +++ b/f5lbaasdriver/v2/bigip/plugin_rpc.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""RPC Callbacks for F5® LBaaSv2 Plugins.""" +u"""RPC Callbacks for F5 LBaaSv2 Plugins.""" # Copyright 2016 F5 Networks Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/v2/bigip/service_builder.py b/f5lbaasdriver/v2/bigip/service_builder.py index bb54f11e..c8ceaf3b 100644 --- a/f5lbaasdriver/v2/bigip/service_builder.py +++ b/f5lbaasdriver/v2/bigip/service_builder.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""Service Module for F5® LBaaSv2.""" +u"""Service Module for F5 LBaaSv2.""" # Copyright 2014-2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); From 4c8230374c5ab3890eda9241bfe15e7de08df07e Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 15 May 2018 15:00:04 +0200 Subject: [PATCH 17/24] Added some new RPC functions needed by F5 agent. --- dev_install | 2 +- f5lbaasdriver/v2/bigip/plugin_rpc.py | 105 +++++++++++++++++++++++++++ 2 files changed, 106 insertions(+), 1 deletion(-) diff --git a/dev_install b/dev_install index e68d2cb6..cd325622 100755 --- a/dev_install +++ b/dev_install @@ -1,4 +1,4 @@ git init python setup.py install -/usr/local/bin/dumb-init /var/lib/kolla/venv/bin/neutron-server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/neutron_lbaas.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_f5.ini --config-file /etc/neutron/plugins/ml2/ml2-conf-aci.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_asr.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_manila.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_arista.ini --config-file /etc/neutron/plugins/cisco/cisco_device_manager_plugin.ini --config-file /etc/neutron/plugins/cisco/cisco_router_plugin.ini \ No newline at end of file +dumb-init neutron-server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/neutron_lbaas.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_f5.ini --config-file /etc/neutron/plugins/ml2/ml2-conf-aci.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_asr.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_manila.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_arista.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_asr1k.ini --config-file /etc/neutron/plugins/cisco/cisco_device_manager_plugin.ini --config-file /etc/neutron/plugins/cisco/cisco_router_plugin.ini \ No newline at end of file diff --git a/f5lbaasdriver/v2/bigip/plugin_rpc.py b/f5lbaasdriver/v2/bigip/plugin_rpc.py index 3fb30a08..ec0b5aa7 100644 --- a/f5lbaasdriver/v2/bigip/plugin_rpc.py +++ b/f5lbaasdriver/v2/bigip/plugin_rpc.py @@ -777,3 +777,108 @@ def remove_allowed_address(self, context, port_id=None, ip_address=None): except Exception as exc: LOG.error('could not remove allowed address pair: %s' % exc.message) + + # validate a list of loadbalancer id - assure they are not deleted + @log_helpers.log_method_call + def validate_loadbalancers_state(self, context, loadbalancers, host=None): + lb_status = {} + for lbid in loadbalancers: + with context.session.begin(subtransactions=True): + try: + lb_db = self.driver.plugin.db.get_loadbalancer(context, + lbid) + lb_status[lbid] = lb_db.provisioning_status + + except Exception as e: + LOG.error('Exception: get_loadbalancer: %s', + e.message) + lb_status[lbid] = 'Unknown' + return lb_status + + # validate a list of pools id - assure they are not deleted + @log_helpers.log_method_call + def validate_pools_state(self, context, pools, host=None): + pool_status = {} + for poolid in pools: + with context.session.begin(subtransactions=True): + try: + pool_db = 
self.driver.plugin.db.get_pool(context, poolid) + pool_status[poolid] = pool_db.provisioning_status + except Exception as e: + LOG.error('Exception: get_pool: %s', + e.message) + pool_status[poolid] = 'Unknown' + return pool_status + + @log_helpers.log_method_call + def get_pools_members(self, context, pools, host=None): + pools_members = dict() + for poolid in pools: + members = self.driver.plugin.db.get_pool_members( + context, + filters={'pool_id': [poolid]} + ) + pools_members[poolid] = [member.to_dict(pool=False) + for member in members] + return pools_members + + # validate a list of listeners id - assure they are not deleted + @log_helpers.log_method_call + def validate_listeners_state(self, context, listeners, host=None): + listener_status = {} + for listener_id in listeners: + with context.session.begin(subtransactions=True): + try: + listener_db = \ + self.driver.plugin.db.get_listener(context, + listener_id) + listener_status[listener_id] = \ + listener_db.provisioning_status + except Exception as e: + LOG.error('Exception: get_listener: %s', + e.message) + listener_status[listener_id] = 'Unknown' + return listener_status + + # validate a list of l7policys id - assure they are not deleted + @log_helpers.log_method_call + def validate_l7policys_state_by_listener(self, context, listeners): + """Performs a validation against l7policies with a list of listeners + + This method will attempt to check the Neutron DB for a list of + l7policies that reference the given list of listener_id's. + + This will return a dict of: + {listener_id_0: bool, + ... + } + The bool will indicate that true: there are l7policies here, false: + there are none on this listener. + """ + has_l7policy = {} + try: + # NOTE: neutron_lbaas has a deprecated code filter for queries + # that appears to silence filter queries for 'listener_id' + l7policy_db = self.driver.plugin.db.get_l7policies(context) + except Exception as error: + LOG.exception("Exception: plugin.db.get_l7policies({}): " + "({})".format(listeners, error)) + return {} + LOG.debug("({}) = get_l7policies({})".format(l7policy_db, context)) + for listener_id in listeners: + # Given filter limitations, double-loop iterator results + result = False + if l7policy_db: + if isinstance(l7policy_db, list): + for l7policy in l7policy_db: + if l7policy.listener_id == listener_id: + result = True + break + else: + if l7policy_db.listener_id == listener_id: + result = True + else: + result = False + has_l7policy[listener_id] = result + LOG.debug("has_l7policy: ({})".format(has_l7policy)) + return has_l7policy From ad7dc08f0301c50e4f4eab16ea5416cd370ec9cc Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 3 Jul 2018 10:10:50 +0200 Subject: [PATCH 18/24] Dev/mitaka m3 (#3) * Changes in error handling when objects can't be found for orphan handling and in case scheduling for agent failed * Try 6 times to get an network and don't use bad defaults for the provider segment id and network type in case that the first try fails. Throw exeption if 6 tries fail for the read of network info. * Fix the _get_network_cached method and make it work again. First approach for fixing was completely nonsense * Add network segments to the 30 minute cache for networks. * Enable caching for network segmentation retrieval. Change defaults for network segmentation dict in case not found. 
* Changes in error handling when objects can't be found for orphan handling and in case scheduling for agent failed * Try 6 times to get an network and don't use bad defaults for the provider segment id and network type in case that the first try fails. Throw exeption if 6 tries fail for the read of network info. * Fix the _get_network_cached method and make it work again. First approach for fixing was completely nonsense * Add network segments to the 30 minute cache for networks. * Enable caching for network segmentation retrieval. Change defaults for network segmentation dict in case not found. --- dev_install | 3 +- f5lbaasdriver/v2/bigip/agent_scheduler.py | 17 ++ .../v2/bigip/disconnected_service.py | 28 +-- f5lbaasdriver/v2/bigip/driver_v2.py | 2 + f5lbaasdriver/v2/bigip/plugin_rpc.py | 29 ++- f5lbaasdriver/v2/bigip/service_builder.py | 211 ++++++++++-------- 6 files changed, 177 insertions(+), 113 deletions(-) diff --git a/dev_install b/dev_install index cd325622..cd1e6814 100755 --- a/dev_install +++ b/dev_install @@ -1,4 +1,5 @@ git init python setup.py install -dumb-init neutron-server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/neutron_lbaas.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_f5.ini --config-file /etc/neutron/plugins/ml2/ml2-conf-aci.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_asr.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_manila.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_arista.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_asr1k.ini --config-file /etc/neutron/plugins/cisco/cisco_device_manager_plugin.ini --config-file /etc/neutron/plugins/cisco/cisco_router_plugin.ini \ No newline at end of file +dumb-init neutron-server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/neutron_lbaas.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_f5.ini --config-file /etc/neutron/plugins/ml2/ml2-conf-aci.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_asr.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_manila.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_arista.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_asr1k.ini --config-file /etc/neutron/plugins/cisco/cisco_device_manager_plugin.ini --config-file /etc/neutron/plugins/cisco/cisco_router_plugin.ini +#python /var/lib/openstack/bin/neutron-server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/neutron_lbaas.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_f5.ini --config-file /etc/neutron/plugins/ml2/ml2-conf-aci.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_asr.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_manila.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_arista.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_asr1k.ini --config-file /etc/neutron/plugins/cisco/cisco_device_manager_plugin.ini --config-file /etc/neutron/plugins/cisco/cisco_router_plugin.ini \ No newline at end of file diff --git a/f5lbaasdriver/v2/bigip/agent_scheduler.py b/f5lbaasdriver/v2/bigip/agent_scheduler.py index 9f572bf6..359db1ea 100644 --- a/f5lbaasdriver/v2/bigip/agent_scheduler.py +++ b/f5lbaasdriver/v2/bigip/agent_scheduler.py @@ -159,6 +159,22 @@ def schedule(self, plugin, context, loadbalancer_id, env=None): active=True ) + # ccloud: If no active agent can be found, get all non active ones to assign at least an agent to the LB. 
+ # Otherwise the LB will be left in nirvana because it's already defined in neutron LB but without + # any assigned agent, so LB will never be scheduled to an agent and created on F5 + + if not candidates: + candidates = self.get_agents_in_env( + context, + plugin, + env, + active=False + ) + if not candidates: + LOG.error('ccloud: No f5 lbaas agents are active. No agent could be found for env %s' % env) + else: + LOG.error('ccloud: No f5 lbaas agents are active. Using a non active one for env %s' % env) + LOG.debug("candidate agents: %s", candidates) if len(candidates) == 0: LOG.error('No f5 lbaas agents are active for env %s' % env) @@ -235,6 +251,7 @@ def schedule(self, plugin, context, loadbalancer_id, env=None): LOG.warn('No capacity left on any agents in env: %s' % env) LOG.warn('Group capacity in environment %s were %s.' % (env, capacity_by_group)) + LOG.error('ccloud: Aborting loadbalancer scheduling') raise lbaas_agentschedulerv2.NoEligibleLbaasAgent( loadbalancer_id=loadbalancer.id) diff --git a/f5lbaasdriver/v2/bigip/disconnected_service.py b/f5lbaasdriver/v2/bigip/disconnected_service.py index be8c5801..269576e1 100644 --- a/f5lbaasdriver/v2/bigip/disconnected_service.py +++ b/f5lbaasdriver/v2/bigip/disconnected_service.py @@ -22,7 +22,7 @@ class DisconnectedService(object): def __init__(self): - self.supported_encapsulations = ['vlan'] + self.supported_encapsulations = [u'vlan'] # Retain this method for future use in case a particular ML2 implementation # decouples network_id from physical_network name. The implementation in @@ -41,7 +41,6 @@ def get_network_segments(self, session): def get_network_segment(self, context, agent_configuration, network): data = None - network_segment_physical_network = \ agent_configuration.get('network_segment_physical_network', None) @@ -52,31 +51,22 @@ def get_network_segment(self, context, agent_configuration, network): # look up segment details in the ml2_network_segments table segments = db.get_network_segments(context.session, network['id'], filter_dynamic=None) - for segment in segments: - if ((network_segment_physical_network == - segment['physical_network']) and - (segment['network_type'].lower() in - supported_encapsulations)): + if (network_segment_physical_network == segment['physical_network'] and + segment['network_type'].lower() in supported_encapsulations): data = segment + LOG.debug("ccloud: Got network segment: %s" % segment) break - elif (network['provider:network_type'] == 'opflex' and + elif ('provider:network_type' in network and network['provider:network_type'] == 'opflex' and segment['network_type'] == 'vlan'): data = segment LOG.debug("Got OPFLEX segment: %s" % segment) break if not data: - LOG.debug('Using default segment for network %s' % - (network['id'])) - - # neutron is expected to provide this data immediately - data = { - 'segmentation_id': network['provider:segmentation_id'] - } - if 'provider:network_type' in network: - data['network_type'] = network['provider:network_type'] - if 'provider:physical_network' in network: - data['physical_network'] = network['provider:physical_network'] + data = {} + data['provider:network_type'] = None + data['provider:segmentation_id'] = None + data['provider:physical_network'] = None return data diff --git a/f5lbaasdriver/v2/bigip/driver_v2.py b/f5lbaasdriver/v2/bigip/driver_v2.py index 60ac2f1b..58979744 100644 --- a/f5lbaasdriver/v2/bigip/driver_v2.py +++ b/f5lbaasdriver/v2/bigip/driver_v2.py @@ -220,6 +220,8 @@ def create(self, context, loadbalancer): models.LoadBalancer, 
loadbalancer.id, plugin_constants.ERROR) + # ccloud: exit with exception in case scheduling failed + raise except Exception as e: LOG.error("Exception: loadbalancer create: %s" % e.message) raise e diff --git a/f5lbaasdriver/v2/bigip/plugin_rpc.py b/f5lbaasdriver/v2/bigip/plugin_rpc.py index ec0b5aa7..0dbd5c50 100644 --- a/f5lbaasdriver/v2/bigip/plugin_rpc.py +++ b/f5lbaasdriver/v2/bigip/plugin_rpc.py @@ -28,6 +28,7 @@ from neutron_lbaas.db.loadbalancer import models from neutron_lbaas import agent_scheduler from neutron_lbaas.services.loadbalancer import data_models +from neutron_lbaas.extensions import loadbalancerv2 from f5lbaasdriver.v2.bigip import constants_v2 as constants @@ -116,8 +117,7 @@ def get_service_by_loadbalancer_id( lb, agent) except Exception as e: - LOG.error("Exception: get_service_by_loadbalancer_id: %s", - e.message) + LOG.exception("ccloud Exception: get_service_by_loadbalancer_id:", e) return service @@ -788,11 +788,16 @@ def validate_loadbalancers_state(self, context, loadbalancers, host=None): lb_db = self.driver.plugin.db.get_loadbalancer(context, lbid) lb_status[lbid] = lb_db.provisioning_status - + # ccloud: distinguish between REAL not founds and any other kind of errors to avoid + # treating the object as orphan in case of general db errors, ... + except loadbalancerv2.EntityNotFound as enf: + LOG.error('Exception: get_loadbalancer: %s', + enf.message) + lb_status[lbid] = 'Unknown' except Exception as e: LOG.error('Exception: get_loadbalancer: %s', e.message) - lb_status[lbid] = 'Unknown' + lb_status[lbid] = 'Indefinite' return lb_status # validate a list of loadbalancer ids - assure they are not deleted @@ -804,10 +809,16 @@ def validate_pools_state(self, context, pools, host=None): try: pool_db = self.driver.plugin.db.get_pool(context, poolid) pool_status[poolid] = pool_db.provisioning_status + # ccloud: distinguish between REAL not founds and any other kind of errors to avoid + # treating the object as orphan in case of general db errors, ... + except loadbalancerv2.EntityNotFound as enf: + LOG.error('Exception: get_pool: %s', + enf.message) + pool_status[poolid] = 'Unknown' except Exception as e: LOG.error('Exception: get_pool: %s', e.message) - pool_status[poolid] = 'Unknown' + pool_status[poolid] = 'Indefinite' return pool_status @log_helpers.log_method_call @@ -834,10 +845,16 @@ def validate_listeners_state(self, context, listeners, host=None): listener_id) listener_status[listener_id] = \ listener_db.provisioning_status + # ccloud: distinguish between REAL not founds and any other kind of errors to avoid + # treating the object as orphan in case of general db errors, ... + except loadbalancerv2.EntityNotFound as enf: + LOG.error('Exception: get_listener: %s', + enf.message) + listener_status[listener_id] = 'Unknown' except Exception as e: LOG.error('Exception: get_listener: %s', e.message) - listener_status[listener_id] = 'Unknown' + listener_status[listener_id] = 'Indefinite' return listener_status # validate a list of l7policys id - assure they are not deleted diff --git a/f5lbaasdriver/v2/bigip/service_builder.py b/f5lbaasdriver/v2/bigip/service_builder.py index c8ceaf3b..68251457 100644 --- a/f5lbaasdriver/v2/bigip/service_builder.py +++ b/f5lbaasdriver/v2/bigip/service_builder.py @@ -15,6 +15,7 @@ # limitations under the License.
# import datetime +import time import json from oslo_log import helpers as log_helpers @@ -47,7 +48,7 @@ def __init__(self, driver): self.net_cache = {} self.subnet_cache = {} - self.last_cache_update = datetime.datetime.fromtimestamp(0) + self.last_cache_update = datetime.datetime.now() #fromtimestamp(0) self.plugin = self.driver.plugin self.disconnected_service = DisconnectedService() self.q_client = q_client.F5NetworksNeutronClient(self.plugin) @@ -55,10 +56,10 @@ def __init__(self, driver): def build(self, context, loadbalancer, agent): """Get full service definition from loadbalancer ID.""" # Invalidate cache if it is too old - if ((datetime.datetime.now() - self.last_cache_update).seconds > - constants_v2.NET_CACHE_SECONDS): + if ((datetime.datetime.now() - self.last_cache_update).seconds > constants_v2.NET_CACHE_SECONDS): self.net_cache = {} self.subnet_cache = {} + self.last_cache_update = datetime.datetime.now() service = {} with context.session.begin(subtransactions=True): @@ -85,68 +86,68 @@ def build(self, context, loadbalancer, agent): vip_port = service['loadbalancer']['vip_port'] network_id = vip_port['network_id'] service['loadbalancer']['network_id'] = network_id - network = self._get_network_cached( - context, - network_id - ) # Override the segmentation ID and network type for this network # if we are running in disconnected service mode agent_config = self.deserialize_agent_configurations( agent['configurations']) - segment_data = self.disconnected_service.get_network_segment( - context, agent_config, network) - if segment_data: - network['provider:segmentation_id'] = \ - segment_data.get('segmentation_id', None) - network['provider:network_type'] = \ - segment_data.get('network_type', None) - network['provider:physical_network'] = \ - segment_data.get('physical_network', None) - network_map[network_id] = network - - # Check if the tenant can create a loadbalancer on the network. - if (agent and not self._valid_tenant_ids(network, - loadbalancer.tenant_id, - agent)): - LOG.error("Creating a loadbalancer %s for tenant %s on a" - " non-shared network %s owned by %s." % ( - loadbalancer.id, - loadbalancer.tenant_id, - network['id'], - network['tenant_id'])) - - # Get the network VTEPs if the network provider type is - # either gre or vxlan. - if 'provider:network_type' in network: - net_type = network['provider:network_type'] - if net_type == 'vxlan' or net_type == 'gre': - self._populate_loadbalancer_network_vteps( - context, - service['loadbalancer'], - net_type - ) - - # Get listeners and pools. - service['listeners'] = self._get_listeners(context, loadbalancer) - service['pools'], service['healthmonitors'] = \ - self._get_pools_and_healthmonitors(context, loadbalancer) - - service['members'] = self._get_members( - context, service['pools'], subnet_map, network_map) - - service['subnets'] = subnet_map - service['networks'] = network_map - - service['l7policies'] = self._get_l7policies( - context, service['listeners']) - service['l7policy_rules'] = self._get_l7policy_rules( - context, service['l7policies']) + try: + network = self._get_network_cached( + context, + network_id, + agent_config + ) - return service + network_map[network_id] = network + + # Check if the tenant can create a loadbalancer on the network. + if (agent and not self._valid_tenant_ids(network, + loadbalancer.tenant_id, + agent)): + LOG.error("Creating a loadbalancer %s for tenant %s on a" + " non-shared network %s owned by %s." 
% ( + loadbalancer.id, + loadbalancer.tenant_id, + network['id'], + network['tenant_id'])) + + # Get the network VTEPs if the network provider type is + # either gre or vxlan. + if 'provider:network_type' in network: + net_type = network['provider:network_type'] + if net_type == 'vxlan' or net_type == 'gre': + self._populate_loadbalancer_network_vteps( + context, + service['loadbalancer'], + net_type + ) + + # Get listeners and pools. + service['listeners'] = self._get_listeners(context, loadbalancer) + + service['pools'], service['healthmonitors'] = \ + self._get_pools_and_healthmonitors(context, loadbalancer) + + service['members'] = self._get_members( + context, service['pools'], subnet_map, network_map, agent_config) + + service['subnets'] = subnet_map + service['networks'] = network_map + + service['l7policies'] = self._get_l7policies( + context, service['listeners']) + service['l7policy_rules'] = self._get_l7policy_rules( + context, service['l7policies']) + + return service + + # Return nothing in case network retrieval failed + except Exception as e: + LOG.exception("ccloud: Build service for loadbalancer failed. Aborting with exception ", e) + raise @log_helpers.log_method_call - def _get_extended_member(self, context, member): + def _get_extended_member(self, context, member, agent_config): """Get extended member attributes and member networking.""" member_dict = member.to_dict(pool=False) subnet_id = member.subnet_id @@ -157,7 +158,8 @@ def _get_extended_member(self, context, member): network_id = subnet['network_id'] network = self._get_network_cached( context, - network_id + network_id, + agent_config ) member_dict['network_id'] = network_id @@ -206,20 +208,70 @@ def _get_subnet_cached(self, context, subnet_id): return self.subnet_cache[subnet_id] @log_helpers.log_method_call - def _get_network_cached(self, context, network_id): + def _get_network_cached(self, context, network_id, agent_config): """Retrieve network from cache or from Neutron.""" + network = None + # read network if not cached if network_id not in self.net_cache: - network = self.plugin.db._core_plugin.get_network( - context, - network_id - ) - if 'provider:network_type' not in network: - network['provider:network_type'] = 'undefined' - if 'provider:segmentation_id' not in network: - network['provider:segmentation_id'] = 0 - self.net_cache[network_id] = network + count = 0 + # try 3 times + while count < 3: + count += 1 + try: + if not network: + network = self.plugin.db._core_plugin.get_network( + context, + network_id) + # stop if found + if network: + break + else: + LOG.error("ccloud: Network ID %s NOT FOUND. Will try again in some seconds." % network_id) + time.sleep(3) + except Exception as e: + LOG.exception("ccloud: Exception in network retrieval for Network ID %s. Will try again in some seconds." % network_id) + time.sleep(3) + + # abort if network not found (not sure what to do in this case) + if not network: + LOG.error("ccloud: Network ID %s NOT FOUND. Aborting with Exception." % network_id) + raise Exception("ccloud: Network ID %s NOT FOUND. Aborting with Exception." % network_id) + + # try to get segment data for network 3 times + segment_data = None + while count < 3: + try: + segment_data = self.disconnected_service.get_network_segment( + context, agent_config, network) + # stop if found (means an id is given) + if segment_data.get('segmentation_id', None): + break + else: + LOG.warning("ccloud: Segment Data for network ID %s NOT FOUND #1. Will try again in some seconds." 
% network_id) + time.sleep(3) + except Exception as e: + LOG.exception("ccloud: Segment Data for network ID %s NOT FOUND #2. Will try again in some seconds.", network_id) + time.sleep(3) + + + network['provider:segmentation_id'] = \ + segment_data.get('segmentation_id', None) + network['provider:network_type'] = \ + segment_data.get('network_type', None) + network['provider:physical_network'] = \ + segment_data.get('physical_network', None) + + if segment_data.get('segmentation_id', None): + self.net_cache[network_id] = network + LOG.debug("ccloud: Network ID %s and Segment %s FOUND. Added to the cache." % (network_id, segment_data)) + else: + LOG.error("ccloud: Segment Data for network ID %s NOT FOUND. Returning dummy segment %s" % (network_id, segment_data)) + + else: + network = self.net_cache[network_id] + LOG.debug("ccloud: Network ID %s found and served from cache" % (network_id)) - return self.net_cache[network_id] + return network @log_helpers.log_method_call def _get_listener(self, context, listener_id): @@ -235,18 +287,7 @@ def _populate_member_network(self, context, member, network): member['vxlan_vteps'] = [] member['gre_vteps'] = [] - agent_config = {} - segment_data = self.disconnected_service.get_network_segment( - context, agent_config, network) - if segment_data: - network['provider:segmentation_id'] = \ - segment_data.get('segmentation_id', None) - network['provider:network_type'] = \ - segment_data.get('network_type', None) - network['provider:physical_network'] = \ - segment_data.get('physical_network', None) - - net_type = network.get('provider:network_type', "undefined") + net_type = network['provider:network_type'] if net_type == 'vxlan': if 'binding:host_id' in member['port']: host = member['port']['binding:host_id'] @@ -257,10 +298,6 @@ def _populate_member_network(self, context, member, network): host = member['port']['binding:host_id'] member['gre_vteps'] = self._get_endpoints( context, 'gre', host) - if 'provider:network_type' not in network: - network['provider:network_type'] = 'undefined' - if 'provider:segmentation_id' not in network: - network['provider:segmentation_id'] = 0 @log_helpers.log_method_call def _populate_loadbalancer_network_vteps( @@ -452,7 +489,7 @@ def _get_pools_and_healthmonitors(self, context, loadbalancer): return pools, healthmonitors @log_helpers.log_method_call - def _get_members(self, context, pools, subnet_map, network_map): + def _get_members(self, context, pools, subnet_map, network_map, agent_config): pool_members = [] if pools: members = self.plugin.db.get_pool_members( @@ -463,7 +500,7 @@ def _get_members(self, context, pools, subnet_map, network_map): for member in members: # Get extended member attributes, network, and subnet. member_dict, subnet, network = ( - self._get_extended_member(context, member) + self._get_extended_member(context, member, agent_config) ) subnet_map[subnet['id']] = subnet From 7b22529b0437d1b9394ade7c408c4380fe27a83a Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Fri, 6 Jul 2018 11:45:49 +0200 Subject: [PATCH 19/24] LBaaS Driver: Fix caching error in case of missing network segment retrieval Fix wrong retry logic for missing segments. 
Fix bindings assignment in case no agent is live --- f5lbaasdriver/v2/bigip/agent_scheduler.py | 3 +-- f5lbaasdriver/v2/bigip/plugin_rpc.py | 2 +- f5lbaasdriver/v2/bigip/service_builder.py | 18 +++++++++++------- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/f5lbaasdriver/v2/bigip/agent_scheduler.py b/f5lbaasdriver/v2/bigip/agent_scheduler.py index 359db1ea..c08c0fc0 100644 --- a/f5lbaasdriver/v2/bigip/agent_scheduler.py +++ b/f5lbaasdriver/v2/bigip/agent_scheduler.py @@ -167,8 +167,7 @@ def schedule(self, plugin, context, loadbalancer_id, env=None): candidates = self.get_agents_in_env( context, plugin, - env, - active=False + env ) if not candidates: LOG.error('ccloud: No f5 lbaas agents are active. No agent could be found for env %s' % env) diff --git a/f5lbaasdriver/v2/bigip/plugin_rpc.py b/f5lbaasdriver/v2/bigip/plugin_rpc.py index 0dbd5c50..d3de7cc3 100644 --- a/f5lbaasdriver/v2/bigip/plugin_rpc.py +++ b/f5lbaasdriver/v2/bigip/plugin_rpc.py @@ -117,7 +117,7 @@ def get_service_by_loadbalancer_id( lb, agent) except Exception as e: - LOG.exception("ccloud Exception: get_service_by_loadbalancer_id:", e) + LOG.error("ccloud Error in get_service_by_loadbalancer_id. ID = %s. Message %s " % (loadbalancer_id, e)) return service diff --git a/f5lbaasdriver/v2/bigip/service_builder.py b/f5lbaasdriver/v2/bigip/service_builder.py index 68251457..0f5d02c2 100644 --- a/f5lbaasdriver/v2/bigip/service_builder.py +++ b/f5lbaasdriver/v2/bigip/service_builder.py @@ -60,6 +60,7 @@ def build(self, context, loadbalancer, agent): self.net_cache = {} self.subnet_cache = {} self.last_cache_update = datetime.datetime.now() + LOG.debug('ccloud: Network cache regularly cleared after %s seconds' % constants_v2.NET_CACHE_SECONDS) service = {} with context.session.begin(subtransactions=True): @@ -211,8 +212,9 @@ def _get_subnet_cached(self, context, subnet_id): def _get_network_cached(self, context, network_id, agent_config): """Retrieve network from cache or from Neutron.""" network = None - # read network if not cached - if network_id not in self.net_cache: + # read network if not cached or no segment id given + if (network_id not in self.net_cache) or (network_id in self.net_cache and not self.net_cache[network_id]['provider:segmentation_id']): + LOG.debug("ccloud: Network ID %s NOT CACHED" % (network_id)) count = 0 # try 3 times while count < 3: @@ -239,7 +241,9 @@ def _get_network_cached(self, context, network_id, agent_config): # try to get segment data for network 3 times segment_data = None + count = 0 while count < 3: + count += 1 try: segment_data = self.disconnected_service.get_network_segment( context, agent_config, network) @@ -248,9 +252,9 @@ def _get_network_cached(self, context, network_id, agent_config): break else: LOG.warning("ccloud: Segment Data for network ID %s NOT FOUND #1. Will try again in some seconds." % network_id) - time.sleep(3) + time.sleep(10) except Exception as e: - LOG.exception("ccloud: Segment Data for network ID %s NOT FOUND #2. Will try again in some seconds.", network_id) + LOG.exception("ccloud: Segment Data for network ID %s NOT FOUND #2. Will try again in some seconds." % network_id) time.sleep(3) @@ -263,13 +267,13 @@ def _get_network_cached(self, context, network_id, agent_config): if segment_data.get('segmentation_id', None): self.net_cache[network_id] = network - LOG.debug("ccloud: Network ID %s and Segment %s FOUND. 
Added to the cache, Cache: " % (network_id, segment_data)) else: - LOG.error("ccloud: Segment Data for network ID %s NOT FOUND. Returning dummy segment %s" % (network_id, segment_data)) + LOG.error("ccloud: Segment Data for network ID %s NOT FOUND. Returning dummy segment %s " % (network_id, segment_data)) else: network = self.net_cache[network_id] - LOG.debug("ccloud: Network ID %s found and served from cache" % (network_id)) + LOG.debug("ccloud: Network ID %s found and served from cache, Cache: " % (network_id)) return network From 15bc958e08923579511f8d4a4a4d1ac48ccbac55 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 17 Jul 2018 14:42:59 +0200 Subject: [PATCH 20/24] LBaaS: Fix message type --- f5lbaasdriver/v2/bigip/plugin_rpc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/f5lbaasdriver/v2/bigip/plugin_rpc.py b/f5lbaasdriver/v2/bigip/plugin_rpc.py index d3de7cc3..8b435f1c 100644 --- a/f5lbaasdriver/v2/bigip/plugin_rpc.py +++ b/f5lbaasdriver/v2/bigip/plugin_rpc.py @@ -117,7 +117,7 @@ def get_service_by_loadbalancer_id( lb, agent) except Exception as e: - LOG.error("ccloud Error in get_service_by_loadbalancer_id. ID = %s. Message %s " % (loadbalancer_id, e)) + LOG.warning("ccloud Error in get_service_by_loadbalancer_id. ID = %s. Message %s " % (loadbalancer_id, e)) return service From afbd2af3af4b8f35d18676171acc65a56a818753 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 24 Jul 2018 11:17:00 +0200 Subject: [PATCH 21/24] LBaaS: Don't abort orphan cleanup in case of a snat pool deletion error. Schedule lb's to overloaded agent in case partition is already scheduled. Clean orphan cache every 24 h --- f5lbaasdriver/v2/bigip/agent_scheduler.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/f5lbaasdriver/v2/bigip/agent_scheduler.py b/f5lbaasdriver/v2/bigip/agent_scheduler.py index c08c0fc0..490c9809 100644 --- a/f5lbaasdriver/v2/bigip/agent_scheduler.py +++ b/f5lbaasdriver/v2/bigip/agent_scheduler.py @@ -215,11 +215,16 @@ def schedule(self, plugin, context, loadbalancer_id, env=None): chosen_agent = candidate break + # ccloud: schedule to an overloaded agent because we can't split partitions across F5 devices if chosen_agent: # Does the agent which had tenants assigned # to it still have capacity? if group_capacity >= 1.0: - chosen_agent = None + LOG.error('ccloud: scheduling loadbalancer %s to an overloaded agent with capacity %s because ' + 'tenant is already assigned to this agent!' 
+ % (loadbalancer_id, group_capacity)) + break + #chosen_agent = None else: break From a72e7e4640afbc3ff0b0f56c450864c763305b8c Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Thu, 26 Jul 2018 13:37:21 +0200 Subject: [PATCH 22/24] LBaaS: Added method for detection of unbound loadbalancers (lb's without agent binding) --- f5lbaasdriver/v2/bigip/plugin_rpc.py | 64 +++++++++++++++++++++++++++- 1 file changed, 62 insertions(+), 2 deletions(-) diff --git a/f5lbaasdriver/v2/bigip/plugin_rpc.py b/f5lbaasdriver/v2/bigip/plugin_rpc.py index 8b435f1c..bd9b8ce2 100644 --- a/f5lbaasdriver/v2/bigip/plugin_rpc.py +++ b/f5lbaasdriver/v2/bigip/plugin_rpc.py @@ -42,6 +42,7 @@ class LBaaSv2PluginCallbacksRPC(object): def __init__(self, driver=None): """LBaaSv2PluginCallbacksRPC constructor.""" self.driver = driver + self.cluster_wide_agents = {} def create_rpc_listener(self): topic = constants.TOPIC_PROCESS_ON_HOST_V2 @@ -89,6 +90,19 @@ def get_active_loadbalancers_for_agent(self, context, host=None): active_lb_ids.add(lb.id) return active_lb_ids + # ccloud: get a list of loadbalancer without binding to an agent + @log_helpers.log_method_call + def get_loadbalancers_without_agent_binding(self, context, env, group): + + agents = self.driver.scheduler.get_agents_in_env( + context, + self.driver.plugin, + env, + group) + + return self. self._list_loadbalancers_without_lbaas_agent_binding(context, agents) + + @log_helpers.log_method_call def get_service_by_loadbalancer_id( self, @@ -268,8 +282,6 @@ def get_loadbalancers_by_network(self, context, env, network_id, group=None, hos def _list_loadbalancers_on_lbaas_agent(self, context, id): - query = context.session.query(models.LoadBalancer) - query = context.session.query(agent_scheduler.LoadbalancerAgentBinding.loadbalancer_id) query = query.filter_by(agent_id=id) @@ -281,7 +293,24 @@ def _list_loadbalancers_on_lbaas_agent(self, context, id): return [] + def _list_loadbalancers_without_lbaas_agent_binding(self, context, agents): + + all_lbs = self.driver.plugin.db._get_resources(context, models.LoadBalancer) + all_bindings = self.driver.plugin.db._get_resources(context, agent_scheduler.LoadbalancerAgentBinding) + agent_ids = [agent.id for agent in agents] + bound_ids = [] + for bind in all_bindings: + if bind['loadbalancer_id']: + if bind['agent_id'] and bind['agent_id'] in agent_ids: + bound_ids.append(bind['loadbalancer_id']) + + unbound_lbs = [] + for lb in all_lbs: + if lb['id'] not in bound_ids: + unbound_lbs.append(lb) + + return unbound_lbs @log_helpers.log_method_call def update_loadbalancer_stats(self, @@ -899,3 +928,34 @@ def validate_l7policys_state_by_listener(self, context, listeners): has_l7policy[listener_id] = result LOG.debug("has_l7policy: ({})".format(has_l7policy)) return has_l7policy + + # ccloud: Not used at the moment + # + # return a single active agent to implement cluster wide changes + # which can not efficiently mapped back to a particulare agent + @log_helpers.log_method_call + def get_clusterwide_agent(self, context, env, group, host=None): + """Get an agent to perform clusterwide tasks.""" + LOG.debug('getting agent to perform clusterwide tasks') + with context.session.begin(subtransactions=True): + if (env, group) in self.cluster_wide_agents: + known_agent = self.cluster_wide_agents[(env, group)] + if self.driver.plugin.db.is_eligible_agent(active=True, + agent=known_agent): + return known_agent + else: + del(self.cluster_wide_agents[(env, group)]) + try: + agents = \ + self.driver.scheduler.get_agents_in_env(context, + 
self.driver.plugin, + env, group, True) + if agents: + self.cluster_wide_agents[(env, group)] = agents[0] + return agents[0] + else: + LOG.error('no active agents available for clusterwide ', + ' tasks %s group number %s' % (env, group)) + except Exception as exc: + LOG.error('clusterwide agent exception: %s' % str(exc)) + return {} From f3172521beff07699d32ffe50e18e7fba1448211 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Fri, 27 Jul 2018 09:42:01 +0200 Subject: [PATCH 23/24] LBaaS: Fix typo in f5 lbaas driver --- f5lbaasdriver/v2/bigip/plugin_rpc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/f5lbaasdriver/v2/bigip/plugin_rpc.py b/f5lbaasdriver/v2/bigip/plugin_rpc.py index bd9b8ce2..4d7b3739 100644 --- a/f5lbaasdriver/v2/bigip/plugin_rpc.py +++ b/f5lbaasdriver/v2/bigip/plugin_rpc.py @@ -100,7 +100,7 @@ def get_loadbalancers_without_agent_binding(self, context, env, group): env, group) - return self. self._list_loadbalancers_without_lbaas_agent_binding(context, agents) + return self._list_loadbalancers_without_lbaas_agent_binding(context, agents) @log_helpers.log_method_call From c1f01ddb746dc0410749eb27a96a29dddaca9738 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 15 Jan 2019 08:28:45 +0100 Subject: [PATCH 24/24] Improved handling for inactive agents to support upgrade of F5 devices better --- f5lbaasdriver/v2/bigip/agent_scheduler.py | 107 ++++++++++++++++----- f5lbaasdriver/v2/bigip/plugin_rpc.py | 109 ++++++++++++++++------ 2 files changed, 167 insertions(+), 49 deletions(-) diff --git a/f5lbaasdriver/v2/bigip/agent_scheduler.py b/f5lbaasdriver/v2/bigip/agent_scheduler.py index 490c9809..67d44d69 100644 --- a/f5lbaasdriver/v2/bigip/agent_scheduler.py +++ b/f5lbaasdriver/v2/bigip/agent_scheduler.py @@ -33,26 +33,29 @@ def __init__(self): """Initialze with the ChanceScheduler base class.""" super(TenantScheduler, self).__init__() + # ccloud: this method should work with rebinding. + # it only rebinds an agent of the same envGrp so no changes on BIGIPs will be made def get_lbaas_agent_hosting_loadbalancer(self, plugin, context, loadbalancer_id, env=None): """Return the agent that is hosting the loadbalancer.""" LOG.debug('Getting agent for loadbalancer %s with env %s' % (loadbalancer_id, env)) - lbaas_agent = None with context.session.begin(subtransactions=True): + lbaas_agent = None # returns {'agent': agent_dict} lbaas_agent = plugin.db.get_agent_hosting_loadbalancer( context, loadbalancer_id ) # if the agent bound to this loadbalancer is alive, return it - if lbaas_agent is not None: - - if not lbaas_agent['agent']['alive'] and env is not None: - # The agent bound to this loadbalancer is not live; - # find another agent in the same environment - # which environment group is the agent in + if lbaas_agent: + if (not lbaas_agent['agent']['alive'] or + not lbaas_agent['agent']['admin_state_up']) and \ + env is not None: + # The agent bound to this loadbalancer is not live + # or is not active. Find another agent in the same + # environment and environment group if possible ac = self.deserialize_agent_configurations( lbaas_agent['agent']['configurations'] ) @@ -61,25 +64,82 @@ def get_lbaas_agent_hosting_loadbalancer(self, plugin, context, gn = ac['environment_group_number'] else: gn = 1 - - # find all active agents matching the environment - # and group number. 
-                    env_agents = self.get_agents_in_env(
-                        context,
-                        plugin,
-                        env,
-                        group=gn,
-                        active=True
-                    )
-                    LOG.debug("Primary lbaas agent is dead, env_agents: %s",
-                              env_agents)
-                    if env_agents:
-                        # return the first active agent in the
-                        # group to process this task
-                        lbaas_agent = {'agent': env_agents[0]}
+                    LOG.debug("ccloud: scrubbing - Loadbalancer_id %s from EnvGroup %s will be rebound to agent %s" %
+                              (loadbalancer_id, gn, lbaas_agent['agent']))
+                    reassigned_agent = self.rebind_loadbalancers(
+                        context, plugin, env, gn, lbaas_agent['agent'])
+                    if reassigned_agent:
+                        lbaas_agent = {'agent': reassigned_agent}
 
         return lbaas_agent
 
+    def rebind_loadbalancers(
+            self, context, plugin, env, group, current_agent):
+        # wtn: check if this works
+        env_agents = self.get_agents_in_env(context, plugin, env,
+                                            group=group, active=True)
+        if env_agents:
+            reassigned_agent = env_agents[0]
+            bindings = \
+                context.session.query(
+                    agent_scheduler.LoadbalancerAgentBinding).filter_by(
+                        agent_id=current_agent['id']).all()
+
+            # wtn: disabled until tested
+            # for binding in bindings:
+            #     binding.agent_id = reassigned_agent['id']
+            #     context.session.add(binding)
+            LOG.debug("ccloud: TESTRUN scrubbing: %s Loadbalancers from EnvGroup %s bound to agent %s now bound to %s" %
+                      (len(bindings),
+                       group,
+                       current_agent['id'],
+                       reassigned_agent['id']))
+
+            return reassigned_agent
+        else:
+            LOG.debug("ccloud: scrubbing - No active agent found for envGrp %s. Rebinding skipped for agent %s" %
+                      (group, current_agent['id']))
+            return None
+
+    def get_dead_agents_in_env(
+            self, context, plugin, env, group=None):
+        return_agents = []
+        all_agents = self.get_agents_in_env(context,
+                                            plugin,
+                                            env,
+                                            group,
+                                            active=None)
+
+        for agent in all_agents:
+            if not plugin.db.is_eligible_agent(active=True, agent=agent):
+                agent_dead = plugin.db.is_agent_down(
+                    agent['heartbeat_timestamp'])
+                if not agent['admin_state_up'] or agent_dead:
+                    return_agents.append(agent)
+        return return_agents
+
+    def scrub_dead_agents(self, context, plugin, env, group=None):
+        dead_agents = self.get_dead_agents_in_env(context, plugin, env, group)
+        for agent in dead_agents:
+            ag = None
+            if group is None:
+                LOG.info("ccloud: scrubbing agents across EnvGroups. Dead agent: {}".format(agent))
+                ac = self.deserialize_agent_configurations(
+                    agent['configurations']
+                )
+                if 'environment_group_number' in ac:
+                    ag = ac['environment_group_number']
+                    LOG.info("ccloud: found group for dead agent. EnvGroup: {}".format(ag))
+            else:
+                ag = group
+                LOG.info("ccloud: scrubbing agents for ONE EnvGroup number %s. Dead agent: %s" % (ag, agent))
+            if ag:
+                LOG.debug("ccloud: scrubbing - Dead agent found in EnvGroup %s. Agent: %s" % (ag, agent))
+                self.rebind_loadbalancers(context, plugin, env, ag, agent)
+            else:
+                LOG.debug("ccloud: scrubbing - Dead agent found without EnvGroup. Skipping scrubbing")
Skipping scrubbing") + + def get_agents_in_env( self, context, plugin, env, group=None, active=None): """Get an active agents in the specified environment.""" @@ -268,4 +328,5 @@ def schedule(self, plugin, context, loadbalancer_id, env=None): 'lbaas agent %(agent_id)s'), {'loadbalancer_id': loadbalancer.id, 'agent_id': chosen_agent['id']}) + return chosen_agent diff --git a/f5lbaasdriver/v2/bigip/plugin_rpc.py b/f5lbaasdriver/v2/bigip/plugin_rpc.py index 4d7b3739..6c031f84 100644 --- a/f5lbaasdriver/v2/bigip/plugin_rpc.py +++ b/f5lbaasdriver/v2/bigip/plugin_rpc.py @@ -29,6 +29,7 @@ from neutron_lbaas import agent_scheduler from neutron_lbaas.services.loadbalancer import data_models from neutron_lbaas.extensions import loadbalancerv2 +from neutron_lbaas.services.loadbalancer import constants as nlb_constant from f5lbaasdriver.v2.bigip import constants_v2 as constants @@ -57,6 +58,45 @@ def create_rpc_listener(self): fanout=False) self.conn.consume_in_threads() + # change the admin_state_up of the an agent + @log_helpers.log_method_call + def set_agent_admin_state(self, context, admin_state_up, host=None): + """Set the admin_up_state of an agent.""" + if not host: + LOG.error('tried to set agent admin_state_up without host') + return False + with context.session.begin(subtransactions=True): + query = context.session.query(agents_db.Agent) + query = query.filter( + agents_db.Agent.agent_type == + nlb_constant.AGENT_TYPE_LOADBALANCERV2, + agents_db.Agent.host == host) + try: + agent = query.one() + if not agent.admin_state_up == admin_state_up: + agent.admin_state_up = admin_state_up + context.session.add(agent) + except Exception as exc: + LOG.error('query for agent produced: %s' % str(exc)) + return False + return True + + # change the admin_state_up of the an agent + @log_helpers.log_method_call + def scrub_dead_agents(self, context, env, group, host=None): + """Remove all non-alive or admin down agents.""" + LOG.debug('scrubbing dead agent bindings for group %s' % group) + with context.session.begin(subtransactions=True): + try: + # don't set group because otherwise only agent of same group could initiate scrubbing. + # scrub method get's group out of a dead agent to find another agent inside same group + self.driver.scheduler.scrub_dead_agents( + context, self.driver.plugin, env, group=None) + except Exception as exc: + LOG.error('scub dead agents exception: %s' % str(exc)) + return False + return True + # get a list of loadbalancer ids which are active on this agent host @log_helpers.log_method_call def get_active_loadbalancers_for_agent(self, context, host=None): @@ -105,10 +145,7 @@ def get_loadbalancers_without_agent_binding(self, context, env, group): @log_helpers.log_method_call def get_service_by_loadbalancer_id( - self, - context, - loadbalancer_id=None, - host=None): + self, context, loadbalancer_id=None, host=None): """Get the complete service definition by loadbalancer_id.""" service = {} with context.session.begin(subtransactions=True): @@ -127,9 +164,8 @@ def get_service_by_loadbalancer_id( # the preceeding get call returns a nested dict, unwind # one level if necessary agent = (agent['agent'] if 'agent' in agent else agent) - service = self.driver.service_builder.build(context, - lb, - agent) + service = self.driver.service_builder.build( + context, lb, agent) except Exception as e: LOG.warning("ccloud Error in get_service_by_loadbalancer_id. ID = %s. 
 
@@ -142,12 +178,10 @@ def get_all_loadbalancers(self, context, env, group=None, host=None):
         plugin = self.driver.plugin
 
         with context.session.begin(subtransactions=True):
+            self.driver.scheduler.scrub_dead_agents(
+                context, plugin, env, group)
             agents = self.driver.scheduler.get_agents_in_env(
-                context,
-                self.driver.plugin,
-                env,
-                group)
-
+                context, plugin, env, group, active=None)
             for agent in agents:
                 agent_lbs = self._list_loadbalancers_on_lbaas_agent(
                     context,
@@ -168,19 +202,15 @@ def get_all_loadbalancers(self, context, env, group=None, host=None):
 
     @log_helpers.log_method_call
     def get_active_loadbalancers(self, context, env, group=None, host=None):
-        """Get all loadbalancers for this group in this env."""
+        """Get active loadbalancers for this group in this env."""
         loadbalancers = []
         plugin = self.driver.plugin
 
         with context.session.begin(subtransactions=True):
+            self.driver.scheduler.scrub_dead_agents(
+                context, plugin, env, group)
             agents = self.driver.scheduler.get_agents_in_env(
-                context,
-                self.driver.plugin,
-                env,
-                group=group,
-                active=True
-            )
-
+                context, plugin, env, group, active=None)
             for agent in agents:
                 agent_lbs = self._list_loadbalancers_on_lbaas_agent(
                     context,
@@ -204,17 +234,15 @@ def get_active_loadbalancers(self, context, env, group=None, host=None):
 
     @log_helpers.log_method_call
     def get_pending_loadbalancers(self, context, env, group=None, host=None):
-        """Get all loadbalancers for this group in this env."""
+        """Get pending loadbalancers for this group in this env."""
        loadbalancers = []
         plugin = self.driver.plugin
 
         with context.session.begin(subtransactions=True):
+            self.driver.scheduler.scrub_dead_agents(
+                context, plugin, env, group)
             agents = self.driver.scheduler.get_agents_in_env(
-                context,
-                self.driver.plugin,
-                env,
-                group)
-
+                context, plugin, env, group, active=None)
             for agent in agents:
                 agent_lbs = self._list_loadbalancers_on_lbaas_agent(
                     context,
@@ -237,6 +265,35 @@ def get_pending_loadbalancers(self, context, env, group=None, host=None):
         else:
             return loadbalancers
 
+    @log_helpers.log_method_call
+    def get_errored_loadbalancers(self, context, env, group=None, host=None):
+        """Get errored loadbalancers for this group in this env."""
+        loadbalancers = []
+        plugin = self.driver.plugin
+        with context.session.begin(subtransactions=True):
+            self.driver.scheduler.scrub_dead_agents(
+                context, plugin, env, group)
+            agents = self.driver.scheduler.get_agents_in_env(
+                context, plugin, env, group, active=None)
+            for agent in agents:
+                agent_lbs = self._list_loadbalancers_on_lbaas_agent(
+                    context,
+                    agent.id
+                )
+                for lb in agent_lbs:
+                    if (lb.provisioning_status == plugin_constants.ERROR):
+                        loadbalancers.append(
+                            {
+                                'agent_host': agent['host'],
+                                'lb_id': lb.id,
+                                'tenant_id': lb.tenant_id
+                            }
+                        )
+        if host:
+            return [lb for lb in loadbalancers if lb['agent_host'] == host]
+        else:
+            return loadbalancers
+
     @log_helpers.log_method_call
     def get_loadbalancers_by_network(self, context, env, network_id, group=None, host=None,):
         """Get all loadbalancers for this group in this env."""
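For illustration only: the two pieces of logic this series leans on - detecting loadbalancers that have no binding to any agent of an environment group (patch 22) and picking a live, admin-up agent to rebind to when an agent goes down (patch 24) - reduce to a couple of simple set and filter operations. The standalone sketch below uses plain dicts as hypothetical stand-ins for the Neutron DB records; none of these function names exist in the driver, and this is not the driver's API.

def find_unbound_loadbalancers(loadbalancers, bindings, agents):
    """Return loadbalancers that have no binding to any of the given agents."""
    agent_ids = {agent['id'] for agent in agents}
    bound_ids = {b['loadbalancer_id'] for b in bindings
                 if b.get('loadbalancer_id') and b.get('agent_id') in agent_ids}
    return [lb for lb in loadbalancers if lb['id'] not in bound_ids]


def pick_rebind_target(agents, dead_agent_id):
    """Pick the first live, admin-up agent other than the dead one (same group assumed)."""
    for agent in agents:
        if agent['id'] == dead_agent_id:
            continue
        if agent.get('alive') and agent.get('admin_state_up'):
            return agent
    return None


if __name__ == '__main__':
    agents = [
        {'id': 'agent-1', 'alive': False, 'admin_state_up': True},
        {'id': 'agent-2', 'alive': True, 'admin_state_up': True},
    ]
    bindings = [{'loadbalancer_id': 'lb-1', 'agent_id': 'agent-1'}]
    loadbalancers = [{'id': 'lb-1'}, {'id': 'lb-2'}]

    # 'lb-2' has no binding to agent-1 or agent-2, so it is reported as unbound.
    print(find_unbound_loadbalancers(loadbalancers, bindings, agents))
    # agent-1 is dead, so its loadbalancers would be rebound to agent-2.
    print(pick_rebind_target(agents, dead_agent_id='agent-1'))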