From 9aad0f23ee278463b99291ac4dac321789889122 Mon Sep 17 00:00:00 2001
From: Priya Seth
Date: Thu, 26 Dec 2019 03:49:55 -0500
Subject: [PATCH 1/7] Changes for adding ppc64le support: correct version of postgresql is already installed in base image

---
 Dockerfile | 1 -
 1 file changed, 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index e594b256058..3ac0ac98c2f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -6,7 +6,6 @@ ENV DATABASE_URL=postgresql://root@localhost/vmdb_production?encoding=utf8&pool=

 RUN yum -y install --setopt=tsflags=nodocs \
     memcached \
-    postgresql-server \
     mod_ssl \
     openssh-clients \
     openssh-server \

From 557d8d0869b68d1768a8aeeb8a4f347b44f2f0c0 Mon Sep 17 00:00:00 2001
From: Priya Seth
Date: Wed, 1 Jan 2020 09:51:01 +0530
Subject: [PATCH 2/7] Update .travis.yml, before_install.sh, and setup_ruby_env.sh

---
 .travis.yml | 9 ++++++---
 tools/ci/setup_ruby_env.sh | 1 +
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index e17f7c10ccb..b4d19767e54 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,4 +1,7 @@
-dist: xenial
+dist: bionic
+arch:
+- amd64
+- ppc64le
 language: ruby
 rvm:
 - 2.5.7
@@ -17,8 +20,8 @@ env:
 - TEST_SUITE=brakeman
 matrix:
   fast_finish: true
-addons:
-  postgresql: '10'
+services:
+  - postgresql
 before_install:
 - source ${TRAVIS_BUILD_DIR}/tools/ci/before_install.sh
 before_script:
diff --git a/tools/ci/setup_ruby_env.sh b/tools/ci/setup_ruby_env.sh
index e763ff94458..86f4bdc6974 100644
--- a/tools/ci/setup_ruby_env.sh
+++ b/tools/ci/setup_ruby_env.sh
@@ -1,3 +1,4 @@
 ./tools/ci/setup_ruby_environment.rb
+bundle config --local build.sassc --disable-march-tune-native
 export BUNDLE_WITHOUT=development
 export BUNDLE_GEMFILE=${PWD}/Gemfile

From 7fc39fe98c70dcd7a0d6c802566de306d6dac0d3 Mon Sep 17 00:00:00 2001
From: Priya Seth
Date: Fri, 17 Jan 2020 01:24:40 -0500
Subject: [PATCH 3/7] address review comments

---
 Dockerfile | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Dockerfile b/Dockerfile
index 3ac0ac98c2f..f9722609cf0 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -6,6 +6,7 @@ ENV DATABASE_URL=postgresql://root@localhost/vmdb_production?encoding=utf8&pool=

 RUN yum -y install --setopt=tsflags=nodocs \
     memcached \
+  postgresql-server \
     mod_ssl \
     openssh-clients \
     openssh-server \

From 482e04aaa791afa7b57d2b545d13e659316a05cc Mon Sep 17 00:00:00 2001
From: Priya Seth
Date: Thu, 6 Feb 2020 01:26:17 -0500
Subject: [PATCH 4/7] Address review comments

---
 Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index f9722609cf0..e594b256058 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -6,7 +6,7 @@ ENV DATABASE_URL=postgresql://root@localhost/vmdb_production?encoding=utf8&pool=

 RUN yum -y install --setopt=tsflags=nodocs \
     memcached \
-  postgresql-server \
+    postgresql-server \
     mod_ssl \
     openssh-clients \
     openssh-server \

From b6fc82c9d668aa5f8391109b030980f8bd263e24 Mon Sep 17 00:00:00 2001
From: Priya Seth
Date: Fri, 27 Mar 2020 12:42:39 -0400
Subject: [PATCH 5/7] Indentation correction

---
 .travis.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index b4d19767e54..a8dd6838a04 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -21,7 +21,7 @@ env:
 matrix:
   fast_finish: true
 services:
-  - postgresql
+- postgresql
 before_install:
 - source ${TRAVIS_BUILD_DIR}/tools/ci/before_install.sh
 before_script:

From
ab7369ec6d8adc1569f9ba9873c3a12f0e2724fe Mon Sep 17 00:00:00 2001 From: Siddhesh-Ghadi <61187612+Siddhesh-Ghadi@users.noreply.github.com> Date: Tue, 28 Apr 2020 13:14:03 +0530 Subject: [PATCH 6/7] Sync with ManageIQ/manageiq->master --- .gitignore | 3 +- CHANGELOG.md | 25 ++ Gemfile | 5 +- app/models/authenticator/base.rb | 13 +- app/models/conversion_host.rb | 32 +- app/models/conversion_host/configurations.rb | 6 +- app/models/custom_button.rb | 2 +- app/models/dialog_field_importer.rb | 2 +- app/models/ems_event.rb | 22 +- app/models/ext_management_system.rb | 42 ++- app/models/filesystem.rb | 2 +- app/models/host.rb | 12 +- app/models/infra_conversion_job.rb | 11 +- app/models/manageiq/providers/base_manager.rb | 5 +- .../providers/base_manager/refresher.rb | 12 +- .../cloud_manager/provision/state_machine.rb | 2 +- .../automation_manager/machine_credential.rb | 2 +- .../providers/embedded_automation_manager.rb | 4 +- .../manageiq/providers/infra_manager.rb | 5 +- .../manageiq/providers/network_manager.rb | 4 +- .../cinder_manager/refresher.rb | 7 - app/models/metric/ci_mixin/processing.rb | 25 ++ app/models/miq_ae_class.rb | 2 +- app/models/miq_ae_namespace.rb | 48 +-- app/models/miq_cockpit_ws_worker/runner.rb | 21 +- app/models/miq_event_handler/runner.rb | 35 -- app/models/miq_queue.rb | 64 +++- app/models/miq_report.rb | 17 + app/models/miq_report/formatters/text.rb | 2 +- app/models/miq_report/generator.rb | 4 +- app/models/miq_report_result.rb | 4 +- app/models/miq_web_service_worker.rb | 5 + app/models/miq_worker/container_common.rb | 2 +- app/models/mixins/compliance_mixin.rb | 6 + .../mixins/miq_provision_quota_mixin.rb | 2 +- app/models/mixins/process_tasks_mixin.rb | 2 +- app/models/service_template.rb | 37 +-- ...rvice_template_transformation_plan_task.rb | 26 +- app/models/storage_profile.rb | 2 +- app/models/time_profile.rb | 2 +- app/models/vm_or_template.rb | 11 + app/models/vm_reconfigure_task.rb | 2 +- bin/setup | 28 +- config/database.pg.yml | 2 +- config/initializers/fast_gettext.rb | 8 +- config/initializers/permissions_repository.rb | 12 - config/messaging.artemis.yml | 15 + config/messaging.kafka.yml | 15 + config/settings.yml | 10 +- .../create_customization_templates_fixture.rb | 2 +- .../tools/create_pxe_image_types_fixture.rb | 2 +- .../object_definition.rb | 43 ++- lib/extensions/ar_miq_set.rb | 2 + lib/extensions/ar_types.rb | 2 +- lib/manageiq/environment.rb | 16 +- lib/miq_cockpit.rb | 2 +- lib/miq_expression.rb | 23 -- lib/rbac/filterer.rb | 2 +- lib/services/dialog_import_service.rb | 24 +- .../git_based_domain_import_service.rb | 140 ++++++++ lib/tasks/test.rake | 10 +- lib/unique_within_region_validator.rb | 13 + lib/vmdb/fast_gettext_helper.rb | 14 +- lib/vmdb/permission_stores.rb | 43 ++- lib/vmdb/permission_stores/null.rb | 17 - lib/vmdb/permission_stores/yaml.rb | 23 -- spec/factories/ext_management_system.rb | 24 +- spec/lib/miq_expression_spec.rb | 39 --- spec/lib/rbac/filterer_spec.rb | 1 + .../services/dialog_import_service_spec.rb | 3 + .../git_based_domain_import_service_spec.rb | 303 ++++++++++++++++++ spec/lib/vmdb/permission_stores_spec.rb | 70 +--- spec/models/authenticator/httpd_spec.rb | 12 + spec/models/classification_spec.rb | 4 +- .../conversion_host/configurations_spec.rb | 15 +- spec/models/conversion_host_spec.rb | 10 +- spec/models/ems_event_spec.rb | 36 ++- spec/models/ext_management_system_spec.rb | 52 ++- spec/models/infra_conversion_job_spec.rb | 126 +++++++- .../automation_manager_spec.rb | 12 +- 
.../manageiq/providers/infra_manager_spec.rb | 10 + spec/models/miq_ae_class_spec.rb | 17 +- spec/models/miq_ae_field_spec.rb | 3 +- spec/models/miq_ae_instance_spec.rb | 17 +- spec/models/miq_ae_method_spec.rb | 10 +- spec/models/miq_ae_namespace_spec.rb | 14 +- spec/models/miq_policy_spec.rb | 9 + spec/models/miq_queue_spec.rb | 85 +++++ spec/models/miq_report/generator_spec.rb | 17 + spec/models/miq_report_spec.rb | 11 + spec/models/miq_web_server_worker_spec.rb | 8 + .../miq_worker/container_common_spec.rb | 1 - ..._template_transformation_plan_task_spec.rb | 31 +- spec/models/vm_or_template_spec.rb | 82 +++++ spec/models/vm_reconfigure_task_spec.rb | 16 +- spec/support/ems_refresh_helper.rb | 2 +- spec/support/vmdb_permission_store_helper.rb | 25 -- .../server_settings_replicator_spec.rb | 36 --- tools/configure_server_settings.rb | 81 ++--- tools/db_printers/print_network.rb | 6 +- tools/db_printers/print_scsi.rb | 6 +- .../server_settings_replicator.rb | 31 -- 102 files changed, 1559 insertions(+), 671 deletions(-) delete mode 100644 config/initializers/permissions_repository.rb create mode 100644 config/messaging.artemis.yml create mode 100644 config/messaging.kafka.yml create mode 100644 lib/services/git_based_domain_import_service.rb delete mode 100644 lib/vmdb/permission_stores/null.rb delete mode 100644 lib/vmdb/permission_stores/yaml.rb create mode 100644 spec/lib/services/git_based_domain_import_service_spec.rb create mode 100644 spec/models/miq_web_server_worker_spec.rb delete mode 100644 spec/support/vmdb_permission_store_helper.rb delete mode 100644 spec/tools/server_settings_replicator/server_settings_replicator_spec.rb delete mode 100755 tools/server_settings_replicator/server_settings_replicator.rb diff --git a/.gitignore b/.gitignore index 1f1036de3d4..7f640e6d716 100644 --- a/.gitignore +++ b/.gitignore @@ -42,9 +42,10 @@ bin/* # config/ config/apache +config/cable.yml config/cockpit config/database.yml* -config/cable.yml +config/messaging.yml config/vmdb.yml.db config/initializers/*.local.rb diff --git a/CHANGELOG.md b/CHANGELOG.md index 2f84af14cc0..a7b19aa30aa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,31 @@ All notable changes to this project will be documented in this file. 
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +# Unreleased as of Sprint 134 ending 2020-04-13 + +### Fixed + +* Fix messaging_client exception if no ENV and no yaml are present [(#20062)](https://github.com/ManageIQ/manageiq/pull/20062) +* Fix a bug when running Rbac on VimPerformanceDaily [(#20058)](https://github.com/ManageIQ/manageiq/pull/20058) +* Fix improperly addressed rubocop warning [(#20044)](https://github.com/ManageIQ/manageiq/pull/20044) +* Only show catalog types for supported EMSs [(#20039)](https://github.com/ManageIQ/manageiq/pull/20039) +* Fix storage profile ems foreign key [(#20038)](https://github.com/ManageIQ/manageiq/pull/20038) + +# Unreleased as of Sprint 133 ending 2020-03-30 + +### Added + +* Add additional privilege escalation methods [(#20019)](https://github.com/ManageIQ/manageiq/pull/20019) +* Add kafka connection info to worker containers [(#20000)](https://github.com/ManageIQ/manageiq/pull/20000) +* Add kafka as an option for prototype.queue_type [(#19984)](https://github.com/ManageIQ/manageiq/pull/19984) +* Consolidate existing server setting CLI tools [(#19848)](https://github.com/ManageIQ/manageiq/pull/19848) + +### Fixed + +* Remove the journald logger to resolve segfaults [(#20004)](https://github.com/ManageIQ/manageiq/pull/20004) +* Don't queue things that need to run on the same worker container [(#19956)](https://github.com/ManageIQ/manageiq/pull/19956) +* Embedded ansible provider should allow creation in maintenance zone [(#19947)](https://github.com/ManageIQ/manageiq/pull/19947) + # Unreleased as of Sprint 132 ending 2020-03-16 ### Added diff --git a/Gemfile b/Gemfile index 8884dc2aaa5..8d3a66af8db 100644 --- a/Gemfile +++ b/Gemfile @@ -31,7 +31,7 @@ gem "acts_as_tree", "~>2.7" # acts_as_tree needs to be require gem "ancestry", "~>3.0.7", :require => false gem "aws-sdk-s3", "~>1.0", :require => false # For FileDepotS3 gem "bcrypt", "~> 3.1.10", :require => false -gem "bundler", ">=1.15", :require => false +gem "bundler", "~> 2.1.4", :require => false gem "byebug", :require => false gem "color", "~>1.8" gem "config", "~>2.2", ">=2.2.1", :require => false @@ -53,6 +53,7 @@ gem "manageiq-loggers", "~>0.5.0", :require => false gem "manageiq-messaging", "~>0.1.4", :require => false gem "manageiq-password", "~>0.3", :require => false gem "manageiq-postgres_ha_admin", "~>3.1", :require => false +gem "manageiq-ssh-util", "~>0.1.0", :require => false gem "memoist", "~>0.15.0", :require => false gem "mime-types", "~>3.0", :path => File.expand_path("mime-types-redirector", __dir__) gem "money", "~>6.13.5", :require => false @@ -138,7 +139,7 @@ group :redfish, :manageiq_default do end group :qpid_proton, :optional => true do - gem "qpid_proton", "~>0.26.0", :require => false + gem "qpid_proton", "~>0.30.0", :require => false end group :systemd, :optional => true do diff --git a/app/models/authenticator/base.rb b/app/models/authenticator/base.rb index a150732ad80..716dcfe90d0 100644 --- a/app/models/authenticator/base.rb +++ b/app/models/authenticator/base.rb @@ -137,7 +137,18 @@ def authorize(taskid, username, *args) end user.lastlogon = Time.now.utc - user.save! + if user.new_record? + User.with_lock do + user.save! + rescue ActiveRecord::RecordInvalid # Try update when catching create race condition. + userid, user = find_or_initialize_user(identity, username) + update_user_attributes(user, userid, identity) + user.miq_groups = matching_groups + user.save! + end + else + user.save! 
+ end _log.info("Authorized User: [#{user.userid}]") task.userid = user.userid diff --git a/app/models/conversion_host.rb b/app/models/conversion_host.rb index 2484f2136ab..4249a648750 100644 --- a/app/models/conversion_host.rb +++ b/app/models/conversion_host.rb @@ -78,9 +78,9 @@ def verify_credentials(auth_type = 'v2v', options = {}) Net::SSH.start(host, auth.userid, ssh_options) { |ssh| ssh.exec!('uname -a') } end rescue Net::SSH::AuthenticationFailed => err - raise MiqException::MiqInvalidCredentialsError, _("Incorrect credentials - %{error_message}") % {:error_message => err.message} + raise err, _("Incorrect credentials - %{error_message}") % {:error_message => err.message} rescue Net::SSH::HostKeyMismatch => err - raise MiqException::MiqSshUtilHostKeyMismatch, _("Host key mismatch - %{error_message}") % {:error_message => err.message} + raise err, _("Host key mismatch - %{error_message}") % {:error_message => err.message} rescue Exception => err raise _("Unknown error - %{error_message}") % {:error_message => err.message} else @@ -155,8 +155,8 @@ def ipaddress(family = 'ipv4') # # @return [Integer] length of data written to file # - # @raise [MiqException::MiqInvalidCredentialsError] if conversion host credentials are invalid - # @raise [MiqException::MiqSshUtilHostKeyMismatch] if conversion host key has changed + # @raise [Net::SSH::AuthenticationFailed] if conversion host credentials are invalid + # @raise [Net::SSH::HostKeyMismatch] if conversion host key has changed # @raise [JSON::GeneratorError] if limits hash can't be converted to JSON # @raise [StandardError] if any other problem happens def apply_task_limits(task_id, limits = {}) @@ -165,7 +165,7 @@ def apply_task_limits(task_id, limits = {}) command = AwesomeSpawn.build_command_line("mv", ["/tmp/#{task_id}-limits.json", "/var/lib/uci/#{task_id}/limits.json"]) ssu.shell_exec(command, nil, nil, nil) end - rescue MiqException::MiqInvalidCredentialsError, MiqException::MiqSshUtilHostKeyMismatch => err + rescue Net::SSH::AuthenticationFailed, Net::SSH::HostKeyMismatch => err raise "Failed to connect and apply limits for task '#{task_id}' with [#{err.class}: #{err}]" rescue JSON::GeneratorError => err raise "Could not generate JSON from limits '#{limits}' with [#{err.class}: #{err}]" @@ -180,8 +180,8 @@ def apply_task_limits(task_id, limits = {}) # # @return [Integer] length of data written to conversion options file # - # @raise [MiqException::MiqInvalidCredentialsError] if conversion host credentials are invalid - # @raise [MiqException::MiqSshUtilHostKeyMismatch] if conversion host key has changed + # @raise [Net::SSH::AuthenticationFailed] if conversion host credentials are invalid + # @raise [Net::SSH::HostKeyMismatch] if conversion host key has changed # @raise [JSON::GeneratorError] if limits hash can't be converted to JSON # @raise [StandardError] if any other problem happens def prepare_conversion(task_id, conversion_options) @@ -197,7 +197,7 @@ def prepare_conversion(task_id, conversion_options) command = AwesomeSpawn.build_command_line("mv", ["/tmp/#{task_id}-input.json", "/var/lib/uci/#{task_id}/input.json"]) ssu.shell_exec(command, nil, nil, nil) end - rescue MiqException::MiqInvalidCredentialsError, MiqException::MiqSshUtilHostKeyMismatch => err + rescue Net::SSH::AuthenticationFailed, Net::SSH::HostKeyMismatch => err raise "Failed to connect and prepare conversion for task '#{task_id}' with [#{err.class}: #{err}]" rescue JSON::GeneratorError => err raise "Could not generate JSON for task '#{task_id}' from options 
'#{filtered_options}' with [#{err.class}: #{err}]" @@ -210,14 +210,14 @@ def prepare_conversion(task_id, conversion_options) # # @return [Boolean] true if the file can be retrieved and parsed, false otherwise # - # @raise [MiqException::MiqInvalidCredentialsError] if conversion host credentials are invalid - # @raise [MiqException::MiqSshUtilHostKeyMismatch] if conversion host key has changed + # @raise [Net::SSH::AuthenticationFailed] if conversion host credentials are invalid + # @raise [Net::SSH::HostKeyMismatch] if conversion host key has changed # @raise [JSON::ParserError] if file cannot be parsed as JSON def luks_keys_vault_valid? luks_keys_vault_json = connect_ssh { |ssu| ssu.get_file("/root/.v2v_luks_keys_vault.json", nil) } JSON.parse(luks_keys_vault_json) true - rescue MiqException::MiqInvalidCredentialsError, MiqException::MiqSshUtilHostKeyMismatch => err + rescue Net::SSH::AuthenticationFailed, Net::SSH::HostKeyMismatch => err raise "Failed to connect and retrieve LUKS keys vault from file '/root/.v2v_luks_keys_vault.json' with [#{err.class}: #{err}]" rescue JSON::ParserError raise "Could not parse conversion state data from file '/root/.v2v_luks_keys_vault.json': #{json_state}" @@ -266,7 +266,7 @@ def run_conversion(task_id, conversion_options) filtered_options = filter_options(conversion_options) prepare_conversion(task_id, conversion_options) connect_ssh { |ssu| ssu.shell_exec(build_podman_command(task_id, conversion_options), nil, nil, nil) } - rescue MiqException::MiqInvalidCredentialsError, MiqException::MiqSshUtilHostKeyMismatch => err + rescue Net::SSH::AuthenticationFailed, Net::SSH::HostKeyMismatch => err raise "Failed to connect and run conversion using options #{filtered_options} with [#{err.class}: #{err}]" rescue => err raise "Starting conversion for task '#{task_id}' failed on '#{resource.name}' with [#{err.class}: #{err}]" @@ -313,7 +313,7 @@ def kill_virtv2v(task_id, signal = 'TERM') def get_conversion_state(task_id) json_state = connect_ssh { |ssu| ssu.get_file("/var/lib/uci/#{task_id}/state.json", nil) } JSON.parse(json_state) - rescue MiqException::MiqInvalidCredentialsError, MiqException::MiqSshUtilHostKeyMismatch => err + rescue Net::SSH::AuthenticationFailed, Net::SSH::HostKeyMismatch => err raise "Failed to connect and retrieve conversion state data from file '/var/lib/uci/#{task_id}/state.json' with [#{err.class}: #{err}]" rescue JSON::ParserError raise "Could not parse conversion state data from file '/var/lib/uci/#{task_id}/state.json': #{json_state}" @@ -408,12 +408,12 @@ def filter_options(options) options.clone.tap { |h| h.each { |k, _v| h[k] = "__FILTERED__" if ignore.any? { |i| k.to_s.end_with?(i) } } } end - # Connect to the conversion host using the MiqSshUtil wrapper using the authentication + # Connect to the conversion host using the ManageIQ::SSH::Util wrapper using the authentication # parameters appropriate for that type of resource. 
# def connect_ssh - require 'MiqSshUtil' - MiqSshUtil.shell_with_su(*miq_ssh_util_args) do |ssu, _shell| + require 'manageiq-ssh-util' + ManageIQ::SSH::Util.shell_with_su(*miq_ssh_util_args) do |ssu, _shell| yield(ssu) end rescue Exception => e diff --git a/app/models/conversion_host/configurations.rb b/app/models/conversion_host/configurations.rb index 25bddeef836..b8fb427c03c 100644 --- a/app/models/conversion_host/configurations.rb +++ b/app/models/conversion_host/configurations.rb @@ -54,6 +54,7 @@ def enable_queue(params, auth_user = nil) params = params.symbolize_keys resource = params.delete(:resource) + raise "#{resource.class.name.demodulize} '#{resource.name}' doesn't have a hostname or IP address in inventory" if resource.hostname.nil? && resource.ipaddresses.empty? raise "the resource '#{resource.name}' is already configured as a conversion host" if ConversionHost.exists?(:resource => resource) params[:resource_id] = resource.id @@ -113,12 +114,13 @@ def disable_queue(auth_user = nil) def disable(_params = nil, _auth_user = nil) resource_info = "type=#{resource.class.name} id=#{resource.id}" - _log.debug("Disabling a conversion_host #{resource_info}") + raise "There are active migration tasks running on this conversion host" if active_tasks.present? + _log.debug("Disabling a conversion_host #{resource_info}") disable_conversion_host_role destroy! rescue StandardError => error - raise + raise error ensure self.class.notify_configuration_result('disable', error.nil?, resource_info) end diff --git a/app/models/custom_button.rb b/app/models/custom_button.rb index 195d664a0e6..8e68e9a8d1f 100644 --- a/app/models/custom_button.rb +++ b/app/models/custom_button.rb @@ -3,7 +3,7 @@ class CustomButton < ApplicationRecord scope :with_array_order, lambda { |ids, column = :id, column_type = :bigint| order = sanitize_sql_array(["array_position(ARRAY[?]::#{column_type}[], #{table_name}.#{column}::#{column_type})", ids]) - order(order) + order(Arel.sql(order)) } serialize :options, Hash diff --git a/app/models/dialog_field_importer.rb b/app/models/dialog_field_importer.rb index 74a60a902df..8d071d1d159 100644 --- a/app/models/dialog_field_importer.rb +++ b/app/models/dialog_field_importer.rb @@ -34,7 +34,7 @@ def import_field(dialog_field_attributes, export_version = DialogImportService:: elsif dialog_field_attributes["type"].nil? 
dialog_field_attributes.delete("dialog_field_responders") dialog_field_attributes.delete("resource_action") - DialogField.create(dialog_field_attributes) + DialogField.create!(dialog_field_attributes) else raise InvalidDialogFieldTypeError end diff --git a/app/models/ems_event.rb b/app/models/ems_event.rb index 036ea8e00d2..560e34a05db 100644 --- a/app/models/ems_event.rb +++ b/app/models/ems_event.rb @@ -25,22 +25,22 @@ def self.bottleneck_event_groups end def self.add_queue(meth, ems_id, event) - if Settings.prototype.queue_type == 'artemis' - MiqQueue.artemis_client('event_handler').publish_topic( - :service => "events", + unless MiqQueue.messaging_type == "miq_queue" + MiqQueue.messaging_client('event_handler')&.publish_topic( + :service => "manageiq.ems-events", :sender => ems_id, :event => event[:event_type], :payload => event ) - else - MiqQueue.submit_job( - :service => "event", - :target_id => ems_id, - :class_name => "EmsEvent", - :method_name => meth, - :args => [event], - ) end + + MiqQueue.submit_job( + :service => "event", + :target_id => ems_id, + :class_name => "EmsEvent", + :method_name => meth, + :args => [event] + ) end def self.add(ems_id, event_hash) diff --git a/app/models/ext_management_system.rb b/app/models/ext_management_system.rb index 930f5230df3..4643cb2870e 100644 --- a/app/models/ext_management_system.rb +++ b/app/models/ext_management_system.rb @@ -17,22 +17,17 @@ def self.supported_types supported_subclasses.collect(&:ems_type) end - def self.leaf_subclasses - descendants.select { |d| d.subclasses.empty? } + def self.supported_subclasses + leaf_subclasses.select(&:permitted?) end - def self.supported_subclasses - subclasses.flat_map do |s| - s.subclasses.empty? ? s : s.supported_subclasses - end + def self.permitted? + Vmdb::PermissionStores.instance.supported_ems_type?(ems_type) end + delegate :permitted?, :to => :class def self.supported_types_and_descriptions_hash - supported_subclasses.each_with_object({}) do |klass, hash| - if Vmdb::PermissionStores.instance.supported_ems_type?(klass.ems_type) - hash[klass.ems_type] = klass.description - end - end + supported_subclasses.each_with_object({}) { |klass, hash| hash[klass.ems_type] = klass.description } end def self.api_allowed_attributes @@ -40,13 +35,21 @@ def self.api_allowed_attributes end def self.supported_types_for_create - leaf_subclasses.select(&:supported_for_create?) + supported_subclasses.select(&:supported_for_create?) + end + + def self.supported_types_for_catalog + supported_subclasses.select(&:supported_for_catalog?) end def self.supported_for_create? !reflections.include?("parent_manager") end + def self.supported_for_catalog? + catalog_types.present? + end + def self.provider_create_params supported_types_for_create.each_with_object({}) do |ems_type, create_params| create_params[ems_type.name] = ems_type.params_for_create if ems_type.respond_to?(:params_for_create) @@ -540,6 +543,9 @@ def last_refresh_status end end + # Queue an EMS refresh using +opts+. Credentials must exist, and the + # authentication status must be ok, otherwise an error is raised. + # def refresh_ems(opts = {}) if missing_credentials? raise _("no Provider credentials defined") @@ -550,6 +556,18 @@ def refresh_ems(opts = {}) EmsRefresh.queue_refresh(self, nil, opts) end + alias queue_refresh refresh_ems + + # Execute an EMS refresh immediately. Credentials must exist, and the + # authentication status must be ok, otherwise an error is raised. 
+ # + def refresh + raise _("no Provider credentials defined") if missing_credentials? + raise _("Provider failed last authentication check") unless authentication_status_ok? + + EmsRefresh.refresh(self) + end + def self.ems_infra_discovery_types @ems_infra_discovery_types ||= %w(virtualcenter scvmm rhevm openstack_infra) end diff --git a/app/models/filesystem.rb b/app/models/filesystem.rb index bc8a8ce90cc..626e9804d01 100644 --- a/app/models/filesystem.rb +++ b/app/models/filesystem.rb @@ -46,7 +46,7 @@ def self.process_sub_xml(xmlNode, path, options = {}) nh = e.attributes.to_h nh[:base_name] = nh[:name] - nh[:name] = File.join(path, nh[:name]) + nh[:name] = nh[:fqname] nh[:rsc_type] = e.name nh.delete(:fqname) nh[:mtime] = Time.parse(nh[:mtime]) diff --git a/app/models/host.rb b/app/models/host.rb index 8698d29decd..bb2e73cfd56 100644 --- a/app/models/host.rb +++ b/app/models/host.rb @@ -799,9 +799,9 @@ def verify_credentials_with_ssh(auth_type = nil, options = {}) # connect_ssh logs address and user name(s) being used to make connection _log.info("Verifying Host SSH credentials for [#{name}]") connect_ssh(options) { |ssu| ssu.exec("uname -a") } - rescue MiqException::MiqInvalidCredentialsError - raise MiqException::MiqInvalidCredentialsError, _("Login failed due to a bad username or password.") - rescue MiqException::MiqSshUtilHostKeyMismatch + rescue Net::SSH::AuthenticationFailed => err + raise err, _("Login failed due to a bad username or password.") + rescue Net::SSH::HostKeyMismatch raise # Re-raise the error so the UI can prompt the user to allow the keys to be reset. rescue Exception => err _log.warn(err.inspect) @@ -1058,7 +1058,7 @@ def ssh_users_and_passwords end def connect_ssh(options = {}) - require 'MiqSshUtil' + require 'manageiq-ssh-util' rl_user, rl_password, su_user, su_password, additional_options = ssh_users_and_passwords options.merge!(additional_options) @@ -1073,7 +1073,7 @@ def connect_ssh(options = {}) _log.info("Initiating SSH connection to Host:[#{name}] using [#{hostname}] for user:[#{users}]. Options:[#{logged_options.inspect}]") begin - MiqSshUtil.shell_with_su(hostname, rl_user, rl_password, su_user, su_password, options) do |ssu, _shell| + ManageIQ::SSH::Util.shell_with_su(hostname, rl_user, rl_password, su_user, su_password, options) do |ssu, _shell| _log.info("SSH connection established to [#{hostname}]") yield(ssu) end @@ -1410,7 +1410,7 @@ def scan_from_queue(taskid = nil) save end - rescue MiqException::MiqSshUtilHostKeyMismatch + rescue Net::SSH::HostKeyMismatch # Keep from dumping stack trace for this error which is sufficiently logged in the connect_ssh method rescue => err _log.log_backtrace(err) diff --git a/app/models/infra_conversion_job.rb b/app/models/infra_conversion_job.rb index 4b51bb15f32..ad98cd2a7f1 100644 --- a/app/models/infra_conversion_job.rb +++ b/app/models/infra_conversion_job.rb @@ -229,6 +229,7 @@ def task_progress def update_migration_task_progress(state_phase, state_progress = nil) progress = task_progress + return if progress[:status] == "error" state_hash = send(state_phase, progress[:states][state.to_sym], state_progress) progress[:states][state.to_sym] = state_hash if state_phase == :on_entry @@ -252,10 +253,12 @@ def handover_to_automate end def abort_conversion(message, status) + _log.error("Aborting conversion: #{message}") migration_task.canceling progress = task_progress progress[:current_description] = "Migration failed: #{message}. 
Cancelling" - progress[:status] = "error" + progress[:status] = status + progress[:states][state.to_sym] = {} if state == 'waiting_to_start' migration_task.update_options(:progress => progress) queue_signal(:abort_virtv2v) end @@ -487,7 +490,9 @@ def poll_transform_vm_complete migration_task.get_conversion_state case migration_task.options[:virtv2v_status] when 'active' - unless migration_task.warm_migration? + if migration_task.two_phase? + update_migration_task_progress(:on_retry, :message => 'Converting disks') + else virtv2v_disks = migration_task.options[:virtv2v_disks] converted_disks = virtv2v_disks.reject { |disk| disk[:percent].zero? } if converted_disks.empty? @@ -499,8 +504,8 @@ def poll_transform_vm_complete message = "Converting disk #{converted_disks.length} / #{virtv2v_disks.length} [#{percent.round(2)}%]." end update_migration_task_progress(:on_retry, :message => message, :percent => percent) - queue_signal(:poll_transform_vm_complete, :deliver_on => Time.now.utc + state_retry_interval) end + queue_signal(:poll_transform_vm_complete, :deliver_on => Time.now.utc + state_retry_interval) when 'failed' raise migration_task.options[:virtv2v_message] when 'succeeded' diff --git a/app/models/manageiq/providers/base_manager.rb b/app/models/manageiq/providers/base_manager.rb index ac414a68bed..d869079da2d 100644 --- a/app/models/manageiq/providers/base_manager.rb +++ b/app/models/manageiq/providers/base_manager.rb @@ -16,9 +16,10 @@ def ext_management_system self end - def supported_catalog_types - [] + def self.catalog_types + {} end + delegate :catalog_types, :to => :class def refresher self.class::Refresher diff --git a/app/models/manageiq/providers/base_manager/refresher.rb b/app/models/manageiq/providers/base_manager/refresher.rb index 76084c933c5..5aaeb44dabb 100644 --- a/app/models/manageiq/providers/base_manager/refresher.rb +++ b/app/models/manageiq/providers/base_manager/refresher.rb @@ -129,21 +129,13 @@ def parse_targeted_inventory(ems, _target, inventory) persister end - def parse_legacy_inventory(ems) - ems.class::RefreshParser.ems_inv_to_hashes(ems, refresher_options) - end - # Saves the inventory to the DB # # @param ems [ManageIQ::Providers::BaseManager] # @param target [ManageIQ::Providers::BaseManager or InventoryRefresh::Target or InventoryRefresh::TargetCollection] # @param parsed [Array or ManageIQ::Providers::Inventory::Persister] - def save_inventory(ems, target, parsed_hashes_or_persister) - if parsed_hashes_or_persister.kind_of?(ManageIQ::Providers::Inventory::Persister) - parsed_hashes_or_persister.persist! - else - EmsRefresh.save_ems_inventory(ems, parsed_hashes_or_persister, target) - end + def save_inventory(ems, _target, persister) + InventoryRefresh::SaveInventory.save_inventory(ems, persister.inventory_collections) end def post_refresh_ems_cleanup(_ems, _targets) diff --git a/app/models/manageiq/providers/cloud_manager/provision/state_machine.rb b/app/models/manageiq/providers/cloud_manager/provision/state_machine.rb index e9d7fd9747b..72739440e9a 100644 --- a/app/models/manageiq/providers/cloud_manager/provision/state_machine.rb +++ b/app/models/manageiq/providers/cloud_manager/provision/state_machine.rb @@ -10,7 +10,7 @@ def determine_placement end def prepare_volumes - if options[:volumes] + if options[:volumes].present? 
phase_context[:requested_volumes] = create_requested_volumes(options[:volumes]) signal :poll_volumes_complete else diff --git a/app/models/manageiq/providers/embedded_ansible/automation_manager/machine_credential.rb b/app/models/manageiq/providers/embedded_ansible/automation_manager/machine_credential.rb index eb217e903fd..1a4c8cd4032 100644 --- a/app/models/manageiq/providers/embedded_ansible/automation_manager/machine_credential.rb +++ b/app/models/manageiq/providers/embedded_ansible/automation_manager/machine_credential.rb @@ -32,7 +32,7 @@ class ManageIQ::Providers::EmbeddedAnsible::AutomationManager::MachineCredential :type => :choice, :label => N_('Privilege Escalation'), :help_text => N_('Privilege escalation method'), - :choices => ['', 'sudo', 'su', 'pbrun', 'pfexec'] + :choices => ['', 'sudo', 'su', 'pbrun', 'pfexec', 'doas', 'dzdo', 'pmrun', 'runas', 'enable', 'ksu', 'sesu', 'machinectl'] }, :become_username => { :type => :string, diff --git a/app/models/manageiq/providers/embedded_automation_manager.rb b/app/models/manageiq/providers/embedded_automation_manager.rb index a747fbcd202..e971593a3e1 100644 --- a/app/models/manageiq/providers/embedded_automation_manager.rb +++ b/app/models/manageiq/providers/embedded_automation_manager.rb @@ -10,7 +10,7 @@ def self.supported_for_create? false end - def supported_catalog_types - %w(generic_ansible_playbook) + def self.catalog_types + {"generic_ansible_playbook" => N_("Ansible Playbook")} end end diff --git a/app/models/manageiq/providers/infra_manager.rb b/app/models/manageiq/providers/infra_manager.rb index 876370e6413..2ad0074fff0 100644 --- a/app/models/manageiq/providers/infra_manager.rb +++ b/app/models/manageiq/providers/infra_manager.rb @@ -33,6 +33,7 @@ class InfraManager < BaseManager has_many :networks, :through => :hardwares has_many :guest_devices, :through => :hardwares has_many :ems_custom_attributes, :through => :vms_and_templates + has_many :clusterless_hosts, -> { where(:ems_cluster =>nil) }, :class_name => "Host", :foreign_key => "ems_id", :inverse_of => :ext_management_system include HasManyOrchestrationStackMixin @@ -66,10 +67,6 @@ def self.ems_timeouts(type, service = nil) def validate_authentication_status {:available => true, :message => nil} end - - def clusterless_hosts - hosts.where(:ems_cluster => nil) - end end def self.display_name(number = 1) diff --git a/app/models/manageiq/providers/network_manager.rb b/app/models/manageiq/providers/network_manager.rb index e8ba72e4aad..dc82739a44e 100644 --- a/app/models/manageiq/providers/network_manager.rb +++ b/app/models/manageiq/providers/network_manager.rb @@ -70,9 +70,7 @@ class << model_name def self.supported_types_and_descriptions_hash supported_subclasses.select(&:supports_ems_network_new?).each_with_object({}) do |klass, hash| - if Vmdb::PermissionStores.instance.supported_ems_type?(klass.ems_type) - hash[klass.ems_type] = klass.description - end + hash[klass.ems_type] = klass.description end end diff --git a/app/models/manageiq/providers/storage_manager/cinder_manager/refresher.rb b/app/models/manageiq/providers/storage_manager/cinder_manager/refresher.rb index 7da738aef68..939566f0463 100644 --- a/app/models/manageiq/providers/storage_manager/cinder_manager/refresher.rb +++ b/app/models/manageiq/providers/storage_manager/cinder_manager/refresher.rb @@ -1,12 +1,5 @@ # module ManageIQ::Providers class StorageManager::CinderManager::Refresher < ManageIQ::Providers::BaseManager::Refresher - def parse_legacy_inventory(ems) - 
ManageIQ::Providers::StorageManager::CinderManager::RefreshParser.ems_inv_to_hashes(ems, refresher_options) - end - - def post_process_refresh_classes - [] - end end end diff --git a/app/models/metric/ci_mixin/processing.rb b/app/models/metric/ci_mixin/processing.rb index c7d95ca12ee..fa7a4b45d92 100644 --- a/app/models/metric/ci_mixin/processing.rb +++ b/app/models/metric/ci_mixin/processing.rb @@ -94,6 +94,8 @@ def perf_process(interval_name, start_time, end_time, counters_data) resources.each do |resource| resource.perf_rollup_to_parents(interval_orig, start_time, end_time) end + + publish_metrics(rt_rows) end _log.info("#{log_header} Processing for #{log_specific_targets(resources)}, for range [#{start_time} - #{end_time}]...Complete - Timings: #{t.inspect}") @@ -197,4 +199,27 @@ def normalize_value(value, counter) end return value, message end + + def publish_metrics(metrics) + return if MiqQueue.messaging_type == "miq_queue" + + metrics.each_value do |metric| + resource = metric.delete(:resource) + + metric[:parent_ems_type] = resource.ext_management_system&.class&.ems_type + + metric[:resource_type] = resource.class.base_class.name + metric[:resource_id] = resource.id + + metric[:resource_manager_ref] = resource.ems_ref if resource.respond_to?(:ems_ref) + metric[:resource_manager_uid] = resource.uid_ems if resource.respond_to?(:uid_ems) + + MiqQueue.messaging_client("metrics_capture")&.publish_topic( + :service => "manageiq.metrics", + :sender => resource.ext_management_system&.id, + :event => "metrics", + :payload => metric + ) + end + end end diff --git a/app/models/miq_ae_class.rb b/app/models/miq_ae_class.rb index dc5520b001a..a46984dd88d 100644 --- a/app/models/miq_ae_class.rb +++ b/app/models/miq_ae_class.rb @@ -29,7 +29,7 @@ def self.lookup_by_namespace_and_name(name_space, name, _args = {}) name_space = MiqAeNamespace.lookup_by_fqname(name_space) return nil if name_space.nil? - name_space.ae_classes.detect { |c| name.casecmp(c.name).zero? } + where(:namespace_id => name_space.id).find_by(arel_table[:name].lower.eq(name.downcase)) end singleton_class.send(:alias_method, :find_by_namespace_and_name, :lookup_by_namespace_and_name) diff --git a/app/models/miq_ae_namespace.rb b/app/models/miq_ae_namespace.rb index cf2d0ae0028..4a07052efbf 100644 --- a/app/models/miq_ae_namespace.rb +++ b/app/models/miq_ae_namespace.rb @@ -1,5 +1,8 @@ +require 'ancestry' +require 'ancestry_patch' + class MiqAeNamespace < ApplicationRecord - acts_as_tree + has_ancestry include MiqAeSetUserInfoMixin include MiqAeYamlImportExportMixin @@ -8,24 +11,26 @@ class MiqAeNamespace < ApplicationRecord /^commit_time/, /^commit_sha/, /^ref$/, /^ref_type$/, /^last_import_on/, /^source/, /^top_level_namespace/].freeze - belongs_to :parent, :class_name => "MiqAeNamespace", :foreign_key => :parent_id - has_many :ae_namespaces, :class_name => "MiqAeNamespace", :foreign_key => :parent_id, :dependent => :destroy - has_many :ae_classes, -> { includes([:ae_methods, :ae_fields, :ae_instances]) }, :class_name => "MiqAeClass", :foreign_key => :namespace_id, :dependent => :destroy + has_many :ae_classes, -> { includes([:ae_methods, :ae_fields, :ae_instances]) }, :class_name => "MiqAeClass", + :foreign_key => :namespace_id, :dependent => :destroy, :inverse_of => false + + validates :name, + :format => {:with => /\A[\w\.\-\$]+\z/i, :message => N_("may contain only alphanumeric and _ . 
- $ characters")}, + :presence => true, + :uniqueness => {:scope => :ancestry, :case_sensitive => false} - validates_presence_of :name - validates_format_of :name, :with => /\A[\w\.\-\$]+\z/i, - :message => N_("may contain only alphanumeric and _ . - $ characters") - validates_uniqueness_of :name, :scope => :parent_id, :case_sensitive => false + alias ae_namespaces children + virtual_has_many :ae_namespaces def self.lookup_by_fqname(fqname, include_classes = true) return nil if fqname.blank? - fqname = fqname[0] == '/' ? fqname : "/#{fqname}" - fqname = fqname.downcase - last = fqname.split('/').last + fqname = fqname[0] == '/' ? fqname : "/#{fqname}" + fqname = fqname.downcase + last = fqname.split('/').last low_name = arel_table[:name].lower - query = includes(:parent) - query = query.includes(:ae_classes) if include_classes + + query = include_classes ? includes(:ae_classes) : all query.where(low_name.eq(last)).detect { |namespace| namespace.fqname.downcase == fqname } end @@ -35,21 +40,22 @@ def self.lookup_by_fqname(fqname, include_classes = true) def self.find_or_create_by_fqname(fqname, include_classes = true) return nil if fqname.blank? - fqname = fqname[1..-1] if fqname[0] == '/' + fqname = fqname[1..-1] if fqname[0] == '/' found = lookup_by_fqname(fqname, include_classes) return found unless found.nil? parts = fqname.split('/') new_parts = [parts.pop] loop do + break if parts.empty? + found = lookup_by_fqname(parts.join('/'), include_classes) break unless found.nil? new_parts.unshift(parts.pop) - break if parts.empty? end new_parts.each do |p| - found = create(:name => p, :parent_id => found.try(:id)) + found = found ? create(:name => p, :parent => found) : create(:name => p) end found @@ -82,7 +88,7 @@ def self.find_tree(find_options = {}) end def fqname - @fqname ||= "/#{ancestors.collect(&:name).reverse.push(name).join('/')}" + @fqname ||= "/#{path.pluck(:name).join('/')}" end def editable?(user = User.current_user) @@ -106,15 +112,11 @@ def domain_name end def domain - if domain? - self - elsif (ns = ancestors.last) && ns.domain? - ns - end + root if root.domain? end def domain? - parent_id.nil? && name != '$' + root? && name != '$' end def self.display_name(number = 1) diff --git a/app/models/miq_cockpit_ws_worker/runner.rb b/app/models/miq_cockpit_ws_worker/runner.rb index d8d24dec227..7f57a0ae6f8 100644 --- a/app/models/miq_cockpit_ws_worker/runner.rb +++ b/app/models/miq_cockpit_ws_worker/runner.rb @@ -66,6 +66,8 @@ def stop_cockpit_ws def stop_cockpit_ws_process return unless @pid + @stdout&.close + @stderr&.close Process.kill("TERM", @pid) wait_on_cockpit_ws end @@ -73,7 +75,7 @@ def stop_cockpit_ws_process # Waits for a cockpit-ws process to stop. The process is expected to be # in the act of shutting down, and thus it will wait 5 minutes # before issuing a kill. 
- def wait_on_cockpit_ws(pid) + def wait_on_cockpit_ws(pid = nil) pid ||= @pid # TODO: Use Process.waitpid or one of its async variants begin @@ -148,13 +150,18 @@ def cockpit_ws_run "XDG_CONFIG_DIRS" => cockpit_ws.config_dir, "DRB_URI" => @drb_uri } - Bundler.with_clean_env do - stdin, stdout, stderr, wait_thr = Open3.popen3(env, *cockpit_ws.command(BINDING_ADDRESS), :unsetenv_others => true) + _log.info("Starting cockpit-ws process with command: #{cockpit_ws.command(BINDING_ADDRESS)} ") + _log.info("Cockpit environment #{env} ") + stdin, stdout, stderr, wait_thr = Bundler.with_clean_env do + Open3.popen3(env, cockpit_ws.command(BINDING_ADDRESS), :unsetenv_others => true) + end + stdin&.close + if wait_thr + _log.info("#{log_prefix} cockpit-ws process started - pid=#{wait_thr.pid}") + return wait_thr.pid, stdout, stderr + else + raise "Cockpit-ws process failed to start" end - stdin.close - - _log.info("#{log_prefix} cockpit-ws process started - pid=#{@pid}") - return wait_thr.pid, stdout, stderr end def check_drb_service diff --git a/app/models/miq_event_handler/runner.rb b/app/models/miq_event_handler/runner.rb index 9a76ddd979a..612d4e4997f 100644 --- a/app/models/miq_event_handler/runner.rb +++ b/app/models/miq_event_handler/runner.rb @@ -1,37 +1,2 @@ class MiqEventHandler::Runner < MiqQueueWorkerBase::Runner - def artemis? - Settings.prototype.queue_type == 'artemis' - end - - def do_before_work_loop - if artemis? - topic_options = { - :service => "events", - :persist_ref => "event_handler" - } - - # this block is stored in a lambda callback and is executed in another thread once a msg is received - MiqQueue.artemis_client('event_handler').subscribe_topic(topic_options) do |sender, event, payload| - _log.info "Received Event (#{event}) by sender #{sender}: #{payload[:event_type]} #{payload[:chain_id]}" - EmsEvent.add(sender.to_i, payload) - end - _log.info "Listening for events..." - end - end - - def do_work - # If we are using MiqQueue then use the default do_work method - super unless artemis? - - # we dont do any work, we are lazy - # upon msg received, the messaging thread will execute the block in .subscribe_topic as above - # sleeping is done in do_work_loop - end - - def before_exit(_message, _exit_code) - return unless artemis? 
- MiqQueue.artemis_client('event_handler').close - rescue => e - safe_log("Could not close artemis connection: #{e}", 1) - end end diff --git a/app/models/miq_queue.rb b/app/models/miq_queue.rb index 30f43bf4899..5bd87986b14 100644 --- a/app/models/miq_queue.rb +++ b/app/models/miq_queue.rb @@ -32,24 +32,28 @@ class MiqQueue < ApplicationRecord PRIORITY_WHICH = [:max, :high, :normal, :low, :min] PRIORITY_DIR = [:higher, :lower] - def self.artemis_client(client_ref) - @artemis_client ||= {} - @artemis_client[client_ref] ||= begin + def self.messaging_type + ENV["MESSAGING_TYPE"] || Settings.prototype.messaging_type + end + + def self.messaging_client(client_ref) + @messaging_client ||= {} + return if messaging_type == "miq_queue" + + @messaging_client[client_ref] ||= begin require "manageiq-messaging" ManageIQ::Messaging.logger = _log - queue_settings = Settings.prototype.artemis - connect_opts = { - :host => ENV["ARTEMIS_QUEUE_HOSTNAME"] || queue_settings.queue_hostname, - :port => (ENV["ARTEMIS_QUEUE_PORT"] || queue_settings.queue_port).to_i, - :username => ENV["ARTEMIS_QUEUE_USERNAME"] || queue_settings.queue_username, - :password => ENV["ARTEMIS_QUEUE_PASSWORD"] || queue_settings.queue_password, - :client_ref => client_ref, - } # caching the client works, even if the connection becomes unavailable # internally the client will track the state of the connection and re-open it, # once it's available again - at least thats true for a stomp connection - ManageIQ::Messaging::Client.open(connect_opts) + options = messaging_client_options&.merge(:client_ref => client_ref) + return if options.nil? + + ManageIQ::Messaging::Client.open(options) + rescue => err + _log.warn("Failed to open messaging client: #{err}") + nil end end @@ -641,6 +645,42 @@ def self.optional_values(options, keys = [:zone]) private_class_method :optional_values + def self.messaging_client_options + (messaging_options_from_env || messaging_options_from_file)&.merge( + :encoding => "json", + :protocol => messaging_protocol, + )&.tap { |h| h[:password] = MiqPassword.try_decrypt(h.delete(:password)) } + end + private_class_method :messaging_client_options + + def self.messaging_protocol + case messaging_type + when "artemis" + :Stomp + when "kafka" + :Kafka + end + end + private_class_method :messaging_protocol + + private_class_method def self.messaging_options_from_env + return unless ENV["MESSAGING_HOSTNAME"] && ENV["MESSAGING_PORT"] && ENV["MESSAGING_USERNAME"] && ENV["MESSAGING_PASSWORD"] + + { + :host => ENV["MESSAGING_HOSTNAME"], + :port => ENV["MESSAGING_PORT"].to_i, + :username => ENV["MESSAGING_USERNAME"], + :password => ENV["MESSAGING_PASSWORD"], + } + end + + MESSAGING_CONFIG_FILE = Rails.root.join("config", "messaging.yml") + private_class_method def self.messaging_options_from_file + return unless MESSAGING_CONFIG_FILE.file? 
+ + YAML.load_file(MESSAGING_CONFIG_FILE)[Rails.env].symbolize_keys.tap { |h| h[:host] = h.delete(:hostname) } + end + def destroy_potentially_stale_record destroy rescue ActiveRecord::StaleObjectError diff --git a/app/models/miq_report.rb b/app/models/miq_report.rb index 9f84d3adc92..e29cd0da666 100644 --- a/app/models/miq_report.rb +++ b/app/models/miq_report.rb @@ -42,6 +42,7 @@ class MiqReport < ApplicationRecord virtual_column :human_expression, :type => :string virtual_column :based_on, :type => :string + virtual_column :col_format_with_defaults, :type => :string_set alias_attribute :menu_name, :name attr_accessor :ext_options @@ -89,6 +90,22 @@ def self.having_report_results(options = {}) q end + def col_format_with_defaults + return [] unless cols.present? + + cols.each_with_index.map do |column, index| + column_format = col_formats.try(:[], index) + if column_format + column_format + else + column = Chargeback.default_column_for_format(column.to_s) if Chargeback.db_is_chargeback?(db) + expression_col = col_to_expression_col(column) + column_type = MiqExpression.parse_field_or_tag(expression_col).try(:column_type)&.to_sym + MiqReport::Formats.default_format_for_path(expression_col, column_type) + end + end + end + # NOTE: this can by dynamically manipulated def cols self[:cols] ||= (self[:col_order] || []).reject { |x| x.include?(".") } diff --git a/app/models/miq_report/formatters/text.rb b/app/models/miq_report/formatters/text.rb index 178c124de02..8a1941b3351 100644 --- a/app/models/miq_report/formatters/text.rb +++ b/app/models/miq_report/formatters/text.rb @@ -1,6 +1,6 @@ module MiqReport::Formatters::Text def to_text - ReportFormatter::ReportRenderer.render(:text) do |e| + ManageIQ::Reporting::Formatter::ReportRenderer.render(:text) do |e| e.options.mri = (self) # set the MIQ_Report instance in the formatter e.options.ignore_table_width = true end diff --git a/app/models/miq_report/generator.rb b/app/models/miq_report/generator.rb index 5b5964ab803..25b9518665f 100644 --- a/app/models/miq_report/generator.rb +++ b/app/models/miq_report/generator.rb @@ -324,7 +324,8 @@ def generate_basic_results(options = {}) targets = db_class.find_entries(ext_options) if targets.respond_to?(:find_entries) # TODO: add once only_cols is fixed # targets = targets.select(only_cols) - where_clause = MiqExpression.merge_where_clauses(self.where_clause, options[:where_clause]) + targets = targets.where(where_clause) if where_clause + targets = targets.where(options[:where_clause]) if options[:where_clause] # Remove custom_attributes as part of the `includes` if all of them exist # in the select statement @@ -337,7 +338,6 @@ def generate_basic_results(options = {}) :filter => conditions, :include_for_find => get_include_for_find_rbac, :references => get_include_rbac, - :where_clause => where_clause, :skip_counts => true ) diff --git a/app/models/miq_report_result.rb b/app/models/miq_report_result.rb index 2b76335854f..b59e07fad3c 100644 --- a/app/models/miq_report_result.rb +++ b/app/models/miq_report_result.rb @@ -384,11 +384,11 @@ def self.with_current_user_groups end def self.with_chargeback - includes(:miq_report).where(:miq_reports => {:db => Chargeback.subclasses}) + includes(:miq_report).where(:miq_reports => {:db => Chargeback.subclasses.collect(&:name)}) end def self.with_saved_chargeback_reports(report_id = nil) - with_report(report_id).auto_generated.with_current_user_groups.with_chargeback.order('LOWER(miq_reports.name)') + 
with_report(report_id).auto_generated.with_current_user_groups.with_chargeback.order(Arel.sql('LOWER(miq_reports.name)')) end def self.select_distinct_results diff --git a/app/models/miq_web_service_worker.rb b/app/models/miq_web_service_worker.rb index 24fc516ed3c..cc779ede487 100644 --- a/app/models/miq_web_service_worker.rb +++ b/app/models/miq_web_service_worker.rb @@ -26,4 +26,9 @@ def self.bundler_groups def self.kill_priority MiqWorkerType::KILL_PRIORITY_WEB_SERVICE_WORKERS end + + def self.preload_for_worker_role + super + Api::ApiConfig.collections.each { |_k, v| v.klass.try(:constantize).try(:descendants) } + end end diff --git a/app/models/miq_worker/container_common.rb b/app/models/miq_worker/container_common.rb index 419e30bfc33..c4e1390d9d8 100644 --- a/app/models/miq_worker/container_common.rb +++ b/app/models/miq_worker/container_common.rb @@ -34,7 +34,7 @@ def container_image_name end def container_image_tag - "latest" + ENV["CONTAINER_IMAGE_TAG"] || "latest" end def deployment_prefix diff --git a/app/models/mixins/compliance_mixin.rb b/app/models/mixins/compliance_mixin.rb index 78d4b482faf..47674a47e3a 100644 --- a/app/models/mixins/compliance_mixin.rb +++ b/app/models/mixins/compliance_mixin.rb @@ -31,4 +31,10 @@ def check_compliance_queue def scan_and_check_compliance_queue Compliance.scan_and_check_compliance_queue(self) end + + def compliance_policies + target_class = self.class.base_model.name.downcase + _, plist = MiqPolicy.get_policies_for_target(self, "compliance", "#{target_class}_compliance_check") + plist + end end diff --git a/app/models/mixins/miq_provision_quota_mixin.rb b/app/models/mixins/miq_provision_quota_mixin.rb index 1586e779a7b..e0de52edd6d 100644 --- a/app/models/mixins/miq_provision_quota_mixin.rb +++ b/app/models/mixins/miq_provision_quota_mixin.rb @@ -109,7 +109,7 @@ def quota_find_vms_by_group(options) end def quota_find_vms_by_owner_and_group(options) - scope = Vm.not(:host_id => nil) + scope = Vm.where.not(:host_id => nil) if options[:retired_vms_only] == true scope = scope.where(:retired => true) elsif options[:include_retired_vms] == false diff --git a/app/models/mixins/process_tasks_mixin.rb b/app/models/mixins/process_tasks_mixin.rb index db319d78109..15008d235b8 100644 --- a/app/models/mixins/process_tasks_mixin.rb +++ b/app/models/mixins/process_tasks_mixin.rb @@ -187,7 +187,7 @@ def invoke_task_local(task, instance, options, args) def validate_tasks(options) tasks = [] - instances = base_class.where(:id => options[:ids]).order("lower(name)").to_a + instances = base_class.where(:id => options[:ids]).order(Arel.sql("lower(name)")).to_a return instances, tasks unless options[:invoke_by] == :task # jobs will be used instead of tasks for feedback instances.each do |instance| diff --git a/app/models/service_template.rb b/app/models/service_template.rb index 07be67c543b..ee98c0e4141 100644 --- a/app/models/service_template.rb +++ b/app/models/service_template.rb @@ -12,21 +12,6 @@ class ServiceTemplate < ApplicationRecord "storage" => N_("Storage") }.freeze - CATALOG_ITEM_TYPES = { - "amazon" => N_("Amazon"), - "azure" => N_("Azure"), - "generic" => N_("Generic"), - "generic_orchestration" => N_("Orchestration"), - "generic_ansible_playbook" => N_("Ansible Playbook"), - "generic_ansible_tower" => N_("Ansible Tower"), - "generic_container_template" => N_("OpenShift Template"), - "google" => N_("Google"), - "microsoft" => N_("SCVMM"), - "openstack" => N_("OpenStack"), - "redhat" => N_("Red Hat Virtualization"), - "vmware" => N_("VMware") - 
}.freeze - SERVICE_TYPE_ATOMIC = 'atomic'.freeze SERVICE_TYPE_COMPOSITE = 'composite'.freeze @@ -103,12 +88,28 @@ def self.with_additional_tenants references(table_name, :tenants).includes(:service_template_tenants => :tenant) end + def self.all_catalog_item_types + @all_catalog_item_types ||= begin + builtin_catalog_item_types = { + "generic" => N_("Generic"), + "generic_orchestration" => N_("Orchestration"), + } + + ExtManagementSystem.supported_types_for_catalog + .flat_map(&:catalog_types) + .reduce(builtin_catalog_item_types, :merge) + end + end + def self.catalog_item_types - ci_types = Set.new(Rbac.filtered(ExtManagementSystem.all).flat_map(&:supported_catalog_types)) + ems_classes = Rbac.filtered(ExtManagementSystem.all).collect(&:class).uniq.select(&:supported_for_catalog?) + ci_types = Set.new(ems_classes.flat_map(&:catalog_types).reduce({}, :merge).keys) + ci_types.add('generic_orchestration') if Rbac.filtered(OrchestrationTemplate).exists? ci_types.add('generic') - CATALOG_ITEM_TYPES.each.with_object({}) do |(key, description), hash| - hash[key] = { :description => description, :display => ci_types.include?(key) } + + all_catalog_item_types.each.with_object({}) do |(key, description), hash| + hash[key] = {:description => description, :display => ci_types.include?(key)} end end diff --git a/app/models/service_template_transformation_plan_task.rb b/app/models/service_template_transformation_plan_task.rb index 8c775995cb3..eb932bf848b 100644 --- a/app/models/service_template_transformation_plan_task.rb +++ b/app/models/service_template_transformation_plan_task.rb @@ -66,7 +66,8 @@ def preflight_check raise 'OSP destination and source power_state is off' if destination_ems.emstype == 'openstack' && source.power_state == 'off' update_options( :source_vm_power_state => source.power_state, # This will determine power_state of destination_vm - :source_vm_ipaddresses => source.ipaddresses # This will determine if we need to wait for ip addresses to appear + :source_vm_ipaddresses => source.ipaddresses, # This will determine if we need to wait for ip addresses to appear + :two_phase => two_phase? # This will help the UI to know how to display the data ) destination_cluster preflight_check_vm_exists_in_destination @@ -98,6 +99,10 @@ def preflight_check_vm_exists_in_destination_openstack end end + def two_phase? + source.snapshots.empty? + end + def source_cluster source.ems_cluster end @@ -271,7 +276,7 @@ def get_conversion_state updates[:virtv2v_message] = virtv2v_state['last_message']['message'] if virtv2v_state['last_message'].present? if virtv2v_state['finished'].nil? updates[:virtv2v_status] = virtv2v_state['status'] == 'Paused' ? 'paused' : 'active' - if warm_migration? + if two_phase? 
updated_disks = virtv2v_state['disks'] else updated_disks.each do |disk| @@ -297,7 +302,7 @@ def get_conversion_state rescue failures = options[:get_conversion_state_failures] || 0 update_options(:get_conversion_state_failures => failures + 1) - raise "Failed to get conversion state 5 times in a row" if options[:get_conversion_state_failures] > 5 + raise "Failed to get conversion state 20 times in a row" if options[:get_conversion_state_failures] > 20 ensure _log.info("InfraConversionJob get_conversion_state to update_options: #{updates}") update_options(updates) @@ -400,7 +405,7 @@ def conversion_options_source_provider_vmwarews_vddk(_storage) :query => { :no_verify => 1 }.to_query ).to_s, :vmware_password => source.host.authentication_password, - :two_phase => warm_migration?, + :two_phase => two_phase?, :warm => warm_migration?, :daemonize => false } @@ -408,15 +413,20 @@ def conversion_options_source_provider_vmwarews_vddk(_storage) def conversion_options_source_provider_vmwarews_ssh(storage) { - :vm_name => URI::Generic.build( + :vm_name => source.name, + :vm_uuid => source.uid_ems, + :conversion_host_uuid => conversion_host_resource_ref(conversion_host.resource), + :transport_method => 'ssh', + :vmware_fingerprint => source.host.thumbprint_sha1, + :vmware_uri => URI::Generic.build( :scheme => 'ssh', :userinfo => 'root', :host => source.host.miq_custom_get('TransformationIPAddress') || source.host.ipaddress, :path => "/vmfs/volumes/#{Addressable::URI.escape(storage.name)}/#{Addressable::URI.escape(source.location)}" ).to_s, - :vm_uuid => source.uid_ems, - :conversion_host_uuid => conversion_host_resource_ref(conversion_host.resource), - :transport_method => 'ssh', + :vmware_password => source.host.authentication_password, + :two_phase => two_phase?, + :warm => false, :daemonize => false } end diff --git a/app/models/storage_profile.rb b/app/models/storage_profile.rb index 5c7be1c20a3..a3f403d51fc 100644 --- a/app/models/storage_profile.rb +++ b/app/models/storage_profile.rb @@ -1,5 +1,5 @@ class StorageProfile < ApplicationRecord - belongs_to :ext_management_system + belongs_to :ext_management_system, :foreign_key => :ems_id has_many :storage_profile_storages, :dependent => :destroy has_many :storages, :through => :storage_profile_storages has_many :vms_and_templates, :dependent => :nullify diff --git a/app/models/time_profile.rb b/app/models/time_profile.rb index eb5a0dcea37..6f53522ac23 100644 --- a/app/models/time_profile.rb +++ b/app/models/time_profile.rb @@ -171,7 +171,7 @@ def self.for_user(user_id) end def self.ordered_by_desc - order("lower(description) ASC") + order(Arel.sql("lower(description) ASC")) end def self.profiles_for_user(user_id, region_id) diff --git a/app/models/vm_or_template.rb b/app/models/vm_or_template.rb index 16eef116721..43715303ce7 100644 --- a/app/models/vm_or_template.rb +++ b/app/models/vm_or_template.rb @@ -196,6 +196,17 @@ class VmOrTemplate < ApplicationRecord scope :not_orphaned, -> { where.not(:ems_id => nil).or(where(:storage_id => nil)) } scope :not_retired, -> { where(:retired => false).or(where(:retired => nil)) } + scope :from_cloud_managers, -> { where(:ext_management_system => ManageIQ::Providers::CloudManager.all) } + scope :from_infra_managers, -> { where(:ext_management_system => ManageIQ::Providers::InfraManager.all) } + + def from_cloud_manager? + ext_management_system&.kind_of?(ManageIQ::Providers::CloudManager) + end + + def from_infra_manager? 
+ ext_management_system&.kind_of?(ManageIQ::Providers::InfraManager) + end + # The SQL form of `#registered?`, with it's inverse as well. # TODO: Vmware Specific (copied (old) TODO from #registered?) scope :registered, (lambda do diff --git a/app/models/vm_reconfigure_task.rb b/app/models/vm_reconfigure_task.rb index 1de9075707f..37f6cffb2b2 100644 --- a/app/models/vm_reconfigure_task.rb +++ b/app/models/vm_reconfigure_task.rb @@ -39,7 +39,7 @@ def self.build_message(options, key, message, modifier = nil) def self.build_disk_message(options) if options[:disk_add].present? - disk_sizes = options[:disk_add].collect { |d| d["disk_size_in_mb"].to_i.megabytes.to_s(:human_size) } + disk_sizes = options[:disk_add].collect { |d| d["disk_size_in_mb"].to_i.megabytes.to_s(:human_size) + ", Type: " + d["type"].to_s } "Add Disks: #{options[:disk_add].length} : #{disk_sizes.join(", ")} " end end diff --git a/bin/setup b/bin/setup index 722017eb1a2..ca8fbf9088c 100755 --- a/bin/setup +++ b/bin/setup @@ -14,6 +14,18 @@ EOS exit 1 end +def skip_ui_update? + ENV["CI"] +end + +def skip_database_setup? + ENV["SKIP_DATABASE_SETUP"] || ENV["CI"] +end + +def skip_test_reset? + ENV["SKIP_TEST_RESET"] +end + Dir.chdir(ManageIQ::Environment::APP_ROOT) do ManageIQ::Environment.ensure_config_files @@ -21,15 +33,17 @@ Dir.chdir(ManageIQ::Environment::APP_ROOT) do ManageIQ::Environment.install_bundler ManageIQ::Environment.bundle_update - ManageIQ::Environment.while_updating_ui do - unless ENV["SKIP_DATABASE_SETUP"] - ManageIQ::Environment.create_database - ManageIQ::Environment.migrate_database - ManageIQ::Environment.seed_database - end + ui_thread = ManageIQ::Environment.update_ui_thread unless skip_ui_update? - ManageIQ::Environment.setup_test_environment unless ENV["SKIP_TEST_RESET"] + unless skip_database_setup? + ManageIQ::Environment.create_database + ManageIQ::Environment.migrate_database + ManageIQ::Environment.seed_database end + ManageIQ::Environment.setup_test_environment unless skip_test_reset? + + ui_thread&.join + ManageIQ::Environment.clear_logs_and_temp end diff --git a/config/database.pg.yml b/config/database.pg.yml index 82ddc961d5f..f3da6eccd20 100644 --- a/config/database.pg.yml +++ b/config/database.pg.yml @@ -28,7 +28,7 @@ production: <<: *base database: vmdb_production -test: &test +test: <<: *base pool: 3 database: vmdb_test<%= ENV['TEST_ENV_NUMBER'] %> diff --git a/config/initializers/fast_gettext.rb b/config/initializers/fast_gettext.rb index 81b3c61a144..35508934264 100644 --- a/config/initializers/fast_gettext.rb +++ b/config/initializers/fast_gettext.rb @@ -4,8 +4,12 @@ # temporarily force it off while we load stuff. old_debug, $DEBUG = $DEBUG, nil begin - # consistently sort en_foo.yml *after* en.yml; to_s because pathnames - I18n.load_path += Dir[Rails.root.join('locale', '*.yml')].sort_by(&:to_s) + load_paths = Vmdb::Plugins.to_a.unshift(Rails).flat_map do |engine| + Dir.glob(engine.root.join('locale', '*.yml')) + end + load_paths.sort_by! 
{ |p| File.basename(p) } # consistently sort en_foo.yml *after* en.yml + I18n.load_path += load_paths + Vmdb::FastGettextHelper.register_locales Vmdb::FastGettextHelper.register_human_localenames gettext_options = %w(--sort-by-msgid --location --no-wrap) diff --git a/config/initializers/permissions_repository.rb b/config/initializers/permissions_repository.rb deleted file mode 100644 index b3589332df3..00000000000 --- a/config/initializers/permissions_repository.rb +++ /dev/null @@ -1,12 +0,0 @@ -require 'vmdb/permission_stores' - -Vmdb::PermissionStores.configure do |config| - yaml_filename = Rails.root.join('config', 'permissions.yml') - if File.exist?(yaml_filename) - config.backend = 'yaml' - config.options[:filename] = yaml_filename - else - config.backend = 'null' - end -end -Vmdb::PermissionStores.initialize! diff --git a/config/messaging.artemis.yml b/config/messaging.artemis.yml new file mode 100644 index 00000000000..816fe222198 --- /dev/null +++ b/config/messaging.artemis.yml @@ -0,0 +1,15 @@ +--- +base: &base + hostname: localhost + port: 61616 + username: admin + password: smartvm + +development: + <<: *base + +production: + <<: *base + +test: + <<: *base diff --git a/config/messaging.kafka.yml b/config/messaging.kafka.yml new file mode 100644 index 00000000000..692af7a159c --- /dev/null +++ b/config/messaging.kafka.yml @@ -0,0 +1,15 @@ +--- +base: &base + hostname: localhost + port: 9092 + username: admin + password: smartvm + +development: + <<: *base + +production: + <<: *base + +test: + <<: *base diff --git a/config/settings.yml b/config/settings.yml index 0107655b14c..4fe41adadb2 100644 --- a/config/settings.yml +++ b/config/settings.yml @@ -89,6 +89,9 @@ :purge_schedule: "50 * * * *" :purge_window_size: 10000 :queue_timeout: 20.minutes +:docs: + :product_support_website: http://www.manageiq.org + :product_support_website_text: ManageIQ.org :drift_states: :history: :keep_drift_states: 6.months @@ -929,12 +932,7 @@ :run_automate_methods_on_service_api_submit: false :allow_api_service_ordering: true :prototype: - :queue_type: miq_queue - :artemis: - :queue_hostname: localhost - :queue_port: 61616 - :queue_username: admin - :queue_password: smartvm + :messaging_type: miq_queue :recommendations: :cpu_minimum: 1 :mem_minimum: 32.megabytes diff --git a/db/fixtures/tools/create_customization_templates_fixture.rb b/db/fixtures/tools/create_customization_templates_fixture.rb index 5b3fadf5e63..25f978bdee9 100644 --- a/db/fixtures/tools/create_customization_templates_fixture.rb +++ b/db/fixtures/tools/create_customization_templates_fixture.rb @@ -1,4 +1,4 @@ -recs = CustomizationTemplate.where(:system => true).order("LOWER(name)") +recs = CustomizationTemplate.where(:system => true).order(Arel.sql("LOWER(name)")) recs = recs.collect do |r| attrs = r.attributes.except("id", "created_at", "updated_at", "pxe_image_type_id").symbolize_keys attrs[:system] = true diff --git a/db/fixtures/tools/create_pxe_image_types_fixture.rb b/db/fixtures/tools/create_pxe_image_types_fixture.rb index 053136a2034..5d293fadbbe 100644 --- a/db/fixtures/tools/create_pxe_image_types_fixture.rb +++ b/db/fixtures/tools/create_pxe_image_types_fixture.rb @@ -1,4 +1,4 @@ -recs = PxeImageType.order("LOWER(name)").collect { |r| r.attributes.except("id").symbolize_keys } +recs = PxeImageType.order(Arel.sql("LOWER(name)")).collect { |r| r.attributes.except("id").symbolize_keys } File.open(PxeImageType.seed_file_name, "w") do |f| f.write(recs.to_yaml) end diff --git a/lib/container_orchestrator/object_definition.rb 
b/lib/container_orchestrator/object_definition.rb index 0cec9d214c3..15ce9704b02 100644 --- a/lib/container_orchestrator/object_definition.rb +++ b/lib/container_orchestrator/object_definition.rb @@ -5,9 +5,10 @@ module ObjectDefinition def deployment_definition(name) { :metadata => { - :name => name, - :labels => {:app => app_name}, - :namespace => my_namespace, + :name => name, + :labels => {:app => app_name}, + :namespace => my_namespace, + :ownerReferences => owner_references }, :spec => { :selector => {:matchLabels => {:name => name}}, @@ -15,7 +16,6 @@ def deployment_definition(name) :metadata => {:name => name, :labels => {:name => name, :app => app_name}}, :spec => { :imagePullSecrets => [{:name => ENV["IMAGE_PULL_SECRET"].to_s}], - :serviceAccountName => "#{app_name}-anyuid", :containers => [{ :name => name, :env => default_environment, @@ -30,9 +30,10 @@ def deployment_definition(name) def service_definition(name, selector, port) { :metadata => { - :name => name, - :labels => {:app => app_name}, - :namespace => my_namespace + :name => name, + :labels => {:app => app_name}, + :namespace => my_namespace, + :ownerReferences => owner_references }, :spec => { :selector => selector, @@ -48,9 +49,10 @@ def service_definition(name, selector, port) def secret_definition(name, string_data) { :metadata => { - :name => name, - :labels => {:app => app_name}, - :namespace => my_namespace + :name => name, + :labels => {:app => app_name}, + :namespace => my_namespace, + :ownerReferences => owner_references }, :stringData => string_data } @@ -62,6 +64,8 @@ def default_environment {:name => "GUID", :value => MiqServer.my_guid}, {:name => "MEMCACHED_SERVER", :value => ENV["MEMCACHED_SERVER"]}, {:name => "MEMCACHED_SERVICE_NAME", :value => ENV["MEMCACHED_SERVICE_NAME"]}, + {:name => "MESSAGING_PORT", :value => ENV["MESSAGING_PORT"]}, + {:name => "MESSAGING_TYPE", :value => ENV["MESSAGING_TYPE"]}, {:name => "WORKER_HEARTBEAT_FILE", :value => Rails.root.join("tmp", "worker.hb").to_s}, {:name => "WORKER_HEARTBEAT_METHOD", :value => "file"}, {:name => "DATABASE_HOSTNAME", @@ -73,7 +77,13 @@ def default_environment {:name => "DATABASE_USER", :valueFrom => {:secretKeyRef=>{:name => "postgresql-secrets", :key => "username"}}}, {:name => "ENCRYPTION_KEY", - :valueFrom => {:secretKeyRef=>{:name => "app-secrets", :key => "encryption-key"}}} + :valueFrom => {:secretKeyRef=>{:name => "app-secrets", :key => "encryption-key"}}}, + {:name => "MESSAGING_HOSTNAME", + :valueFrom => {:secretKeyRef=>{:name => "kafka-secrets", :key => "hostname"}}}, + {:name => "MESSAGING_PASSWORD", + :valueFrom => {:secretKeyRef=>{:name => "kafka-secrets", :key => "password"}}}, + {:name => "MESSAGING_USERNAME", + :valueFrom => {:secretKeyRef=>{:name => "kafka-secrets", :key => "username"}}} ] end @@ -93,5 +103,16 @@ def my_namespace def app_name ENV["APP_NAME"] end + + def owner_references + [{ + :apiVersion => "v1", + :blockOwnerDeletion => true, + :controller => true, + :kind => "Pod", + :name => ENV["POD_NAME"], + :uid => ENV["POD_UID"] + }] + end end end diff --git a/lib/extensions/ar_miq_set.rb b/lib/extensions/ar_miq_set.rb index f5c2231b50b..3ad9db523ca 100644 --- a/lib/extensions/ar_miq_set.rb +++ b/lib/extensions/ar_miq_set.rb @@ -67,6 +67,8 @@ module ActsAsMiqSet alias_with_relationship_type :add_members, :add_children alias_method model_table_name.to_sym, :children + + virtual_has_many model_table_name.to_sym, :uses => :all_relationships end module ClassMethods diff --git a/lib/extensions/ar_types.rb 
b/lib/extensions/ar_types.rb index 4468c2870b0..a7eb7822216 100644 --- a/lib/extensions/ar_types.rb +++ b/lib/extensions/ar_types.rb @@ -1,7 +1,7 @@ require 'active_record/connection_adapters/postgresql_adapter' ActiveRecord::ConnectionAdapters::PostgreSQLAdapter.module_eval do prepend Module.new { - def initialize_type_map(m) + def initialize_type_map(m = type_map) super m.alias_type('xid', 'varchar') end diff --git a/lib/manageiq/environment.rb b/lib/manageiq/environment.rb index 1912438af0b..06f4cef6d3d 100644 --- a/lib/manageiq/environment.rb +++ b/lib/manageiq/environment.rb @@ -28,9 +28,10 @@ def self.manageiq_plugin_update(plugin_root = nil) def self.ensure_config_files config_files = { - "certs/v2_key.dev" => "certs/v2_key", - "config/cable.yml.sample" => "config/cable.yml", - "config/database.pg.yml" => "config/database.yml", + "certs/v2_key.dev" => "certs/v2_key", + "config/cable.yml.sample" => "config/cable.yml", + "config/database.pg.yml" => "config/database.yml", + "config/messaging.kafka.yml" => "config/messaging.yml", } config_files.each do |source, dest| @@ -44,8 +45,7 @@ def self.ensure_config_files Dir.mkdir(logdir) unless Dir.exist?(logdir) end - def self.while_updating_ui - # Run update:ui in a thread and continue to do the non-js stuff + def self.update_ui_thread puts "\n== Updating UI assets (in parallel) ==" ui_thread = Thread.new do @@ -54,9 +54,13 @@ def self.while_updating_ui end ui_thread.abort_on_exception = true + ui_thread + end + def self.while_updating_ui + # Run update:ui in a thread and continue to do the non-js stuff + ui_thread = update_ui_thread yield - ui_thread.join end diff --git a/lib/miq_cockpit.rb b/lib/miq_cockpit.rb index 210403d811f..3f720278ab7 100644 --- a/lib/miq_cockpit.rb +++ b/lib/miq_cockpit.rb @@ -71,7 +71,7 @@ def self.url(miq_server, opts, address) def initialize(opts = {}) @opts = opts || {} - @config_dir = File.join(__dir__, "..", "config") + @config_dir = Rails.root.join("config").to_s @cockpit_conf_dir = File.join(@config_dir, "cockpit") FileUtils.mkdir_p(@cockpit_conf_dir) end diff --git a/lib/miq_expression.rb b/lib/miq_expression.rb index dcbde2849e5..ca347ccdb3d 100644 --- a/lib/miq_expression.rb +++ b/lib/miq_expression.rb @@ -411,29 +411,6 @@ def includes_for_sql col_details.values.each_with_object({}) { |v, result| result.deep_merge!(v[:include]) } end - def self.expand_conditional_clause(klass, cond) - return klass.send(:sanitize_sql_for_conditions, cond) unless cond.kind_of?(Hash) - - cond = klass.predicate_builder.resolve_column_aliases(cond) - cond = klass.send(:expand_hash_conditions_for_aggregates, cond) - - klass.predicate_builder.build_from_hash(cond).map { |b| klass.connection.visitor.compile(b) }.join(' AND ') - end - - def self.merge_where_clauses(*list) - list = list.compact.collect do |s| - expand_conditional_clause(MiqReport, s) - end.compact - - if list.empty? - nil - elsif list.size == 1 - list.first - else - "(#{list.join(") AND (")})" - end - end - def self.get_cols_from_expression(exp, options = {}) result = {} if exp.kind_of?(Hash) diff --git a/lib/rbac/filterer.rb b/lib/rbac/filterer.rb index e17dd9fd9c3..bce700d8f96 100644 --- a/lib/rbac/filterer.rb +++ b/lib/rbac/filterer.rb @@ -414,7 +414,7 @@ def apply_rbac_directly?(klass) # the associated application model. 
See #rbac_class method # def apply_rbac_through_association?(klass) - klass != VimPerformanceDaily && (klass < MetricRollup || klass < Metric) + klass != VimPerformanceDaily && klass != VimPerformanceTag && (klass < MetricRollup || klass < Metric) end def rbac_base_class(klass) diff --git a/lib/services/dialog_import_service.rb b/lib/services/dialog_import_service.rb index 97e93212782..c24b2690c8c 100644 --- a/lib/services/dialog_import_service.rb +++ b/lib/services/dialog_import_service.rb @@ -69,13 +69,13 @@ def store_for_import(file_contents) def build_dialog_tabs(dialog, export_version = CURRENT_DIALOG_VERSION) dialog["dialog_tabs"].collect do |dialog_tab| - DialogTab.create(dialog_tab.merge("dialog_groups" => build_dialog_groups(dialog_tab, export_version))) + DialogTab.create!(dialog_tab.merge("dialog_groups" => build_dialog_groups(dialog_tab, export_version))) end end def build_dialog_groups(dialog_tab, export_version = CURRENT_DIALOG_VERSION) dialog_tab["dialog_groups"].collect do |dialog_group| - DialogGroup.create(dialog_group.merge("dialog_fields" => build_dialog_fields(dialog_group, export_version))) + DialogGroup.create!(dialog_group.merge("dialog_fields" => build_dialog_fields(dialog_group, export_version))) end end @@ -89,7 +89,7 @@ def build_dialog_fields(dialog_group, export_version = CURRENT_DIALOG_VERSION) def build_resource_actions(dialog) (dialog['resource_actions'] || []).collect do |resource_action| - ResourceAction.create(resource_action.merge('dialog_id' => dialog['id'])) + ResourceAction.create!(resource_action.merge('dialog_id' => dialog['id'])) end end @@ -99,12 +99,14 @@ def check_field_associations(fields) end def import(dialog) - @dialog_import_validator.determine_dialog_validity(dialog) - new_dialog = Dialog.create(dialog.except('dialog_tabs', 'export_version')) - association_list = build_association_list(dialog) - new_dialog.update!(dialog.merge('dialog_tabs' => build_dialog_tabs(dialog, dialog['export_version'] || DEFAULT_DIALOG_VERSION))) - build_associations(new_dialog, association_list) - new_dialog + ActiveRecord::Base.transaction do + @dialog_import_validator.determine_dialog_validity(dialog) + new_dialog = Dialog.create(dialog.except('dialog_tabs', 'export_version')) + association_list = build_association_list(dialog) + new_dialog.update!(dialog.merge('dialog_tabs' => build_dialog_tabs(dialog, dialog['export_version'] || DEFAULT_DIALOG_VERSION))) + build_associations(new_dialog, association_list) + new_dialog + end end def build_associations(dialog, association_list) @@ -113,7 +115,7 @@ def build_associations(dialog, association_list) association.each_value do |value| value.each do |responder| next if fields.select { |field| field.name == responder }.empty? 
- DialogFieldAssociation.create(:trigger_id => fields.find { |field| field.name.include?(association.keys.first) }.id, + DialogFieldAssociation.create!(:trigger_id => fields.find { |field| field.name.include?(association.keys.first) }.id, :respond_id => fields.find { |field| field.name == responder }.id) end end @@ -147,7 +149,7 @@ def import_from_dialogs(dialogs) new_or_existing_dialog = Dialog.where(:label => dialog["label"]).first_or_create dialog['id'] = new_or_existing_dialog.id new_associations = build_association_list(dialog) - new_or_existing_dialog.update( + new_or_existing_dialog.update!( dialog.except('export_version').merge( "dialog_tabs" => build_dialog_tabs(dialog, dialog['export_version'] || DEFAULT_DIALOG_VERSION), "resource_actions" => build_resource_actions(dialog) diff --git a/lib/services/git_based_domain_import_service.rb b/lib/services/git_based_domain_import_service.rb new file mode 100644 index 00000000000..be06b17c7ae --- /dev/null +++ b/lib/services/git_based_domain_import_service.rb @@ -0,0 +1,140 @@ +class GitBasedDomainImportService + def queue_import(git_repo_id, branch_or_tag, tenant_id) + git_repo = GitRepository.find_by(:id => git_repo_id) + + ref_type = if git_repo.git_branches.any? { |git_branch| git_branch.name == branch_or_tag } + "branch" + else + "tag" + end + + import_options = { + "git_repository_id" => git_repo.id, + "ref" => branch_or_tag, + "ref_type" => ref_type, + "tenant_id" => tenant_id, + "overwrite" => true + } + + task_options = { + :action => "Import git repository", + :userid => User.current_user.userid + } + + queue_options = { + :class_name => "MiqAeDomain", + :method_name => "import_git_repo", + :role => "git_owner", + :user_id => User.current_user.id, + :args => [import_options] + } + + MiqTask.generic_action_with_callback(task_options, queue_options) + end + + def queue_refresh(git_repo_id) + task_options = { + :action => "Refresh git repository", + :userid => User.current_user.userid + } + + queue_options = { + :class_name => "GitRepository", + :method_name => "refresh", + :instance_id => git_repo_id, + :role => "git_owner", + :user_id => User.current_user.id, + :args => [] + } + + MiqTask.generic_action_with_callback(task_options, queue_options) + end + + def queue_refresh_and_import(git_url, ref, ref_type, tenant_id, auth_args = {}) + import_options = { + "git_url" => git_url, + "ref" => ref, + "ref_type" => ref_type, + "tenant_id" => tenant_id, + "overwrite" => true + }.merge(prepare_auth_options(auth_args)) + + task_options = { + :action => "Refresh and import git repository", + :userid => User.current_user.userid + } + + queue_options = { + :class_name => "MiqAeDomain", + :method_name => "import_git_url", + :role => "git_owner", + :user_id => User.current_user.id, + :args => [import_options] + } + + MiqTask.generic_action_with_callback(task_options, queue_options) + end + + def queue_destroy_domain(domain_id) + task_options = { + :action => "Destroy domain", + :userid => User.current_user.userid + } + + queue_options = { + :class_name => "MiqAeDomain", + :method_name => "destroy", + :instance_id => domain_id, + :role => "git_owner", + :user_id => User.current_user.id, + :args => [] + } + + MiqTask.generic_action_with_callback(task_options, queue_options) + end + + def import(git_repo_id, branch_or_tag, tenant_id) + task_id = queue_import(git_repo_id, branch_or_tag, tenant_id) + task = MiqTask.wait_for_taskid(task_id) + + domain = task.task_results + raise MiqException::Error, task.message unless domain.kind_of?(MiqAeDomain) + 
+ domain.update(:enabled => true) + end + + def refresh(git_repo_id) + task_id = queue_refresh(git_repo_id) + task = MiqTask.wait_for_taskid(task_id) + + raise MiqException::Error, task.message unless task.status == "Ok" + + task.task_results + end + + def destroy_domain(domain_id) + task_id = queue_destroy_domain(domain_id) + task = MiqTask.wait_for_taskid(task_id) + + raise MiqException::Error, task.message unless task.status == "Ok" + + task.task_results + end + + def self.available? + MiqRegion.my_region.role_active?("git_owner") + end + + private + + def prepare_auth_options(auth_args) + auth_args.stringify_keys! + + auth_options = {} + auth_options["password"] = ManageIQ::Password.try_encrypt(auth_args["password"]) unless auth_args["password"].nil? + auth_options["userid"] = auth_args["userid"] unless auth_args["userid"].nil? + auth_options["verify_ssl"] = auth_args["verify_ssl"] unless auth_args["verify_ssl"].nil? + + auth_options + end +end diff --git a/lib/tasks/test.rake b/lib/tasks/test.rake index 97c9cffc6f8..3b099d47341 100644 --- a/lib/tasks/test.rake +++ b/lib/tasks/test.rake @@ -3,9 +3,13 @@ require_relative './evm_test_helper' if defined?(RSpec) namespace :test do task :initialize do - ENV['RAILS_ENV'] ||= "test" - Rails.env = ENV['RAILS_ENV'] if defined?(Rails) - ENV['VERBOSE'] ||= "false" + if ENV['RAILS_ENV'] && ENV["RAILS_ENV"] != "test" + warn "Warning: RAILS_ENV is currently set to '#{ENV["RAILS_ENV"]}'. Forcing to 'test' for this run." + end + ENV['RAILS_ENV'] = "test" + Rails.env = 'test' if defined?(Rails) + + ENV['VERBOSE'] ||= "false" end task :verify_no_db_access_loading_rails_environment do diff --git a/lib/unique_within_region_validator.rb b/lib/unique_within_region_validator.rb index 25165c24a0f..ee90e944f51 100644 --- a/lib/unique_within_region_validator.rb +++ b/lib/unique_within_region_validator.rb @@ -1,3 +1,16 @@ +# +# Validates that a record is unique within the region number +# +# Options: +# :match_case: Whether or not the uniqueness check should be case sensitive +# (default: true) +# :scope: An attribute used to further limit the scope of the uniqueness check +# +# Examples: +# validates :name, :unique_within_region => true +# validates :name, :unique_within_region => {:match_case => false} +# validates :name, :unique_within_region => {:scope => :dialog_type, :match_case => false} +# class UniqueWithinRegionValidator < ActiveModel::EachValidator def validate_each(record, attribute, value) return if value.nil? diff --git a/lib/vmdb/fast_gettext_helper.rb b/lib/vmdb/fast_gettext_helper.rb index 63bc36c857b..ad57e97edc8 100644 --- a/lib/vmdb/fast_gettext_helper.rb +++ b/lib/vmdb/fast_gettext_helper.rb @@ -30,20 +30,18 @@ def self.supported_locales # - it # - nl # - YAML.load_file(supported_locales_filename) + @supported_locales ||= supported_locales_files.flat_map { |file| YAML.load_file(file) } end - def self.supported_locales_filename - @supported_locales_filename ||= Rails.root.join("config", "supported_locales.yml") - end - - def self.supported_locales_specified? - File.exist?(supported_locales_filename) + private_class_method def self.supported_locales_files + Vmdb::Plugins.to_a.unshift(Rails) + .map { |source| source.root.join("config", "supported_locales.yml") } + .select(&:exist?) end def self.find_available_locales available_locales = find_available_locales_via_directories - available_locales &= supported_locales if supported_locales_specified? + available_locales &= supported_locales if supported_locales.any? 
available_locales end diff --git a/lib/vmdb/permission_stores.rb b/lib/vmdb/permission_stores.rb index 2acbf77c826..1d3d50bfe03 100644 --- a/lib/vmdb/permission_stores.rb +++ b/lib/vmdb/permission_stores.rb @@ -1,34 +1,33 @@ -module Vmdb - module PermissionStores - class Configuration - attr_accessor :backend - attr_accessor :options +require "yaml" - def initialize - @options = {} - end +module Vmdb + class PermissionStores + def self.instance + @instance ||= new(blacklist) + end - def create - PermissionStores.create(self) - end + def self.blacklist + permission_files.flat_map { |file| YAML.load_file(file) } + end - def load - require "vmdb/permission_stores/#{backend}" - end + private_class_method def self.permission_files + Vmdb::Plugins.to_a.unshift(Rails) + .map { |source| source.root.join("config", "permissions.yml") } + .select(&:exist?) end - class << self - attr_accessor :configuration, :instance + attr_reader :blacklist + + def initialize(blacklist) + @blacklist = blacklist end - def self.configure - @configuration = Configuration.new - yield @configuration + def can?(permission) + blacklist.exclude?(permission.to_s) end - def self.initialize! - @configuration.load - @instance = @configuration.create + def supported_ems_type?(type) + can?("ems-type:#{type}") end end end diff --git a/lib/vmdb/permission_stores/null.rb b/lib/vmdb/permission_stores/null.rb deleted file mode 100644 index 1707d4caab0..00000000000 --- a/lib/vmdb/permission_stores/null.rb +++ /dev/null @@ -1,17 +0,0 @@ -module Vmdb - module PermissionStores - def self.create(_config) - Null.new - end - - class Null - def can?(_permission) - true - end - - def supported_ems_type?(_type) - true - end - end - end -end diff --git a/lib/vmdb/permission_stores/yaml.rb b/lib/vmdb/permission_stores/yaml.rb deleted file mode 100644 index e9a5d50aab9..00000000000 --- a/lib/vmdb/permission_stores/yaml.rb +++ /dev/null @@ -1,23 +0,0 @@ -require 'psych' - -module Vmdb - module PermissionStores - def self.create(config) - YAML.new(config.options[:filename]) - end - - class YAML - def initialize(file) - @blacklist = Psych.load_file(file) - end - - def can?(permission) - !@blacklist.include?(permission) - end - - def supported_ems_type?(type) - can?("ems-type:#{type}") - end - end - end -end diff --git a/spec/factories/ext_management_system.rb b/spec/factories/ext_management_system.rb index c620c80d7bb..9dda77e4c92 100644 --- a/spec/factories/ext_management_system.rb +++ b/spec/factories/ext_management_system.rb @@ -1,6 +1,11 @@ FactoryBot.define do factory :ext_management_system, :class => "ManageIQ::Providers::Vmware::InfraManager" do + # The provider has to be set before the hostname/ipaddress sequences as in some cases these attributes + # might be delegated to the provider. As the attributes are being set based on the order in this file, + # it is important to keep this line at the beginning of the factory. 
+ provider { nil } + sequence(:name) { |n| "ems_#{seq_padded_for_sorting(n)}" } sequence(:hostname) { |n| "ems-#{seq_padded_for_sorting(n)}" } sequence(:ipaddress) { |n| ip_from_seq(n) } @@ -92,8 +97,6 @@ parent_manager { FactoryBot.create(:ext_management_system) } end - - factory :ems_storage, :aliases => ["manageiq/providers/storage_manager"], :class => "ManageIQ::Providers::StorageManager::SwiftManager", @@ -123,7 +126,9 @@ factory :configuration_manager, :aliases => ["manageiq/providers/configuration_manager"], :class => "ManageIQ::Providers::Foreman::ConfigurationManager", - :parent => :ext_management_system + :parent => :ext_management_system do + provider :factory => :provider + end # Automation managers @@ -135,7 +140,9 @@ factory :provisioning_manager, :aliases => ["manageiq/providers/provisioning_manager"], :class => "ManageIQ::Providers::Foreman::ProvisioningManager", - :parent => :ext_management_system + :parent => :ext_management_system do + provider :factory => :provider + end # Leaf classes for ems_infra @@ -333,7 +340,6 @@ end end - factory :ems_openshift, :aliases => ["manageiq/providers/openshift/container_manager"], :class => "ManageIQ::Providers::Openshift::ContainerManager", @@ -344,7 +350,9 @@ factory :configuration_manager_foreman, :aliases => ["manageiq/providers/foreman/configuration_manager"], :class => "ManageIQ::Providers::Foreman::ConfigurationManager", - :parent => :configuration_manager + :parent => :configuration_manager do + provider :factory => :provider_foreman + end trait(:provider) do after(:build, &:create_provider) @@ -376,5 +384,7 @@ factory :provisioning_manager_foreman, :aliases => ["manageiq/providers/foreman/provisioning_manager"], :class => "ManageIQ::Providers::Foreman::ProvisioningManager", - :parent => :provisioning_manager + :parent => :provisioning_manager do + provider :factory => :provider_foreman + end end diff --git a/spec/lib/miq_expression_spec.rb b/spec/lib/miq_expression_spec.rb index 0db9e1e3736..6ca57b0fbff 100644 --- a/spec/lib/miq_expression_spec.rb +++ b/spec/lib/miq_expression_spec.rb @@ -2441,45 +2441,6 @@ end end - describe ".merge_where_clauses" do - it "returns nil for nil" do - expect(MiqExpression.merge_where_clauses(nil)).to be_nil - end - - it "returns nil for blank" do - expect(MiqExpression.merge_where_clauses("")).to be_nil - end - - it "returns nil for multiple empty arrays" do - expect(MiqExpression.merge_where_clauses([],[])).to be_nil - end - - it "returns same string single results" do - expect(MiqExpression.merge_where_clauses("a=5")).to eq("a=5") - end - - it "returns same string when concatinating blank results" do - expect(MiqExpression.merge_where_clauses("a=5", [])).to eq("a=5") - end - - # would be nice if we returned a hash - it "returns a string if the only argument is a hash" do - expect(MiqExpression.merge_where_clauses({"vms.id" => 5})).to eq("\"vms\".\"id\" = 5") - end - - it "concatinates 2 arrays" do - expect(MiqExpression.merge_where_clauses(["a=?",5], ["b=?",5])).to eq("(a=5) AND (b=5)") - end - - it "concatinates 2 string" do - expect(MiqExpression.merge_where_clauses("a=5", "b=5")).to eq("(a=5) AND (b=5)") - end - - it "concatinates a string and a hash" do - expect(MiqExpression.merge_where_clauses("a=5", {"vms.id" => 5})).to eq("(a=5) AND (\"vms\".\"id\" = 5)") - end - end - describe ".parse_field_or_tag" do subject { described_class.parse_field_or_tag(@field).try(:column_type) } let(:string_custom_attribute) do diff --git a/spec/lib/rbac/filterer_spec.rb b/spec/lib/rbac/filterer_spec.rb 
index e65670a4b93..128e4e35c05 100644 --- a/spec/lib/rbac/filterer_spec.rb +++ b/spec/lib/rbac/filterer_spec.rb @@ -2622,6 +2622,7 @@ def get_rbac_results_for_and_expect_objects(klass, expected_objects) it ".apply_rbac_through_association?" do expect(described_class.new.send(:apply_rbac_through_association?, HostMetric)).to be_truthy expect(described_class.new.send(:apply_rbac_through_association?, Vm)).not_to be + expect(described_class.new.send(:apply_rbac_through_association?, VimPerformanceTag)).not_to be end describe "find_targets_with_direct_rbac" do diff --git a/spec/lib/services/dialog_import_service_spec.rb b/spec/lib/services/dialog_import_service_spec.rb index bb99ac4f014..3a9c7f4d5a0 100644 --- a/spec/lib/services/dialog_import_service_spec.rb +++ b/spec/lib/services/dialog_import_service_spec.rb @@ -517,6 +517,9 @@ expect do dialog_import_service.import(dialogs.first) end.to raise_error(ActiveRecord::RecordInvalid, /Validation failed: Dialog: Name is not unique within region/) + .and change { DialogTab.count }.by(0) + .and change { DialogGroup.count }.by(0) + .and change { DialogField.count }.by(0) end end diff --git a/spec/lib/services/git_based_domain_import_service_spec.rb b/spec/lib/services/git_based_domain_import_service_spec.rb new file mode 100644 index 00000000000..7e2b54f3c0d --- /dev/null +++ b/spec/lib/services/git_based_domain_import_service_spec.rb @@ -0,0 +1,303 @@ +describe GitBasedDomainImportService do + shared_context "import setup" do + let(:git_repo) do + double("GitRepository", :git_branches => git_branches, + :id => 123, + :url => 'http://www.example.com') + end + let(:user) { double("User", :userid => userid, :id => 123) } + let(:domain) { FactoryBot.build(:miq_ae_git_domain, :id => 999) } + let(:userid) { "fred" } + let(:task) { double("MiqTask", :id => 123) } + let(:ref_name) { 'the_branch_name' } + let(:ref_type) { 'branch' } + let(:method_name) { 'import_git_repo' } + let(:action) { 'Import git repository' } + let(:task_options) { {:action => action, :userid => userid} } + let(:queue_options) do + { + :class_name => "MiqAeDomain", + :method_name => method_name, + :role => "git_owner", + :user_id => 123, + :args => [import_options] + } + end + let(:import_options) do + { + "git_repository_id" => git_repo.id, + "ref" => ref_name, + "ref_type" => ref_type, + "tenant_id" => 321, + "overwrite" => true + } + end + let(:status) { "Ok" } + let(:message) { "Success" } + end + + shared_context "repository setup" do + let(:git_branches) { [] } + let(:queue_options) do + { + :class_name => "GitRepository", + :instance_id => git_repo.id, + :method_name => method_name, + :role => "git_owner", + :user_id => 123, + :args => [] + } + end + end + + shared_context "domain setup" do + let(:git_branches) { [] } + let(:queue_options) do + { + :class_name => "MiqAeDomain", + :instance_id => domain.id, + :method_name => method_name, + :role => "git_owner", + :user_id => 123, + :args => [] + } + end + end + + describe "#import" do + include_context "import setup" + before do + allow(GitRepository).to receive(:find_by).with(:id => git_repo.id).and_return(git_repo) + allow(domain).to receive(:update_attribute).with(:enabled, true) + allow(MiqTask).to receive(:wait_for_taskid).with(task.id).and_return(task) + allow(User).to receive(:current_user).and_return(user) + allow(task).to receive(:message).and_return(nil) + end + + context "when git branches that match the given name exist" do + let(:git_branches) { [double("GitBranch", :name => ref_name)] } + + it "calls 'import' with 
the correct options" do + allow(task).to receive(:task_results).and_return(domain) + expect(MiqTask).to receive(:generic_action_with_callback).with(task_options, queue_options).and_return(task.id) + expect(domain).to receive(:update).with(:enabled => true) + + subject.import(git_repo.id, ref_name, 321) + end + end + + context "when git branches that match the given name do not exist" do + let(:git_branches) { [] } + let(:ref_name) { "the_tag_name" } + let(:ref_type) { "tag" } + + it "calls 'import' with the correct options" do + allow(task).to receive(:task_results).and_return(domain) + expect(MiqTask).to receive(:generic_action_with_callback).with(task_options, queue_options).and_return(task.id) + expect(domain).to receive(:update).with(:enabled => true) + subject.import(git_repo.id, ref_name, 321) + end + end + + context "when import fails and the task result is nil" do + let(:git_branches) { [double("GitBranch", :name => ref_name)] } + + it "raises an exception with a message about invalid domain" do + allow(task).to receive(:task_results).and_return(nil) + expect(MiqTask).to receive(:generic_action_with_callback).with(task_options, hash_including(queue_options)).and_return(task.id) + + expect { subject.import(git_repo.id, ref_name, 321) }.to raise_exception( + MiqException::Error, "MiqException::Error" + ) + end + + it "raises an exception with a message about multiple domains" do + allow(task).to receive(:task_results).and_return(nil) + allow(task).to receive(:message).and_return('multiple domains') + expect(MiqTask).to receive(:generic_action_with_callback).with(task_options, hash_including(queue_options)).and_return(task.id) + + expect { subject.import(git_repo.id, ref_name, 321) }.to raise_exception( + MiqException::Error, 'multiple domains' + ) + end + end + end + + describe "#queue_import" do + include_context "import setup" + before do + allow(GitRepository).to receive(:find_by).with(:id => git_repo.id).and_return(git_repo) + allow(User).to receive(:current_user).and_return(user) + allow(task).to receive(:message).and_return(nil) + end + + context "when git branches that match the given name exist" do + let(:git_branches) { [double("GitBranch", :name => ref_name)] } + + it "calls 'queue_import' with the correct options" do + expect(MiqTask).to receive(:generic_action_with_callback).with(task_options, hash_including(queue_options)).and_return(task.id) + + expect(subject.queue_import(git_repo.id, ref_name, 321)).to eq(task.id) + end + end + + context "when git branches that match the given name do not exist" do + let(:git_branches) { [] } + let(:ref_name) { "the_tag_name" } + let(:ref_type) { "tag" } + + it "calls 'queue_import' with the correct options" do + expect(MiqTask).to receive(:generic_action_with_callback).with(task_options, hash_including(queue_options)).and_return(task.id) + + expect(subject.queue_import(git_repo.id, ref_name, 321)).to eq(task.id) + end + end + end + + describe "#queue_refresh_and_import" do + include_context "import setup" + before do + allow(User).to receive(:current_user).and_return(user) + allow(task).to receive(:message).and_return(nil) + end + + let(:git_branches) { [] } + let(:ref_name) { "the_tag_name" } + let(:ref_type) { "tag" } + let(:method_name) { 'import_git_url' } + let(:action) { 'Refresh and import git repository' } + + context "when git branches that match the given name do not exist" do + let(:import_options) do + { + "git_url" => git_repo.url, + "ref" => ref_name, + "ref_type" => ref_type, + "tenant_id" => 321, + "overwrite" => true 
+ } + end + + it "calls 'queue_import' with the correct options" do + expect(MiqTask).to receive(:generic_action_with_callback).with(task_options, hash_including(queue_options)).and_return(task.id) + + expect(subject.queue_refresh_and_import(git_repo.url, ref_name, ref_type, 321)).to eq(task.id) + end + end + + context "when auth args are provided" do + let(:import_options) do + { + "git_url" => git_repo.url, + "ref" => ref_name, + "ref_type" => ref_type, + "tenant_id" => 321, + "overwrite" => true, + "userid" => "bob", + "verify_ssl" => false + } + end + + it "calls 'queue_import' with additional auth args using stringified keys" do + expect(MiqTask).to receive(:generic_action_with_callback).with(task_options, hash_including(queue_options)).and_return(task.id) + + expect(subject.queue_refresh_and_import(git_repo.url, ref_name, ref_type, 321, "userid" => "bob", :verify_ssl => false)).to eq(task.id) + end + end + + context "when a password is provided" do + let(:import_options) do + { + "git_url" => git_repo.url, + "ref" => ref_name, + "ref_type" => ref_type, + "tenant_id" => 321, + "overwrite" => true, + "userid" => "bob", + "password" => ManageIQ::Password.try_encrypt("secret") + } + end + + it "calls 'queue_import' with an encrypted password" do + expect(MiqTask).to receive(:generic_action_with_callback).with(task_options, hash_including(queue_options)).and_return(task.id) + + expect(subject.queue_refresh_and_import(git_repo.url, ref_name, ref_type, 321, "userid" => "bob", :password => "secret")).to eq(task.id) + end + end + end + + describe "#queue_refresh" do + include_context "import setup" + include_context "repository setup" + let(:action) { 'Refresh git repository' } + let(:method_name) { 'refresh' } + before do + allow(User).to receive(:current_user).and_return(user) + end + + it "calls 'queue_refresh' with the correct options" do + expect(MiqTask).to receive(:generic_action_with_callback).with(task_options, hash_including(queue_options)).and_return(task.id) + + expect(subject.queue_refresh(git_repo.id)).to eq(task.id) + end + end + + describe "#refresh" do + include_context "import setup" + include_context "repository setup" + let(:action) { 'Refresh git repository' } + let(:method_name) { 'refresh' } + let(:task) { double("MiqTask", :id => 123, :status => status, :message => message) } + + before do + allow(MiqTask).to receive(:wait_for_taskid).with(task.id).and_return(task) + allow(MiqTask).to receive(:find).with(task.id).and_return(task) + allow(User).to receive(:current_user).and_return(user) + end + + context "success" do + it "calls 'refresh' with the correct options and succeeds" do + allow(task).to receive(:task_results).and_return(true) + expect(MiqTask).to receive(:generic_action_with_callback).with(task_options, hash_including(queue_options)).and_return(task.id) + + expect(subject.refresh(git_repo.id)).to be_truthy + end + end + + context "failure" do + let(:status) { "Failed" } + let(:message) { "My Error Message" } + it "calls 'refresh' with the correct options and fails" do + expect(MiqTask).to receive(:generic_action_with_callback).with(task_options, hash_including(queue_options)).and_return(task.id) + + expect { subject.refresh(git_repo.id) }.to raise_exception(MiqException::Error, message) + end + end + end + + describe "destroy domain" do + include_context "import setup" + include_context "domain setup" + let(:action) { 'Destroy domain' } + let(:method_name) { 'destroy' } + let(:task) { double("MiqTask", :id => 123, :status => status, :message => message) } + + 
before do + allow(MiqTask).to receive(:wait_for_taskid).with(task.id).and_return(task) + allow(User).to receive(:current_user).and_return(user) + allow(task).to receive(:task_results).and_return(true) + end + + it "#destroy_domain" do + expect(MiqTask).to receive(:generic_action_with_callback).with(task_options, hash_including(queue_options)).and_return(task.id) + + expect(subject.destroy_domain(domain.id)).to be_truthy + end + + it "#queue_destroy_domain" do + expect(MiqTask).to receive(:generic_action_with_callback).with(task_options, hash_including(queue_options)).and_return(task.id) + + expect(subject.queue_destroy_domain(domain.id)).to eq(task.id) + end + end +end diff --git a/spec/lib/vmdb/permission_stores_spec.rb b/spec/lib/vmdb/permission_stores_spec.rb index 8308c4a0a7d..25d0e4325bd 100644 --- a/spec/lib/vmdb/permission_stores_spec.rb +++ b/spec/lib/vmdb/permission_stores_spec.rb @@ -1,67 +1,27 @@ RSpec.describe Vmdb::PermissionStores do - it 'should be configurable' do - stub_vmdb_permission_store do - Vmdb::PermissionStores.configure do |config| - config.backend = 'yaml' - config.options[:filename] = 'some file' - end - config = Vmdb::PermissionStores.configuration + context "when no blacklist is present" do + let(:instance) { described_class.new([]) } - expect(config.backend).to eq('yaml') - expect(config.options[:filename]).to eq('some file') + it "#can?" do + expect(instance.can?('some_feature')).to be_truthy end - end - - context 'configuration' do - it 'requires the backend' do - stub_vmdb_permission_store do - required_file = nil - - klass = Class.new(Vmdb::PermissionStores::Configuration) do - define_method(:require) do |file| - required_file = file - end - end - - config = klass.new - config.backend = 'yaml' - config.load - expect(required_file).to eq('vmdb/permission_stores/yaml') - end - end - - it 'can initialize the yaml back end' do - stub_vmdb_permission_store do - Tempfile.create(%w(config yml)) do |f| - f.write(['foo'].to_yaml) - f.close - - config = Vmdb::PermissionStores::Configuration.new - config.backend = 'yaml' - config.options[:filename] = f.path - config.load - expect(config.create).to be_truthy - end - end + it "#supported_ems_type?" do + expect(instance.supported_ems_type?('some_ems_type')).to be_truthy end end - describe '::YAML' do - it '#can?' do - stub_vmdb_permission_store_with_types(["bar"]) do - instance = Vmdb::PermissionStores.instance - expect(instance.can?('foo')).to be_truthy - expect(instance.can?('bar')).to be_falsey - end + context "when a blacklist is present" do + let(:instance) { described_class.new(["blacklisted_feature", "ems-type:blacklisted_provider"]) } + + it "#can?" do + expect(instance.can?('some_feature')).to be_truthy + expect(instance.can?('blacklisted_feature')).to be_falsey end - it '#supported_ems_type?' do - stub_vmdb_permission_store_with_types(["ems-type:bar"]) do - instance = Vmdb::PermissionStores.instance - expect(instance.supported_ems_type?('foo')).to be_truthy - expect(instance.supported_ems_type?('bar')).to be_falsey - end + it "#supported_ems_type?" 
do + expect(instance.supported_ems_type?('some_ems_type')).to be_truthy + expect(instance.supported_ems_type?('blacklisted_provider')).to be_falsey end end end diff --git a/spec/models/authenticator/httpd_spec.rb b/spec/models/authenticator/httpd_spec.rb index d83f3a54fe4..648ddace219 100644 --- a/spec/models/authenticator/httpd_spec.rb +++ b/spec/models/authenticator/httpd_spec.rb @@ -272,6 +272,18 @@ def authenticate 'X-Remote-User-Email' => 'Sally@example.com') end + context "with a race condition on create user" do + before do + authenticate + end + + it "update the exiting user" do + allow(User).to receive(:lookup_by_userid).and_return(nil) + allow(User).to receive(:in_my_region).and_return(User.none, User.all) + expect { authenticate }.not_to(change { User.where(:userid => 'sally@example.com').count }.from(1)) + end + end + context "when user record with userid in upn format already exists" do let!(:sally_username) { FactoryBot.create(:user, :userid => 'sAlly') } let!(:sally_dn) { FactoryBot.create(:user, :userid => dn) } diff --git a/spec/models/classification_spec.rb b/spec/models/classification_spec.rb index a27f6862f33..c5d928af3fc 100644 --- a/spec/models/classification_spec.rb +++ b/spec/models/classification_spec.rb @@ -278,8 +278,8 @@ expect(all_tagged_with(Host.order('name'), ent11.name, ent11.parent.name)).to eq([host1]) expect(any_tagged_with(Host.order('name'), ent11.name, ent11.parent.name)).to eq([host1]) - expect(all_tagged_with(Host.order('lower(name)'), ent11.name, ent11.parent.name)).to eq([host1]) - expect(any_tagged_with(Host.order('lower(name)'), ent11.name, ent11.parent.name)).to eq([host1]) + expect(all_tagged_with(Host.order(Arel.sql('lower(name)')), ent11.name, ent11.parent.name)).to eq([host1]) + expect(any_tagged_with(Host.order(Arel.sql('lower(name)')), ent11.name, ent11.parent.name)).to eq([host1]) expect(all_tagged_with(Host.order(Host.arel_table[:name].lower), ent11.name, ent11.parent.name)).to eq([host1]) expect(any_tagged_with(Host.order(Host.arel_table[:name].lower), ent11.name, ent11.parent.name)).to eq([host1]) diff --git a/spec/models/conversion_host/configurations_spec.rb b/spec/models/conversion_host/configurations_spec.rb index 809f1c28289..57226988541 100644 --- a/spec/models/conversion_host/configurations_spec.rb +++ b/spec/models/conversion_host/configurations_spec.rb @@ -90,11 +90,11 @@ conversion_host.disable end - it "to fail and send notification" do + it "to raise if active tasks exist" do expected_notify[:type] = :conversion_host_config_failure - allow(conversion_host).to receive(:disable_conversion_host_role).and_raise + FactoryBot.create(:service_template_transformation_plan_task, :conversion_host => conversion_host, :state => 'migrate') expect(Notification).to receive(:create).with(expected_notify) - expect { conversion_host.disable }.to raise_error(StandardError) + expect { conversion_host.disable }.to raise_error(StandardError, "There are active migration tasks running on this conversion host") end it "tags the associated resource as expected" do @@ -117,12 +117,20 @@ context ".enable_queue" do let(:op) { 'enable' } + it "raises if resource has no hostname nor IP address" do + allow(vm).to receive(:hostname).and_return(nil) + allow(vm).to receive(:ipaddresses).and_return([]) + expect { described_class.enable_queue(:resource => vm) }.to raise_error("Vm '#{vm.name}' doesn't have a hostname or IP address in inventory") + end + it "raises an error if the resource is already configured as a conversion host" do + allow(vm).to 
receive(:ipaddresses).and_return(['10.0.0.1']) FactoryBot.create(:conversion_host, :resource => vm) expect { described_class.enable_queue(:resource => vm) }.to raise_error("the resource '#{vm.name}' is already configured as a conversion host") end it "to queue with a task" do + allow(vm).to receive(:ipaddresses).and_return(['10.0.0.1']) task_id = described_class.enable_queue(params) expected_context_data = {:request_params => params.except(:resource)} @@ -138,6 +146,7 @@ end it "rejects ssh key information as context data" do + allow(vm).to receive(:ipaddresses).and_return(['10.0.0.1']) task_id = described_class.enable_queue(params.merge(:conversion_host_ssh_private_key => 'xxx', :vmware_ssh_private_key => 'yyy')) expected_context_data = {:request_params => params.except(:resource)} diff --git a/spec/models/conversion_host_spec.rb b/spec/models/conversion_host_spec.rb index 9dd67ebbf6b..74cb5b36761 100644 --- a/spec/models/conversion_host_spec.rb +++ b/spec/models/conversion_host_spec.rb @@ -488,7 +488,7 @@ authentication = FactoryBot.create(:authentication_ssh_keypair) conversion_host_vm.authentications << authentication allow(Net::SSH).to receive(:start).and_raise(Net::SSH::AuthenticationFailed.new) - expect { conversion_host_vm.verify_credentials }.to raise_error(MiqException::MiqInvalidCredentialsError) + expect { conversion_host_vm.verify_credentials }.to raise_error(Net::SSH::AuthenticationFailed, /Incorrect credentials/) end it "works if there are multiple associated validations" do @@ -520,7 +520,7 @@ end it "works as expected if the connection is unsuccessful" do - allow(conversion_host).to receive(:connect_ssh).and_raise(MiqException::MiqInvalidCredentialsError) + allow(conversion_host).to receive(:connect_ssh).and_raise(Net::SSH::AuthenticationFailed) expected_message = "Failed to connect and prepare conversion for task '#{task.id}'" expect { conversion_host.prepare_conversion(task.id, conversion_options) }.to raise_error(/#{expected_message}/) end @@ -581,7 +581,7 @@ let(:filtered_options) { conversion_options.clone.update(:ssh_key => '__FILTERED__', :password => '__FILTERED__') } it "works as expected if the connection is unsuccessful" do - allow(conversion_host).to receive(:prepare_conversion).and_raise(MiqException::MiqInvalidCredentialsError) + allow(conversion_host).to receive(:prepare_conversion).and_raise(Net::SSH::AuthenticationFailed) expected_message = "Failed to connect and run conversion using options #{filtered_options}" expect { conversion_host.run_conversion(task.id, conversion_options) }.to raise_error(/#{expected_message}/) end @@ -610,7 +610,7 @@ end it "works as expected if the connection is unsuccessful" do - allow(conversion_host).to receive(:connect_ssh).and_raise(MiqException::MiqInvalidCredentialsError) + allow(conversion_host).to receive(:connect_ssh).and_raise(Net::SSH::AuthenticationFailed) expected_message = "Failed to connect and retrieve conversion state data from file '\/var\/lib\/uci\/#{task.id}\/state.json'" expect { conversion_host.get_conversion_state(task.id) }.to raise_error(/#{expected_message}/) end @@ -640,7 +640,7 @@ end it "works as expected if the connection is unsuccessful" do - allow(conversion_host).to receive(:connect_ssh).and_raise(MiqException::MiqInvalidCredentialsError) + allow(conversion_host).to receive(:connect_ssh).and_raise(Net::SSH::AuthenticationFailed) expected_message = "Failed to connect and apply limits for task '#{task.id}'" expect { conversion_host.apply_task_limits(task.id, limits) }.to 
raise_error(/#{expected_message}/) end diff --git a/spec/models/ems_event_spec.rb b/spec/models/ems_event_spec.rb index 99884b24c7c..7a09e754aa2 100644 --- a/spec/models/ems_event_spec.rb +++ b/spec/models/ems_event_spec.rb @@ -120,28 +120,48 @@ } end - context "queue_type: artemis" do - before { stub_settings_merge(:prototype => {:queue_type => 'artemis'}) } + context "messaging_type: artemis" do + before { stub_settings_merge(:prototype => {:messaging_type => 'artemis'}) } it "Adds event to Artemis queue" do - queue_client = double("ManageIQ::Messaging") + messaging_client = double("ManageIQ::Messaging") expected_queue_payload = { - :service => "events", + :service => "manageiq.ems-events", :sender => ems.id, :event => event_hash[:event_type], :payload => event_hash, } - expect(queue_client).to receive(:publish_topic).with(expected_queue_payload) - expect(MiqQueue).to receive(:artemis_client).with('event_handler').and_return(queue_client) + expect(messaging_client).to receive(:publish_topic).with(expected_queue_payload) + expect(MiqQueue).to receive(:messaging_client).with('event_handler').and_return(messaging_client) described_class.add_queue('add', ems.id, event_hash) end end - context "queue_type: miq_queue" do - before { stub_settings_merge(:prototype => {:queue_type => 'miq_queue'}) } + context "messaging_type: kafka" do + before { stub_settings_merge(:prototype => {:messaging_type => 'kafka'}) } + + it "Adds event to Kafka topic" do + messaging_client = double("ManageIQ::Messaging") + + expected_queue_payload = { + :service => "manageiq.ems-events", + :sender => ems.id, + :event => event_hash[:event_type], + :payload => event_hash, + } + + expect(messaging_client).to receive(:publish_topic).with(expected_queue_payload) + expect(MiqQueue).to receive(:messaging_client).with('event_handler').and_return(messaging_client) + + described_class.add_queue('add', ems.id, event_hash) + end + end + + context "messaging_type: miq_queue" do + before { stub_settings_merge(:prototype => {:messaging_type => 'miq_queue'}) } it "Adds event to MiqQueue" do expected_queue_payload = { diff --git a/spec/models/ext_management_system_spec.rb b/spec/models/ext_management_system_spec.rb index 60db0367d87..94e23f8b342 100644 --- a/spec/models/ext_management_system_spec.rb +++ b/spec/models/ext_management_system_spec.rb @@ -86,8 +86,16 @@ expect(described_class.types).to match_array(all_types_and_descriptions.keys) end - it ".supported_types" do - expect(described_class.supported_types).to match_array(all_types_and_descriptions.keys) + describe ".supported_types" do + it "with default permissions" do + expect(described_class.supported_types).to match_array(all_types_and_descriptions.keys) + end + + it "with removed permissions" do + allow(Vmdb::PermissionStores.instance).to receive(:supported_ems_type?).and_return(true) + allow(Vmdb::PermissionStores.instance).to receive(:supported_ems_type?).with("vmwarews").and_return(false) + expect(described_class.supported_types).not_to include("vmwarews") + end end describe ".supported_types_and_descriptions_hash" do @@ -96,9 +104,9 @@ end it "with removed permissions" do - stub_vmdb_permission_store_with_types(["ems-type:vmwarews"]) do - expect(described_class.supported_types_and_descriptions_hash).to_not include("vmwarews") - end + allow(Vmdb::PermissionStores.instance).to receive(:supported_ems_type?).and_return(true) + allow(Vmdb::PermissionStores.instance).to receive(:supported_ems_type?).with("vmwarews").and_return(false) + 
expect(described_class.supported_types_and_descriptions_hash).to_not include("vmwarews") end end @@ -130,8 +138,10 @@ expect { ManageIQ::Providers::CloudManager.new(:hostname => "abc", :name => "abc", :zone => FactoryBot.build(:zone)).validate! }.to raise_error(ActiveRecord::RecordInvalid) expect { ManageIQ::Providers::AutomationManager.new(:hostname => "abc", :name => "abc", :zone => FactoryBot.build(:zone)).validate! }.to raise_error(ActiveRecord::RecordInvalid) expect(ManageIQ::Providers::Vmware::InfraManager.new(:hostname => "abc", :name => "abc", :zone => FactoryBot.build(:zone)).validate!).to eq(true) - expect(ManageIQ::Providers::Foreman::ConfigurationManager.new(:hostname => "abc", :name => "abc", :zone => FactoryBot.build(:zone)).validate!).to eq(true) - expect(ManageIQ::Providers::Foreman::ProvisioningManager.new(:hostname => "abc", :name => "abc", :zone => FactoryBot.build(:zone)).validate!).to eq(true) + + foreman_provider = ManageIQ::Providers::Foreman::Provider.new + expect(ManageIQ::Providers::Foreman::ConfigurationManager.new(:provider => foreman_provider, :hostname => "abc", :name => "abc", :zone => FactoryBot.build(:zone)).validate!).to eq(true) + expect(ManageIQ::Providers::Foreman::ProvisioningManager.new(:provider => foreman_provider, :hostname => "abc", :name => "abc", :zone => FactoryBot.build(:zone)).validate!).to eq(true) end context "#ipaddress / #ipaddress=" do @@ -310,6 +320,34 @@ end end + describe "refresh" do + let(:ems) { FactoryBot.create(:ext_management_system) } + + it "raises an error if the authentication check fails" do + allow(ems).to receive(:missing_credentials?).and_return(false) + allow(ems).to receive(:authentication_status_ok?).and_return(false) + + expect { ems.refresh }.to raise_error(RuntimeError, "Provider failed last authentication check") + end + + it "raises an error if no provider credentials are defined" do + allow(ems).to receive(:authentication_status_ok?).and_return(true) + allow(ems).to receive(:missing_credentials?).and_return(true) + + expect { ems.refresh }.to raise_error(RuntimeError, "no Provider credentials defined") + end + + it "calls the EmsRefresh.refresh method internally" do + allow(ems).to receive(:missing_credentials?).and_return(false) + allow(ems).to receive(:authentication_status_ok?).and_return(true) + allow(EmsRefresh).to receive(:refresh) + + ems.refresh + + expect(EmsRefresh).to have_received(:refresh) + end + end + context "with virtual totals" do before do @ems = FactoryBot.create(:ems_vmware) diff --git a/spec/models/infra_conversion_job_spec.rb b/spec/models/infra_conversion_job_spec.rb index 578b204ee1c..132d5c4b4b5 100644 --- a/spec/models/infra_conversion_job_spec.rb +++ b/spec/models/infra_conversion_job_spec.rb @@ -104,6 +104,62 @@ end end + context '.abort_conversion' do + it 'updates task progress and signals :abort_virtv2v' do + job.state = 'waiting_for_ip_address' + Timecop.freeze(2019, 2, 6) do + progress = { + :current_state => 'waiting_for_ip_address', + :current_description => 'Waiting for IP address', + :percent => 3.5, + :states => { + :waiting_for_ip_address => { + :description => 'Waiting for VM IP address', + :state => 'active', + :status => 'Ok', + :started_on => Time.now.utc - 10.minutes, + :updated_on => Time.now.utc - 5.minutes, + :percent => 10.0 + } + }, + :status => 'ok' + } + task.update_options(:progress => progress) + expect(job).to receive(:queue_signal).once.ordered.with(:abort_virtv2v) + job.abort_conversion('fake error', 'error') + expect(task.reload.options[:progress]).to eq( 
+ :current_state => 'waiting_for_ip_address', + :current_description => 'Migration failed: fake error. Cancelling', + :percent => 3.5, + :states => { + :waiting_for_ip_address => { + :description => 'Waiting for VM IP address', + :state => 'active', + :status => 'Ok', + :started_on => Time.now.utc - 10.minutes, + :updated_on => Time.now.utc - 5.minutes, + :percent => 10.0 + } + }, + :status => 'error' + ) + end + end + + it 'initiate waiting_to_start state, updates task progress and signals :abort_virtv2v' do + job.state = 'waiting_to_start' + expect(job).to receive(:queue_signal).once.ordered.with(:abort_virtv2v) + job.abort_conversion('fake error', 'ok') + expect(job.migration_task.reload.options[:progress]).to eq( + :current_state => 'waiting_to_start', + :current_description => 'Migration failed: fake error. Cancelling', + :percent => 0.0, + :states => {:waiting_to_start => {}}, + :status => 'ok' + ) + end + end + context 'state hash methods' do before do job.state = 'running_migration_playbook' @@ -350,6 +406,61 @@ ) end end + + it 'doesn\'t update the task progress hash if progress[:status] is "error"' do + Timecop.freeze(2019, 2, 6) do + progress = { + :current_state => 'running_migration_playbook', + :current_description => 'Running pre-migration playbook failed: fake error', + :percent => 3.5, + :states => { + :waiting_for_ip_address => { + :description => 'Waiting for VM IP address', + :state => 'finished', + :status => 'Ok', + :started_on => Time.now.utc - 10.minutes, + :updated_on => Time.now.utc - 5.minutes, + :percent => 100.0 + }, + :running_migration_playbook => { + :description => 'Running pre-migration playbook', + :state => 'finished', + :status => 'Error', + :started_on => Time.now.utc - 1.minute, + :percent => 10.0, + :updated_on => Time.now.utc - 30.seconds + } + }, + :status => 'error' + } + task.update_options(:progress => progress) + job.update_migration_task_progress(:on_retry, :percent => 30) + expect(task.reload.options[:progress]).to eq( + :current_state => 'running_migration_playbook', + :current_description => 'Running pre-migration playbook failed: fake error', + :percent => 3.5, + :states => { + :waiting_for_ip_address => { + :description => 'Waiting for VM IP address', + :state => 'finished', + :status => 'Ok', + :started_on => Time.now.utc - 10.minutes, + :updated_on => Time.now.utc - 5.minutes, + :percent => 100.0 + }, + :running_migration_playbook => { + :description => 'Running pre-migration playbook', + :state => 'finished', + :status => 'Error', + :started_on => Time.now.utc - 1.minute, + :percent => 10.0, + :updated_on => Time.now.utc - 30.seconds + } + }, + :status => 'error' + ) + end + end end context 'on_exit and on_error' do @@ -1390,6 +1501,7 @@ it 'returns a message stating conversion has not started' do task.update_options(:virtv2v_status => 'active', :virtv2v_disks => virtv2v_disks) + allow(job.migration_task).to receive(:two_phase?).and_return(false) Timecop.freeze(2019, 2, 6) do expect(job).to receive(:update_migration_task_progress).once.ordered.with(:on_entry).and_call_original expect(job).to receive(:update_migration_task_progress).once.ordered.with(:on_retry, :message => 'Disk transformation is initializing.', :percent => 1).and_call_original @@ -1411,8 +1523,9 @@ ] end - it "updates message and percentage, and retries if conversion is not finished" do + it "updates message and percentage, and retries if conversion is one-phase and not finished" do task.update_options(:virtv2v_status => 'active', :virtv2v_disks => virtv2v_disks) + 
allow(job.migration_task).to receive(:two_phase?).and_return(false) Timecop.freeze(2019, 2, 6) do expect(job).to receive(:update_migration_task_progress).once.ordered.with(:on_entry).and_call_original expect(job).to receive(:update_migration_task_progress).once.ordered.with(:on_retry, :message => 'Converting disk 2 / 2 [43.75%].', :percent => 43.75).and_call_original @@ -1425,6 +1538,17 @@ end end + it "retries if conversion is two-phase and not finished" do + task.update_options(:virtv2v_status => 'active', :virtv2v_disks => virtv2v_disks) + allow(job.migration_task).to receive(:warm_migration?).and_return(true) + Timecop.freeze(2019, 2, 6) do + expect(job).to receive(:update_migration_task_progress).once.ordered.with(:on_entry).and_call_original + expect(job).to receive(:update_migration_task_progress).once.ordered.with(:on_retry, :message => 'Converting disks') + expect(job).to receive(:queue_signal).with(:poll_transform_vm_complete, :deliver_on => Time.now.utc + job.state_retry_interval) + job.signal(:poll_transform_vm_complete) + end + end + it "aborts if conversion failed" do task.update_options(:virtv2v_status => 'failed', :virtv2v_message => 'virt-v2v failed for some reason') expect(job).to receive(:abort_conversion).with('virt-v2v failed for some reason', 'error').and_call_original diff --git a/spec/models/manageiq/providers/embedded_ansible/automation_manager_spec.rb b/spec/models/manageiq/providers/embedded_ansible/automation_manager_spec.rb index ee974562b25..d76b6b78a9c 100644 --- a/spec/models/manageiq/providers/embedded_ansible/automation_manager_spec.rb +++ b/spec/models/manageiq/providers/embedded_ansible/automation_manager_spec.rb @@ -1,9 +1,15 @@ RSpec.describe ManageIQ::Providers::EmbeddedAnsible::AutomationManager do - context 'catalog types' do + describe ".catalog_types" do + it "includes generic_ansible_playbook" do + expect(described_class.catalog_types).to include("generic_ansible_playbook") + end + end + + describe '#catalog_types' do let(:ems) { FactoryBot.create(:embedded_automation_manager_ansible) } - it "#supported_catalog_types" do - expect(ems.supported_catalog_types).to eq(%w(generic_ansible_playbook)) + it "includes generic_ansible_playbook" do + expect(ems.catalog_types).to include("generic_ansible_playbook") end end end diff --git a/spec/models/manageiq/providers/infra_manager_spec.rb b/spec/models/manageiq/providers/infra_manager_spec.rb index 222d8e63a91..a2cdc1cb9d4 100644 --- a/spec/models/manageiq/providers/infra_manager_spec.rb +++ b/spec/models/manageiq/providers/infra_manager_spec.rb @@ -25,4 +25,14 @@ expect(described_class.ems_timeouts(:ems_redhat, :InVentory)).to eq [5.hours, nil] end end + + describe '.clusterless_hosts' do + it "hosts with no ems" do + ems = FactoryBot.create(:ems_infra) + host = FactoryBot.create(:host, :ext_management_system => ems) + FactoryBot.create(:host, :ext_management_system => ems, :ems_cluster => FactoryBot.create(:ems_cluster)) + + expect(ems.clusterless_hosts).to eq([host]) + end + end end diff --git a/spec/models/miq_ae_class_spec.rb b/spec/models/miq_ae_class_spec.rb index 2eb9ee50040..44f62f3c16e 100644 --- a/spec/models/miq_ae_class_spec.rb +++ b/spec/models/miq_ae_class_spec.rb @@ -39,6 +39,7 @@ before do @user = FactoryBot.create(:user_with_group) + @ns = FactoryBot.create(:miq_ae_namespace, :name => "TEST", :parent => FactoryBot.create(:miq_ae_domain)) end it "should not create class without namespace" do @@ -46,20 +47,20 @@ end it "should not create class without name" do - expect { 
MiqAeClass.new(:namespace => "TEST").save! }.to raise_error(ActiveRecord::RecordInvalid) + expect { MiqAeClass.new(:namespace_id => @ns.id).save! }.to raise_error(ActiveRecord::RecordInvalid) end it "should set the updated_by field on save" do - c1 = MiqAeClass.create(:namespace => "TEST", :name => "oleg") + c1 = MiqAeClass.create(:namespace_id => @ns.id, :name => "oleg") expect(c1.updated_by).to eq('system') end it "should not create classes with the same name in the same namespace" do - c1 = MiqAeClass.new(:namespace => "TEST", :name => "oleg") + c1 = MiqAeClass.new(:namespace_id => @ns.id, :name => "oleg") expect(c1).not_to be_nil expect(c1.save!).to be_truthy - expect { MiqAeClass.new(:namespace => "TEST", :name => "OLEG").save! }.to raise_error(ActiveRecord::RecordInvalid) - c2 = MiqAeClass.new(:namespace => "PROD", :name => "oleg") + expect { MiqAeClass.new(:namespace_id => @ns.id, :name => "OLEG").save! }.to raise_error(ActiveRecord::RecordInvalid) + c2 = MiqAeClass.new(:namespace_id => FactoryBot.create(:miq_ae_namespace).id, :name => "oleg") expect(c2).not_to be_nil expect(c2.save!).to be_truthy end @@ -177,13 +178,13 @@ def set_priority(name, value) context "#copy" do before do - @d1 = FactoryBot.create(:miq_ae_namespace, :name => "domain1", :parent_id => nil, :priority => 1) - @ns1 = FactoryBot.create(:miq_ae_namespace, :name => "ns1", :parent_id => @d1.id) + @d1 = FactoryBot.create(:miq_ae_namespace, :name => "domain1", :parent => nil, :priority => 1) + @ns1 = FactoryBot.create(:miq_ae_namespace, :name => "ns1", :parent => @d1) @cls1 = FactoryBot.create(:miq_ae_class, :name => "cls1", :namespace_id => @ns1.id) @cls2 = FactoryBot.create(:miq_ae_class, :name => "cls2", :namespace_id => @ns1.id) @d2 = FactoryBot.create(:miq_ae_domain, :name => "domain2", :priority => 2) - @ns2 = FactoryBot.create(:miq_ae_namespace, :name => "ns2", :parent_id => @d2.id) + @ns2 = FactoryBot.create(:miq_ae_namespace, :name => "ns2", :parent => @d2) end it "copies classes under specified namespace" do diff --git a/spec/models/miq_ae_field_spec.rb b/spec/models/miq_ae_field_spec.rb index f80b3244e63..12b742dcc4c 100644 --- a/spec/models/miq_ae_field_spec.rb +++ b/spec/models/miq_ae_field_spec.rb @@ -41,7 +41,8 @@ context "legacy tests" do before do - @c1 = MiqAeClass.create(:namespace => "TEST", :name => "fields_test") + @ns = FactoryBot.create(:miq_ae_namespace, :name => "TEST", :parent => FactoryBot.create(:miq_ae_domain)) + @c1 = MiqAeClass.create(:namespace_id => @ns.id, :name => "fields_test") @user = FactoryBot.create(:user_with_group) end diff --git a/spec/models/miq_ae_instance_spec.rb b/spec/models/miq_ae_instance_spec.rb index 3cdf30bd66d..d4a103a7113 100644 --- a/spec/models/miq_ae_instance_spec.rb +++ b/spec/models/miq_ae_instance_spec.rb @@ -2,7 +2,8 @@ context "legacy tests" do before do @user = FactoryBot.create(:user_with_group) - @c1 = MiqAeClass.create(:namespace => "TEST", :name => "instance_test") + @ns = FactoryBot.create(:miq_ae_namespace, :name => "TEST", :parent => FactoryBot.create(:miq_ae_domain)) + @c1 = MiqAeClass.create(:namespace_id => @ns.id, :name => "instance_test") @fname1 = "field1" @f1 = @c1.ae_fields.create(:name => @fname1) end @@ -149,7 +150,7 @@ it "should return editable as false if the parent namespace/class is not editable" do d1 = FactoryBot.create(:miq_ae_system_domain, :tenant => User.current_tenant) - n1 = FactoryBot.create(:miq_ae_namespace, :parent_id => d1.id) + n1 = FactoryBot.create(:miq_ae_namespace, :parent => d1) c1 = 
FactoryBot.create(:miq_ae_class, :namespace_id => n1.id, :name => "foo") i1 = FactoryBot.create(:miq_ae_instance, :class_id => c1.id, :name => "foo_instance") expect(i1.editable?(@user)).to be_falsey @@ -158,7 +159,7 @@ it "should return editable as true if the parent namespace/class is editable" do User.current_user = @user d1 = FactoryBot.create(:miq_ae_domain, :tenant => User.current_tenant) - n1 = FactoryBot.create(:miq_ae_namespace, :parent_id => d1.id) + n1 = FactoryBot.create(:miq_ae_namespace, :parent => d1) c1 = FactoryBot.create(:miq_ae_class, :namespace_id => n1.id, :name => "foo") i1 = FactoryBot.create(:miq_ae_instance, :class_id => c1.id, :name => "foo_instance") expect(i1.editable?(@user)).to be_truthy @@ -204,14 +205,14 @@ context "#copy" do before do - @d1 = FactoryBot.create(:miq_ae_namespace, :name => "domain1", :parent_id => nil, :priority => 1) - @ns1 = FactoryBot.create(:miq_ae_namespace, :name => "ns1", :parent_id => @d1.id) + @d1 = FactoryBot.create(:miq_ae_namespace, :name => "domain1", :priority => 1) + @ns1 = FactoryBot.create(:miq_ae_namespace, :name => "ns1", :parent => @d1) @cls1 = FactoryBot.create(:miq_ae_class, :name => "cls1", :namespace_id => @ns1.id) @i1 = FactoryBot.create(:miq_ae_instance, :class_id => @cls1.id, :name => "foo_instance1") @i2 = FactoryBot.create(:miq_ae_instance, :class_id => @cls1.id, :name => "foo_instance2") @d2 = FactoryBot.create(:miq_ae_domain, :name => "domain2", :priority => 2) - @ns2 = FactoryBot.create(:miq_ae_namespace, :name => "ns2", :parent_id => @d2.id) + @ns2 = FactoryBot.create(:miq_ae_namespace, :name => "ns2", :parent => @d2) end it "copies instances under specified namespace" do @@ -261,8 +262,8 @@ let(:u1) { FactoryBot.create(:user_with_group, :name => 'user1') } let(:d1) { FactoryBot.create(:miq_ae_domain, :name => 'dom1', :priority => 1) } let(:d2) { FactoryBot.create(:miq_ae_domain, :name => 'dom2', :priority => 2) } - let(:n1) { FactoryBot.create(:miq_ae_namespace, :parent_id => d1.id, :name => "namespace") } - let(:n2) { FactoryBot.create(:miq_ae_namespace, :parent_id => d2.id, :name => "namespace") } + let(:n1) { FactoryBot.create(:miq_ae_namespace, :parent => d1, :name => "namespace") } + let(:n2) { FactoryBot.create(:miq_ae_namespace, :parent => d2, :name => "namespace") } let(:c1) { FactoryBot.create(:miq_ae_class, :namespace_id => n1.id, :name => "class") } let(:c2) { FactoryBot.create(:miq_ae_class, :namespace_id => n2.id, :name => "class") } let!(:i1) { FactoryBot.create(:miq_ae_instance, :class_id => c1.id, :name => "instance") } diff --git a/spec/models/miq_ae_method_spec.rb b/spec/models/miq_ae_method_spec.rb index f0da0491dc2..80af6b19fbb 100644 --- a/spec/models/miq_ae_method_spec.rb +++ b/spec/models/miq_ae_method_spec.rb @@ -40,13 +40,13 @@ context "#copy" do let(:d2) { FactoryBot.create(:miq_ae_domain, :name => "domain2", :priority => 2) } - let(:ns1) { FactoryBot.create(:miq_ae_namespace, :name => "ns1", :parent_id => @d1.id) } + let(:ns1) { FactoryBot.create(:miq_ae_namespace, :name => "ns1", :parent => @d1) } let(:m1) { FactoryBot.create(:miq_ae_method, :class_id => @cls1.id, :name => "foo_method1", :scope => "instance", :language => "ruby", :location => "inline") } let(:m2) { FactoryBot.create(:miq_ae_method, :class_id => @cls1.id, :name => "foo_method2", :scope => "instance", :language => "ruby", :location => "inline") } before do - @d1 = FactoryBot.create(:miq_ae_namespace, :name => "domain1", :parent_id => nil, :priority => 1) + @d1 = FactoryBot.create(:miq_ae_namespace, :name => 
"domain1", :parent => nil, :priority => 1) @cls1 = FactoryBot.create(:miq_ae_class, :name => "cls1", :namespace_id => ns1.id) - @ns2 = FactoryBot.create(:miq_ae_namespace, :name => "ns2", :parent_id => d2.id) + @ns2 = FactoryBot.create(:miq_ae_namespace, :name => "ns2", :parent => d2) end it "copies instances under specified namespace" do @@ -117,7 +117,7 @@ it "#domain" do d1 = FactoryBot.create(:miq_ae_system_domain, :name => 'dom1', :priority => 10) - n1 = FactoryBot.create(:miq_ae_namespace, :name => 'ns1', :parent_id => d1.id) + n1 = FactoryBot.create(:miq_ae_namespace, :name => 'ns1', :parent => d1) c1 = FactoryBot.create(:miq_ae_class, :namespace_id => n1.id, :name => "foo") m1 = FactoryBot.create(:miq_ae_method, :class_id => c1.id, @@ -130,7 +130,7 @@ it "#to_export_yaml" do d1 = FactoryBot.create(:miq_ae_system_domain, :name => 'dom1', :priority => 10) - n1 = FactoryBot.create(:miq_ae_namespace, :name => 'ns1', :parent_id => d1.id) + n1 = FactoryBot.create(:miq_ae_namespace, :name => 'ns1', :parent => d1) c1 = FactoryBot.create(:miq_ae_class, :namespace_id => n1.id, :name => "foo") m1 = FactoryBot.create(:miq_ae_method, :class_id => c1.id, diff --git a/spec/models/miq_ae_namespace_spec.rb b/spec/models/miq_ae_namespace_spec.rb index 71160228449..f29e1b9f3cf 100644 --- a/spec/models/miq_ae_namespace_spec.rb +++ b/spec/models/miq_ae_namespace_spec.rb @@ -34,17 +34,17 @@ context "with a duplicite names" do let(:domain) { FactoryBot.create(:miq_ae_domain) } - let(:ns1) { FactoryBot.create(:miq_ae_namespace, :name => 'ns1', :parent_id => domain.id) } + let(:ns1) { FactoryBot.create(:miq_ae_namespace, :name => 'ns1', :parent => domain) } before do - FactoryBot.create(:miq_ae_namespace, :name => 'namespace', :parent_id => ns1.id) + FactoryBot.create(:miq_ae_namespace, :name => 'namespace', :parent => ns1) end it "with a distinct path is allowed" do # domain/ns1/namespace # domain/ns2/namespace - ns2 = FactoryBot.create(:miq_ae_namespace, :name => 'ns2', :parent_id => domain.id) - dup_namespace = FactoryBot.create(:miq_ae_namespace, :name => 'namespace', :parent_id => ns2.id) + ns2 = FactoryBot.create(:miq_ae_namespace, :name => 'ns2', :parent => domain) + dup_namespace = FactoryBot.create(:miq_ae_namespace, :name => 'namespace', :parent => ns2) expect(ns2.valid?).to be_truthy expect(dup_namespace.valid?).to be_truthy @@ -54,7 +54,7 @@ # domain/ns1/namespace # domain/ns1/NAMESPACE expect do - FactoryBot.create(:miq_ae_namespace, :name => 'NAMESPACE', :parent_id => ns1.id) + FactoryBot.create(:miq_ae_namespace, :name => 'NAMESPACE', :parent => ns1) end.to raise_error("Validation failed: MiqAeNamespace: Name has already been taken") end end @@ -92,9 +92,9 @@ n1 = FactoryBot.create(:miq_ae_system_domain, :tenant => @user.current_tenant) expect(n1.editable?(@user)).to be_falsey - n2 = MiqAeNamespace.create!(:name => 'ns2', :parent_id => n1.id) + n2 = MiqAeNamespace.create!(:name => 'ns2', :parent => n1) - n3 = MiqAeNamespace.create!(:name => 'ns3', :parent_id => n2.id) + n3 = MiqAeNamespace.create!(:name => 'ns3', :parent => n2) expect(n3.editable?(@user)).to be_falsey end diff --git a/spec/models/miq_policy_spec.rb b/spec/models/miq_policy_spec.rb index 147d544cc88..da75a9e4845 100644 --- a/spec/models/miq_policy_spec.rb +++ b/spec/models/miq_policy_spec.rb @@ -207,6 +207,15 @@ end end + describe "#miq_policies (virtual_has_many)" do + before { profiles } + + it "gets the policies under a profile" do + expect(MiqPolicySet.find_by(:name => "ps3").miq_policies).to match_array([policies[0]]) + 
expect(MiqPolicySet.find_by(:name => "ps4").miq_policies).to match_array([policies[1]]) + end + end + describe ".enforce_policy" do it 'executes policies for a target' do allow(target).to receive(:get_policies).and_return(profiles) diff --git a/spec/models/miq_queue_spec.rb b/spec/models/miq_queue_spec.rb index 859ec07b50a..ebb6aaf4e91 100644 --- a/spec/models/miq_queue_spec.rb +++ b/spec/models/miq_queue_spec.rb @@ -986,4 +986,89 @@ def queue_items ) end end + + describe ".messaging_client_options" do + context "with ENV" do + let(:env_vars) { ENV.to_h.merge("MESSAGING_HOSTNAME" => "server.example.com", "MESSAGING_PORT" => "9092", "MESSAGING_USERNAME" => "admin") } + + context "prefers settings from ENV when they exist" do + it "with clear text password" do + stub_const("ENV", env_vars.to_h.merge("MESSAGING_PASSWORD" => "password")) + + expect(YAML).not_to receive(:load_file).with(MiqQueue::MESSAGING_CONFIG_FILE) + + expect(MiqQueue.send(:messaging_client_options)).to eq( + :encoding => "json", + :host => "server.example.com", + :password => "password", + :port => 9092, + :protocol => nil, + :username => "admin" + ) + end + + it "with encrypted password" do + stub_const("ENV", env_vars.to_h.merge("MESSAGING_PASSWORD" => MiqPassword.encrypt("password"))) + + expect(YAML).not_to receive(:load_file).with(MiqQueue::MESSAGING_CONFIG_FILE) + + expect(ENV["MESSAGING_PASSWORD"]).to be_encrypted + expect(MiqQueue.send(:messaging_client_options)).to eq( + :encoding => "json", + :host => "server.example.com", + :password => "password", + :port => 9092, + :protocol => nil, + :username => "admin" + ) + end + end + + it "prefers settings from file if any ENV vars are missing" do + stub_const("ENV", env_vars) # No password + + allow(YAML).to receive(:load_file).and_call_original + expect(YAML).to receive(:load_file).with(MiqQueue::MESSAGING_CONFIG_FILE).and_return("test" => {"hostname" => "kafka.example.com", "port" => 9092, "username" => "user", "password" => "password"}) + + expect(MiqQueue.send(:messaging_client_options)).to eq( + :encoding => "json", + :host => "kafka.example.com", + :password => "password", + :port => 9092, + :protocol => nil, + :username => "user" + ) + end + end + + context "prefers settings from file when ENV vars are missing" do + it "with clear text password" do + allow(YAML).to receive(:load_file).and_call_original + expect(YAML).to receive(:load_file).with(MiqQueue::MESSAGING_CONFIG_FILE).and_return("test" => {"hostname" => "kafka.example.com", "port" => 9092, "username" => "user", "password" => "password"}) + + expect(MiqQueue.send(:messaging_client_options)).to eq( + :encoding => "json", + :host => "kafka.example.com", + :password => "password", + :port => 9092, + :protocol => nil, + :username => "user" + ) + end + + it "with encrypted password" do + allow(YAML).to receive(:load_file).and_call_original + expect(YAML).to receive(:load_file).with(MiqQueue::MESSAGING_CONFIG_FILE).and_return("test" => {"hostname" => "kafka.example.com", "port" => 9092, "username" => "user", "password" => MiqPassword.encrypt("password")}) + + expect(MiqQueue.send(:messaging_client_options)).to eq( + :encoding => "json", + :host => "kafka.example.com", + :password => "password", + :port => 9092, + :protocol => nil, + :username => "user" + ) + end + end + end end diff --git a/spec/models/miq_report/generator_spec.rb b/spec/models/miq_report/generator_spec.rb index 2dd9fd7df99..ccf30928f21 100644 --- a/spec/models/miq_report/generator_spec.rb +++ b/spec/models/miq_report/generator_spec.rb @@ -67,6 
+67,23 @@ expect(@miq_report_profile_all.table.data[0].data).to include("min_trend_value" => 400, "max_trend_value" => 700) end + + it "handles merging WHERE clauses from MiqReport#where_clause and options[:where_clause]" do + FactoryBot.create(:vm) # filtered out by option[:where_clause] + FactoryBot.create(:template) # filtered out by report.where_clause + vm = FactoryBot.create(:vm, :vendor => "redhat") + + rpt = FactoryBot.create( + :miq_report, + :db => "VmOrTemplate", + :where_clause => ["vms.type = ?", "Vm"], + :col_order => %w[id name host.name vendor] + ) + rpt.generate_table(:userid => @user.userid, :where_clause => {"vms.vendor" => "redhat"}) + + expect(rpt.table.size).to eq(1) + expect(rpt.table.first.id.to_s).to eq(vm.id.to_s) + end end end diff --git a/spec/models/miq_report_spec.rb b/spec/models/miq_report_spec.rb index f76e5103a63..9fe0ef259d1 100644 --- a/spec/models/miq_report_spec.rb +++ b/spec/models/miq_report_spec.rb @@ -1238,6 +1238,17 @@ def user_super_admin? expect(row[label_report_column]).to eq(label_value) end end + + context "more columns with default formatters" do + let(:report_columns) { %w[start_date display_range vm_name cpu_used_cost fixed_compute_1_rate memory_used_metric cpu_used_metric] } + let(:expected_formatters) { [:datetime, nil, nil, :currency_precision_2, nil, :megabytes_human_precision_2, :mhz_precision_2] } + let(:report) { FactoryBot.create(:miq_report, :db => "ChargebackVm", :cols => report_columns, :col_order => report_columns) } + + it "calculates default formatters" do + expect(report.col_format_with_defaults).to eq(expected_formatters) + expect(report.col_formats).to be_nil + end + end end describe "_async_generate_table" do diff --git a/spec/models/miq_web_server_worker_spec.rb b/spec/models/miq_web_server_worker_spec.rb new file mode 100644 index 00000000000..fdb0cb7db4a --- /dev/null +++ b/spec/models/miq_web_server_worker_spec.rb @@ -0,0 +1,8 @@ +RSpec.describe MiqWebServiceWorker do + it "preload_for_worker_role autoloads api collection classes and descendants" do + allow(EvmDatabase).to receive(:seeded_primordially?).and_return(true) + expect(MiqWebServiceWorker).to receive(:configure_secret_token) + MiqWebServiceWorker.preload_for_worker_role + expect(defined?(ServiceAnsibleTower)).to be_truthy + end +end diff --git a/spec/models/miq_worker/container_common_spec.rb b/spec/models/miq_worker/container_common_spec.rb index 630d3199d14..603db91e0c6 100644 --- a/spec/models/miq_worker/container_common_spec.rb +++ b/spec/models/miq_worker/container_common_spec.rb @@ -19,7 +19,6 @@ def deployment_name_for(name) :template => { :metadata => {:name => "test", :labels => {:name => "test", :app => "manageiq"}}, :spec => { - :serviceAccountName => "miq-anyuid", :containers => [{ :name => "test", :env => [] diff --git a/spec/models/service_template_transformation_plan_task_spec.rb b/spec/models/service_template_transformation_plan_task_spec.rb index cf7ffd45895..a5bab4d2bf2 100644 --- a/spec/models/service_template_transformation_plan_task_spec.rb +++ b/spec/models/service_template_transformation_plan_task_spec.rb @@ -399,18 +399,18 @@ ) end - it "rescues when conversion_host.get_conversion_state fails less than 5 times" do + it "rescues when conversion_host.get_conversion_state fails less than 20 times" do task_1.update_options(:get_conversion_state_failures => 2) allow(conversion_host).to receive(:get_conversion_state).with(task_1.id).and_raise("Fake error") task_1.get_conversion_state expect(task_1.options[:get_conversion_state_failures]).to 
eq(3) end - it "rescues when conversion_host.get_conversion_state fails more than 5 times" do - task_1.update_options(:get_conversion_state_failures => 5) + it "rescues when conversion_host.get_conversion_state fails more than 20 times" do + task_1.update_options(:get_conversion_state_failures => 20) allow(conversion_host).to receive(:get_conversion_state).with(task_1.id).and_raise("Fake error") - expect { task_1.get_conversion_state }.to raise_error("Failed to get conversion state 5 times in a row") - expect(task_1.options[:get_conversion_state_failures]).to eq(6) + expect { task_1.get_conversion_state }.to raise_error("Failed to get conversion state 20 times in a row") + expect(task_1.options[:get_conversion_state_failures]).to eq(21) end it "updates progress when conversion is failed" do @@ -654,10 +654,13 @@ it "generates conversion options hash" do expect(task_1.conversion_options).to eq( - :vm_name => "ssh://root@10.0.0.1/vmfs/volumes/stockage%20r%C3%A9cent/#{src_vm_1.location}", + :vm_name => src_vm_1.name, :vm_uuid => src_vm_1.uid_ems, :conversion_host_uuid => conversion_host.resource.ems_ref, :transport_method => 'ssh', + :vmware_fingerprint => '01:23:45:67:89:ab:cd:ef:01:23:45:67:89:ab:cd:ef:01:23:45:67', + :vmware_uri => "ssh://root@10.0.0.1/vmfs/volumes/stockage%20r%C3%A9cent/#{src_vm_1.location}", + :vmware_password => 'esx_passwd', :rhv_url => "https://#{redhat_ems.hostname}/ovirt-engine/api", :rhv_cluster => redhat_cluster.name, :rhv_storage => redhat_storages.first.name, @@ -666,6 +669,8 @@ :network_mappings => task_1.network_mappings, :install_drivers => true, :insecure_connection => true, + :two_phase => true, + :warm => false, :daemonize => false ) end @@ -673,10 +678,13 @@ it "generates conversion options hash with host custom IP address" do src_host.miq_custom_set('TransformationIPAddress', '192.168.254.1') expect(task_1.conversion_options).to eq( - :vm_name => "ssh://root@192.168.254.1/vmfs/volumes/stockage%20r%C3%A9cent/#{src_vm_1.location}", + :vm_name => src_vm_1.name, :vm_uuid => src_vm_1.uid_ems, :conversion_host_uuid => conversion_host.resource.ems_ref, :transport_method => 'ssh', + :vmware_fingerprint => '01:23:45:67:89:ab:cd:ef:01:23:45:67:89:ab:cd:ef:01:23:45:67', + :vmware_uri => "ssh://root@192.168.254.1/vmfs/volumes/stockage%20r%C3%A9cent/#{src_vm_1.location}", + :vmware_password => 'esx_passwd', :rhv_url => "https://#{redhat_ems.hostname}/ovirt-engine/api", :rhv_cluster => redhat_cluster.name, :rhv_storage => redhat_storages.first.name, @@ -685,6 +693,8 @@ :network_mappings => task_1.network_mappings, :install_drivers => true, :insecure_connection => true, + :two_phase => true, + :warm => false, :daemonize => false ) end @@ -816,10 +826,13 @@ it "generates conversion options hash" do expect(task_1.conversion_options).to eq( - :vm_name => "ssh://root@10.0.0.1/vmfs/volumes/stockage%20r%C3%A9cent/#{src_vm_1.location}", + :vm_name => src_vm_1.name, :vm_uuid => src_vm_1.uid_ems, :conversion_host_uuid => conversion_host.ems_ref, :transport_method => 'ssh', + :vmware_fingerprint => '01:23:45:67:89:ab:cd:ef:01:23:45:67:89:ab:cd:ef:01:23:45:67', + :vmware_uri => "ssh://root@10.0.0.1/vmfs/volumes/stockage%20r%C3%A9cent/#{src_vm_1.location}", + :vmware_password => 'esx_passwd', :osp_environment => { :os_auth_url => URI::Generic.build( :scheme => openstack_ems.security_protocol == 'non-ssl' ? 
'http' : 'https', @@ -840,6 +853,8 @@ :osp_security_groups_ids => [openstack_security_group.ems_ref], :source_disks => [src_disk_1.filename, src_disk_2.filename], :network_mappings => task_1.network_mappings, + :two_phase => true, + :warm => false, :daemonize => false ) end diff --git a/spec/models/vm_or_template_spec.rb b/spec/models/vm_or_template_spec.rb index 39a606dec31..b01a1351919 100644 --- a/spec/models/vm_or_template_spec.rb +++ b/spec/models/vm_or_template_spec.rb @@ -126,6 +126,88 @@ end end + context ".from_cloud_managers" do + context "with cloud and infra vms" do + let!(:cloud_vm) { FactoryBot.create(:vm_cloud, :ext_management_system => FactoryBot.create(:ems_cloud)) } + let!(:infra_vm) { FactoryBot.create(:vm_infra, :ext_management_system => FactoryBot.create(:ems_infra)) } + + it "returns a cloud vm" do + expect(described_class.from_cloud_managers).to include(cloud_vm) + end + + it "doesn't return an infra vm" do + expect(described_class.from_cloud_managers).not_to include(infra_vm) + end + end + + context "with archived vms" do + let!(:archived_vm) { FactoryBot.create(:vm_cloud, :ext_management_system => nil) } + + it "doesn't return an archived vm" do + expect(described_class.from_cloud_managers).not_to include(archived_vm) + end + end + end + + context ".from_infra_managers" do + context "with cloud and infra vms" do + let!(:cloud_vm) { FactoryBot.create(:vm_cloud, :ext_management_system => FactoryBot.create(:ems_cloud)) } + let!(:infra_vm) { FactoryBot.create(:vm_infra, :ext_management_system => FactoryBot.create(:ems_infra)) } + + it "returns an infra vm" do + expect(described_class.from_infra_managers).to include(infra_vm) + end + + it "doesn't return a cloud vm" do + expect(described_class.from_infra_managers).not_to include(cloud_vm) + end + end + + context "with archived vms" do + let!(:archived_vm) { FactoryBot.create(:vm_infra, :ext_management_system => nil) } + + it "doesn't return an archived vm" do + expect(described_class.from_infra_managers).not_to include(archived_vm) + end + end + end + + context "#from_cloud_manager?" do + let(:cloud_vm) { FactoryBot.create(:vm_cloud, :ext_management_system => FactoryBot.create(:ems_cloud)) } + let(:infra_vm) { FactoryBot.create(:vm_infra, :ext_management_system => FactoryBot.create(:ems_infra)) } + let(:archived_vm) { FactoryBot.create(:vm_infra, :ext_management_system => nil) } + + it "returns true for a cloud vm" do + expect(cloud_vm.from_cloud_manager?).to be_truthy + end + + it "returns false for an infra vm" do + expect(infra_vm.from_cloud_manager?).to be_falsey + end + + it "returns false for an archived vm" do + expect(archived_vm.from_cloud_manager?).to be_falsey + end + end + + context "#from_infra_manager?" 
do + let(:cloud_vm) { FactoryBot.create(:vm_cloud, :ext_management_system => FactoryBot.create(:ems_cloud)) } + let(:infra_vm) { FactoryBot.create(:vm_infra, :ext_management_system => FactoryBot.create(:ems_infra)) } + let(:archived_vm) { FactoryBot.create(:vm_infra, :ext_management_system => nil) } + + it "returns false for a cloud vm" do + expect(cloud_vm.from_infra_manager?).to be_falsey + end + + it "returns true for an infra vm" do + expect(infra_vm.from_infra_manager?).to be_truthy + end + + it "returns false for an archived vm" do + expect(archived_vm.from_infra_manager?).to be_falsey + end + end + context ".event_by_property" do context "should add an EMS event" do before do diff --git a/spec/models/vm_reconfigure_task_spec.rb b/spec/models/vm_reconfigure_task_spec.rb index 0c512aa171f..d44d09526f3 100644 --- a/spec/models/vm_reconfigure_task_spec.rb +++ b/spec/models/vm_reconfigure_task_spec.rb @@ -40,20 +40,24 @@ end context "Single Disk add " do - let(:request_options) { {:disk_add => [{"disk_size_in_mb" => "33", "persistent" => "true"}.with_indifferent_access]} } - let(:description_partial) { "Add Disks: 1 : #{request.options[:disk_add][0]["disk_size_in_mb"].to_i.megabytes.to_s(:human_size)} " } + let(:request_options) { {:disk_add => [{"disk_size_in_mb" => "33", "persistent" => "true", "type" => "thin"}.with_indifferent_access]} } + let(:description_partial) do + "Add Disks: 1 : #{request.options[:disk_add][0]["disk_size_in_mb"].to_i.megabytes.to_s(:human_size)}, Type: "\ + "#{request.options[:disk_add][0]["type"]} " + end it_behaves_like ".get_description" end context "Multiple Disk add " do let(:request_options) do - {:disk_add => [{"disk_size_in_mb" => "33", "persistent" => "true"}.with_indifferent_access, - {"disk_size_in_mb" => "44", "persistent" => "true"}.with_indifferent_access]} + {:disk_add => [{"disk_size_in_mb" => "33", "persistent" => "true", "type" => "thin"}.with_indifferent_access, + {"disk_size_in_mb" => "44", "persistent" => "true", "type" => "thick"}.with_indifferent_access]} end let(:description_partial) do - "Add Disks: 2 : #{request.options[:disk_add][0]["disk_size_in_mb"].to_i.megabytes.to_s(:human_size)}, "\ - "#{request.options[:disk_add][1]["disk_size_in_mb"].to_i.megabytes.to_s(:human_size)} " + "Add Disks: 2 : #{request.options[:disk_add][0]["disk_size_in_mb"].to_i.megabytes.to_s(:human_size)}, Type: "\ + "#{request.options[:disk_add][0]["type"]}, #{request.options[:disk_add][1]["disk_size_in_mb"].to_i.megabytes.to_s(:human_size)}, Type: "\ + "#{request.options[:disk_add][1]["type"]} " end it_behaves_like ".get_description" diff --git a/spec/support/ems_refresh_helper.rb b/spec/support/ems_refresh_helper.rb index b95c331b58a..3b2367c6176 100644 --- a/spec/support/ems_refresh_helper.rb +++ b/spec/support/ems_refresh_helper.rb @@ -7,7 +7,7 @@ def serialize_inventory models = ApplicationRecord.subclasses - skip_models # Skip attributes that always change between refreshes - skip_attrs_global = ["created_on", "updated_on"] + skip_attrs_global = ["created_on", "created_at", "updated_on", "updated_at"] skip_attrs_by_model = { "ExtManagementSystem" => ["last_refresh_date", "last_inventory_date"], } diff --git a/spec/support/vmdb_permission_store_helper.rb b/spec/support/vmdb_permission_store_helper.rb deleted file mode 100644 index cf9c0db9f1e..00000000000 --- a/spec/support/vmdb_permission_store_helper.rb +++ /dev/null @@ -1,25 +0,0 @@ -require 'tempfile' - -def stub_vmdb_permission_store - original_store = Vmdb::PermissionStores.instance - yield -ensure 
- Vmdb::PermissionStores.instance = original_store -end - -def stub_vmdb_permission_store_with_types(types) - stub_vmdb_permission_store do - Tempfile.create(%w(config yml)) do |f| - f.write(types.to_yaml) - f.close - - Vmdb::PermissionStores.configure do |config| - config.backend = 'yaml' - config.options[:filename] = f.path - end - Vmdb::PermissionStores.initialize! - - yield - end - end -end diff --git a/spec/tools/server_settings_replicator/server_settings_replicator_spec.rb b/spec/tools/server_settings_replicator/server_settings_replicator_spec.rb deleted file mode 100644 index 7f791816a32..00000000000 --- a/spec/tools/server_settings_replicator/server_settings_replicator_spec.rb +++ /dev/null @@ -1,36 +0,0 @@ -$LOAD_PATH << Rails.root.join("tools").to_s - -require "server_settings_replicator/server_settings_replicator" - -RSpec.describe ServerSettingsReplicator do - let(:miq_server) { EvmSpecHelper.local_miq_server } - let!(:miq_server_remote) { EvmSpecHelper.remote_miq_server } - let(:settings) { {:k1 => {:k2 => {:k3 => 'v3'}}} } - - describe "#replicate" do - it "targets only other servers" do - miq_server.add_settings_for_resource(settings) - expected_output = <<~MESSAGE - Replicating from server id=#{miq_server.id}, path=k1/k2 to 1 servers - Settings: {:k1=>{:k2=>{:k3=>"v3"}}} - Done - MESSAGE - expect(described_class).to receive(:copy_to).with([miq_server_remote], settings) - expect { described_class.replicate(miq_server, 'k1/k2') }.to output(expected_output).to_stdout - end - end - - describe "#construct_setting_tree" do - it "handle simple value" do - path = [:k1, :k2] - values = 'abc' - expect(described_class.construct_setting_tree(path, values)).to eq(:k1 => {:k2 => 'abc'}) - end - - it "handle hash value" do - path = [:k1, :k2] - values = {:k3 => 'v3', :k4 => 'v4'} - expect(described_class.construct_setting_tree(path, values)).to eq(:k1 => {:k2 => {:k3 => 'v3', :k4 => 'v4'}}) - end - end -end diff --git a/tools/configure_server_settings.rb b/tools/configure_server_settings.rb index 2198a2f3662..977ef559093 100755 --- a/tools/configure_server_settings.rb +++ b/tools/configure_server_settings.rb @@ -13,17 +13,18 @@ "Example (Float): #{__FILE__} -s 1 -p capacity/profile/1/vcpu_commitment_ratio -v 1.5 -t float" \ "Example (Array): #{__FILE__} -s 1 -p ntp/server -v 0.pool.ntp.org,1.pool.ntp.org -t array" - opt :dry_run, "Dry Run", :short => "d" - opt :serverid, "Server Id", :short => "s", :type => :integer, :required => true - opt :path, "Path within advanced settings hash", :short => "p", :type => :string, :required => true - opt :value, "New Value for setting", :short => "v", :type => :string, :required => true - opt :force, "Force change value regardless of type", :short => "f", :type => :boolean, :default => false - opt :type, "Type of value provided, #{TYPES.inspect}", :short => "t", :type => :string, :default => "string" + opt :dry_run, "Dry Run", :short => "d" + opt :serverid, "Server Id", :short => "s", :type => :integer, :required => false + opt :path, "Path within advanced settings hash", :short => "p", :type => :string, :required => true + opt :value, "New Value for setting", :short => "v", :type => :string, :required => true + opt :force, "Force change value regardless of type", :short => "f", :type => :boolean, :default => false + opt :type, "Type of value provided, #{TYPES.inspect}", :short => "t", :type => :string, :default => "string" + opt :all_servers, "Set setting once for all servers", :short => "a", :type => :boolean, :default => false end puts opts.inspect 
-Optimist.die :serverid, "is required" unless opts[:serverid_given] +Optimist.die :serverid, "is required" unless opts[:serverid_given] || opts[:all_servers] Optimist.die :path, "is required" unless opts[:path_given] Optimist.die :value, "is required" unless opts[:value_given] Optimist.die :type, "must be one of #{TYPES.inspect}" unless TYPES.include?(opts[:type]) @@ -64,43 +65,45 @@ def types_valid?(old_val, new_val) end end -server = MiqServer.where(:id => opts[:serverid]).take -unless server +server_list = opts[:all_servers] ? MiqServer.all : MiqServer.where(:id => opts[:serverid]) +if server_list.empty? puts "Unable to find server with id [#{opts[:serverid]}]" exit 1 end -settings = server.settings - -path = settings -keys = opts[:path].split("/") -key = keys.pop.to_sym -keys.each { |p| path = path[p.to_sym] } - -# allow user to escape if the new value's class is not the same as the original, -# such as setting a String where it was previously an Integer -if opts[:force] - puts "Change [#{opts[:path]}], old class: [#{path[key].class}], new class: [#{newval.class}]" -elsif path[key] && !types_valid?(path[key], newval) - STDERR.puts "The new value's class #{newval.class} does not match the prior one's #{path[key].class}. Use -t to specify the type for the provided value. Use -f to force changing this value. Note, -f may break things! See -h for examples." - exit 1 -end +server_list.each do |server| + settings = server.settings + server_id = server.id + path = settings + keys = opts[:path].split("/") + key = keys.pop.to_sym + keys.each { |p| path = path[p.to_sym] } + + # allow user to escape if the new value's class is not the same as the original, + # such as setting a String where it was previously an Integer + if opts[:force] + puts "Change [#{opts[:path]}], old class: [#{path[key].class}], new class: [#{newval.class}]" + elsif path[key] && !types_valid?(path[key], newval) + STDERR.puts "The new value's class #{newval.class} does not match the prior one's #{path[key].class}. Use -t to specify the type for the provided value. Use -f to force changing this value. Note, -f may break things! See -h for examples." + exit 1 + end -puts "Setting [#{opts[:path]}], old value: [#{path[key]}], new value: [#{newval}]" -path[key] = newval + puts "Setting [#{opts[:path]}], old value: [#{path[key]}], new value: [#{newval}]" + path[key] = newval -valid, errors = Vmdb::Settings.validate(settings) -unless valid - puts "ERROR: Configuration is invalid:" - errors.each { |k, v| puts "\t#{k}: #{v}" } - exit 1 -end - -if opts[:dry_run] - puts "Dry run, no updates have been made" -else - server.add_settings_for_resource(settings) - server.save! + valid, errors = Vmdb::Settings.validate(settings) + unless valid + puts "ERROR: Configuration is invalid:" + errors.each { |k, v| puts "\t#{k}: #{v}" } + exit 1 + end - puts "Done" + if opts[:dry_run] + puts "Dry run, no updates to server #{server_id} have been made" + else + puts " - replicating to server id=#{server_id}..." + server.add_settings_for_resource(settings) + server.save! 
+ puts "Done" + end end diff --git a/tools/db_printers/print_network.rb b/tools/db_printers/print_network.rb index 0b99fd52a2f..1d8415cbc3c 100755 --- a/tools/db_printers/print_network.rb +++ b/tools/db_printers/print_network.rb @@ -3,7 +3,7 @@ def print_switch(indent, switch) puts "#{indent}Switch: #{switch.name}" - switch.lans.order("lower(name)").each do |lan| + switch.lans.order(Arel.sql("lower(name)")).each do |lan| puts "#{indent} Lan: #{lan.name}" vms = lan.guest_devices.collect { |gd| [gd.hardware.vm, gd.device_name] if gd.hardware && gd.hardware.vm }.compact vms.sort_by { |vm| vm[0].name.downcase }.each { |vm| puts "#{indent} #{vm[0].class}: #{vm[0].name} (vNIC: #{vm[1]})" } @@ -15,7 +15,7 @@ def print_switch(indent, switch) found_switches = [] unless host.hardware.nil? - pnics = host.hardware.guest_devices.where(:device_type => 'ethernet').order("lower(device_name)") + pnics = host.hardware.guest_devices.where(:device_type => 'ethernet').order(Arel.sql("lower(device_name)")) # Group the pNICs by Switch pnics_grouped = [] @@ -40,7 +40,7 @@ def print_switch(indent, switch) unless host.switches.length == found_switches.length puts " pNIC: (None)" - host.switches.order("lower(name)").each do |switch| + host.switches.order(Arel.sql("lower(name)")).each do |switch| next if found_switches.include?(switch.name) print_switch(" ", switch) end diff --git a/tools/db_printers/print_scsi.rb b/tools/db_printers/print_scsi.rb index 51645c714f5..ebc20ad06e1 100755 --- a/tools/db_printers/print_scsi.rb +++ b/tools/db_printers/print_scsi.rb @@ -4,12 +4,12 @@ Host.all.each do |host| puts "Host: #{host.name} (id: #{host.id})" - host.hardware.guest_devices.where(:device_type => 'storage').order("lower(device_name)").each do |adapter| + host.hardware.guest_devices.where(:device_type => 'storage').order(Arel.sql("lower(device_name)")).each do |adapter| sub_name = adapter.iscsi_name.nil? ? 
"" : " (#{adapter.iscsi_name})" puts " SCSI Adapter: #{adapter.device_name}#{sub_name}" - adapter.miq_scsi_targets.order("lower(target)").each do |target| + adapter.miq_scsi_targets.order(Arel.sql("lower(target)")).each do |target| puts " Target: #{target.iscsi_name} (#{target.target})" - target.miq_scsi_luns.order("lower(lun)").each do |lun| + target.miq_scsi_luns.order(Arel.sql("lower(lun)")).each do |lun| puts " Lun: #{lun.canonical_name} (#{lun.lun})" end end diff --git a/tools/server_settings_replicator/server_settings_replicator.rb b/tools/server_settings_replicator/server_settings_replicator.rb deleted file mode 100755 index 7ee82bf5052..00000000000 --- a/tools/server_settings_replicator/server_settings_replicator.rb +++ /dev/null @@ -1,31 +0,0 @@ -class ServerSettingsReplicator - def self.replicate(server, path_string, dry_run = false) - path = path_string.split("/").map(&:to_sym) - - # all servers except source - target_servers = MiqServer.where.not(:id => server.id) - settings = construct_setting_tree(path, server.settings_for_resource.fetch_path(path).to_h) - - puts "Replicating from server id=#{server.id}, path=#{path_string} to #{target_servers.count} servers" - puts "Settings: #{settings}" - - if dry_run - puts "Dry run, no updates have been made" - else - copy_to(target_servers, settings) - end - puts "Done" - end - - def self.construct_setting_tree(path, values) - # construct the partial tree containing the target values - path.reverse.inject(values) { |merged, element| {element => merged} } - end - - def self.copy_to(target_servers, target_settings) - target_servers.each do |target| - puts " - replicating to server id=#{target.id}..." - target.add_settings_for_resource(target_settings) - end - end -end From e9e48775a0b9fd113145165661f55af5ce610cbe Mon Sep 17 00:00:00 2001 From: Siddhesh-Ghadi <61187612+Siddhesh-Ghadi@users.noreply.github.com> Date: Tue, 28 Apr 2020 13:28:56 +0530 Subject: [PATCH 7/7] Add cxxflags for unf_ext gem installation on power Installation of unf_ext gem < v0.0.7.4 fails on power More info: https://github.com/ManageIQ/manageiq-pods/issues/460#issuecomment-617071986 --- tools/ci/setup_ruby_env.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/ci/setup_ruby_env.sh b/tools/ci/setup_ruby_env.sh index 777d07baa36..ce7ab8a72f9 100755 --- a/tools/ci/setup_ruby_env.sh +++ b/tools/ci/setup_ruby_env.sh @@ -1,5 +1,6 @@ #!/bin/bash ./tools/ci/setup_ruby_environment.rb bundle config --local build.sassc --disable-march-tune-native +bundle config --local build.unf_ext --with-cxxflags=-fsigned-char export BUNDLE_WITHOUT=development export BUNDLE_GEMFILE=${PWD}/Gemfile