diff --git a/Dockerfile b/Dockerfile
index f60b5090e..95118615c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -8,7 +8,7 @@ RUN df -h
 RUN apt-get update
-RUN apt-get install -y ruby2.5-dev dnsutils ansible build-essential
+RUN apt-get install -y ruby2.5-dev dnsutils ansible build-essential python-pip curl
 RUN apt-get upgrade -y
@@ -24,10 +24,14 @@ RUN ls -la
 #RUN rm --verbose -f cloud-mu-*.gem
+RUN pip install pywinrm
+
 RUN apt-get remove -y build-essential ruby2.5-dev
 RUN apt-get autoremove -y
+RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.4/bin/linux/amd64/kubectl && mv kubectl /usr/bin && chmod +x /usr/bin/kubectl
+
 EXPOSE 2260
 CMD /usr/sbin/init
diff --git a/ansible/roles/mu-windows/files/LaunchConfig.json b/ansible/roles/mu-windows/files/LaunchConfig.json
new file mode 100644
index 000000000..03e996b6c
--- /dev/null
+++ b/ansible/roles/mu-windows/files/LaunchConfig.json
@@ -0,0 +1,9 @@
+{
+  "setComputerName": false,
+  "setMonitorAlwaysOn": true,
+  "setWallpaper": true,
+  "addDnsSuffixList": true,
+  "extendBootVolumeSize": true,
+  "handleUserData": true,
+  "adminPasswordType": "Random"
+}
diff --git a/ansible/roles/mu-windows/files/config.xml b/ansible/roles/mu-windows/files/config.xml
new file mode 100644
index 000000000..0261bdb34
--- /dev/null
+++ b/ansible/roles/mu-windows/files/config.xml
@@ -0,0 +1,76 @@
+<?xml version="1.0" standalone="yes"?>
+<Ec2ConfigurationSettings>
+  <Plugins>
+    <Plugin>
+      <Name>Ec2SetPassword</Name>
+      <State>Enabled</State>
+    </Plugin>
+    <Plugin>
+      <Name>Ec2SetComputerName</Name>
+      <State>Disabled</State>
+    </Plugin>
+    <Plugin>
+      <Name>Ec2InitializeDrives</Name>
+      <State>Enabled</State>
+    </Plugin>
+    <Plugin>
+      <Name>Ec2EventLog</Name>
+      <State>Disabled</State>
+    </Plugin>
+    <Plugin>
+      <Name>Ec2ConfigureRDP</Name>
+      <State>Disabled</State>
+    </Plugin>
+    <Plugin>
+      <Name>Ec2OutputRDPCert</Name>
+      <State>Enabled</State>
+    </Plugin>
+    <Plugin>
+      <Name>Ec2SetDriveLetter</Name>
+      <State>Enabled</State>
+    </Plugin>
+    <Plugin>
+      <Name>Ec2WindowsActivate</Name>
+      <State>Enabled</State>
+    </Plugin>
+    <Plugin>
+      <Name>Ec2DynamicBootVolumeSize</Name>
+      <State>Disabled</State>
+    </Plugin>
+    <Plugin>
+      <Name>Ec2SetHibernation</Name>
+      <State>Enabled</State>
+    </Plugin>
+    <Plugin>
+      <Name>Ec2SetMonitorAlwaysOn</Name>
+      <State>Disabled</State>
+    </Plugin>
+    <Plugin>
+      <Name>Ec2ElasticGpuSetup</Name>
+      <State>Enabled</State>
+    </Plugin>
+    <Plugin>
+      <Name>Ec2FeatureLogging</Name>
+      <State>Enabled</State>
+    </Plugin>
+    <Plugin>
+      <Name>Ec2SetENAConfig</Name>
+      <State>Enabled</State>
+    </Plugin>
+    <Plugin>
+      <Name>Ec2HandleUserData</Name>
+      <State>Enabled</State>
+    </Plugin>
+    <Plugin>
+      <Name>AWS.EC2.Windows.CloudWatch.PlugIn</Name>
+      <State>Disabled</State>
+    </Plugin>
+  </Plugins>
+  <GlobalSettings>
+    <ManageShutdown>true</ManageShutdown>
+    <SetDnsSuffixList>true</SetDnsSuffixList>
+    <WaitForMetaDataAvailable>true</WaitForMetaDataAvailable>
+    <ShouldAddRoutes>true</ShouldAddRoutes>
+    <RemoveCredentialsfromSyspreponStartup>true</RemoveCredentialsfromSyspreponStartup>
+  </GlobalSettings>
+</Ec2ConfigurationSettings>
diff --git a/ansible/roles/mu-windows/tasks/main.yml b/ansible/roles/mu-windows/tasks/main.yml
index 1857ec532..25c26e51b 100644
--- a/ansible/roles/mu-windows/tasks/main.yml
+++ b/ansible/roles/mu-windows/tasks/main.yml
@@ -18,3 +18,19 @@
   win_chocolatey:
     name: openssh
     state: present
+
+- name: "Tell EC2Config to set a random password on next boot (Windows 2012)"
+  when: ((ansible_facts['distribution_major_version'] | int) < 10 and mu_build_image is defined and mu_build_image == True)
+  win_copy:
+    src: config.xml
+    dest: "c:/Program Files/Amazon/EC2ConfigService/Settings/config.xml"
+
+- name: "Tell EC2Launch to set a random password (Windows 2016+)"
+  when: ((ansible_facts['distribution_major_version'] | int) >= 10 and mu_build_image is defined and mu_build_image == True)
+  win_copy:
+    src: LaunchConfig.json
+    dest: "c:/ProgramData/Amazon/EC2-Windows/Launch/Config/LaunchConfig.json"
+
+- name: "Tell EC2Launch to run on next boot (Windows 2016+)"
+  when: ((ansible_facts['distribution_major_version'] | int) >= 10 and mu_build_image is defined and mu_build_image == True)
+  win_shell: C:\ProgramData\Amazon\EC2-Windows\Launch\Scripts\InitializeInstance.ps1 -Schedule
diff --git a/bin/mu-adopt b/bin/mu-adopt
index 954c9383a..d5180edc0 100755
--- a/bin/mu-adopt
+++ b/bin/mu-adopt
@@ -48,6 +48,7 @@ $opt = Optimist::options do
   opt :diff, "List the differences between what we find and an existing, saved deploy from a previous run, if one exists.", :required => false, :type => :boolean
   opt :grouping, "Methods for grouping found resources into separate baskets.\n\n"+MU::Adoption::GROUPMODES.keys.map { |g| "* "+g.to_s+": "+MU::Adoption::GROUPMODES[g] }.join("\n")+"\n\n", :required => false, :type => :string, :default => "logical"
   opt :habitats, "Limit scope of searches to the named accounts/projects/subscriptions, instead of searching all habitats visible to our credentials.", :required => false, :type => :strings
+  opt :scrub, "Whether to set scrub_mu_isms in the BoKs we generate", :default => $MU_CFG.has_key?('adopt_scrub_mu_isms') ? $MU_CFG['adopt_scrub_mu_isms'] : false
 end
 
 ok = true
@@ -102,7 +103,7 @@ if !ok
 end
 
-adoption = MU::Adoption.new(clouds: clouds, types: types, parent: $opt[:parent], billing: $opt[:billing], sources: $opt[:sources], credentials: $opt[:credentials], group_by: $opt[:grouping].to_sym, savedeploys: $opt[:savedeploys], diff: $opt[:diff], habitats: $opt[:habitats])
+adoption = MU::Adoption.new(clouds: clouds, types: types, parent: $opt[:parent], billing: $opt[:billing], sources: $opt[:sources], credentials: $opt[:credentials], group_by: $opt[:grouping].to_sym, savedeploys: $opt[:savedeploys], diff: $opt[:diff], habitats: $opt[:habitats], scrub_mu_isms: $opt[:scrub])
 found = adoption.scrapeClouds
 if found.nil? or found.empty?
   MU.log "No resources found to adopt", MU::WARN, details: {"clouds" => clouds, "types" => types }
diff --git a/bin/mu-configure b/bin/mu-configure
index e4a82b825..3076d8584 100755
--- a/bin/mu-configure
+++ b/bin/mu-configure
@@ -113,6 +113,12 @@ $CONFIGURABLES = {
     "desc" => "Disable the Momma Cat grooming daemon. Nodes which require asynchronous Ansible/Chef bootstraps will not function. This option is only honored in gem-based installations.",
     "boolean" => true
   },
+  "adopt_scrub_mu_isms" => {
+    "title" => "Scrub Mu-isms in Adopted Stacks",
+    "default" => false,
+    "desc" => "Ordinarily, Mu will automatically name, tag and generate auxiliary resources in a standard Mu-ish fashion that allows for deployment of multiple clones of a given stack. Toggling this flag will change the default behavior of mu-adopt, when it creates stack descriptors from found resources, to enable or disable this behavior (see also mu-adopt's --scrub option).",
+    "boolean" => true
+  },
   "mommacat_port" => {
     "title" => "Momma Cat Listen Port",
     "pattern" => /^[0-9]+$/i,
@@ -246,6 +252,16 @@ $CONFIGURABLES = {
       "required" => false,
       "desc" => "For Google Cloud projects which are attached to a GSuite domain. Some API calls (groups, users, etc) require this identifier. From admin.google.com, choose Security, then Single Sign On, and look for the Entity ID field. The value after idpid= in the URL there should be the customer ID."
     },
+    "ignore_habitats" => {
+      "title" => "Ignore These Projects",
+      "desc" => "Optional list of projects to ignore, for credentials which have visibility into multiple projects",
+      "array" => true
+    },
+    "restrict_to_habitats" => {
+      "title" => "Operate On Only These Projects",
+      "desc" => "Optional list of projects to which we'll restrict all of our activities.",
+      "array" => true
+    },
     "default" => {
       "title" => "Is Default Account",
       "default" => false,
diff --git a/bin/mu-node-manage b/bin/mu-node-manage
index b00180a21..ae50d628e 100755
--- a/bin/mu-node-manage
+++ b/bin/mu-node-manage
@@ -29,9 +29,9 @@ Usage:
   opt :all, "Operate on all nodes/deploys. Use with caution.", :require => false, :default => false, :type => :boolean
   opt :platform, "Operate exclusively on nodes of a particular operating system. Can be used in conjunction with -a or -d. Valid platforms: linux, windows", :require => false, :type => :string
   opt :environment, "Operate exclusively on nodes with a particular environment (e.g. dev, prod). Can be used in conjunction with -a or -d.", :require => false, :type => :string
-  opt :override_chef_runlist, "An alternate runlist to pass to Chef, in chefrun mode.", :require => false, :type => :string
+  opt :override_chef_runlist, "An alternate runlist to pass to Chef, in groomeronly mode.", :require => false, :type => :string
   opt :xecute, "Run a shell command on matching nodes. Overrides --mode and suppresses some informational output in favor of scriptability.", :require => false, :type => :string
-  opt :mode, "Action to perform on matching nodes. Valid actions: groom, chefrun, awsmeta, vaults, certs, chefupgrade", :require => false, :default => "chefrun", :type => :string
+  opt :mode, "Action to perform on matching nodes. Valid actions: groom, groomeronly, awsmeta, vaults, certs, chefupgrade", :require => false, :default => "groomeronly", :type => :string
   opt :verbose, "Show output from Chef runs, etc", :require => false, :default => false, :type => :boolean
   opt :winrm, "Force WinRM connection. Disable SSH fallback", :require => false, :default => false, :type => :boolean
   opt :info, "List a particular node attribute", :require => false, :default => 'nodename', :type => :string
@@ -39,8 +39,10 @@ end
 
 MU.setLogging(MU::Logger::LOUD) if $opts[:verbose]
 
-if !["groom", "chefrun", "vaults", "userdata", "awsmeta", "certs", "chefupgrade"].include?($opts[:mode])
-  Optimist::die(:mode, "--mode must be one of: groom, chefrun, awsmeta, vaults, certs, chefupgrade")
+$opts[:mode] = "groomeronly" if $opts[:mode] == "chefrun"
+
+if !["groom", "groomeronly", "vaults", "userdata", "awsmeta", "certs", "chefupgrade"].include?($opts[:mode])
+  Optimist::die(:mode, "--mode must be one of: groom, groomeronly, awsmeta, vaults, certs, chefupgrade")
 end
 if $opts[:platform] and !["linux", "windows"].include?($opts[:platform])
   Optimist::die(:platform, "--platform must be one of: linux, windows")
@@ -176,7 +178,7 @@ end
 
 exit 1 if !ok
 
-def reGroom(deploys = MU::MommaCat.listDeploys, nodes = [], vaults_only: false)
+def reGroom(deploys = MU::MommaCat.listDeploys, nodes = [], vaults_only: false, groomeronly: false)
   badnodes = []
   count = 0
   deploys.each { |muid|
@@ -196,6 +198,8 @@ def reGroom(deploys = MU::MommaCat.listDeploys, nodes = [], vaults_only: false)
         server.config["vault_access"].each { |v|
           MU::Groomer::Chef.grantSecretAccess(mu_name, v['vault'], v['item'])
         }
+      elsif groomeronly
+        server.groomer.run
       else
         mommacat.groomNode(server.cloud_id, nodeclass, type, mu_name: mu_name)
       end
@@ -227,7 +231,7 @@ def reGroom(deploys = MU::MommaCat.listDeploys, nodes = [], vaults_only: false)
   end
 end
 
-def runCommand(deploys = MU::MommaCat.listDeploys, nodes = [], cmd = nil, print_output: $opts[:verbose], noop: false, chefrun: false, chef_runlist: nil)
+def runCommand(deploys = MU::MommaCat.listDeploys, nodes = [], cmd = nil, print_output: $opts[:verbose], noop: false)
   badnodes = []
   count = 0
   deploys.each { |muid|
@@ -247,12 +251,6 @@ def runCommand(deploys = MU::MommaCat.listDeploys, nodes = [], cmd = nil, print_
         next
       end
 
-      # Generate the command if attempting a chef run
-      if chefrun
-        cmd = serverobj.windows? ? "powershell -Command chef-client" : "chef-client || sudo chef-client"
-        cmd += " -o '#{chef_runlist}'" if chef_runlist
-      end
-
       MU.log "Running '#{cmd}' on #{nodename} (##{count})" if !print_output
 
       # Set variables to catch the output and exit code of the execution
@@ -363,7 +361,7 @@ def runCommand(deploys = MU::MommaCat.listDeploys, nodes = [], cmd = nil, print_
   }
 
   if badnodes.size > 0
-    cmd = "Chef" if $opts[:mode] == "chefrun"
+    cmd = "Chef" if $opts[:mode] == "groomeronly"
     if !print_output
       MU.log "Not all `#{cmd}` runs exited cleanly", MU::WARN, details: badnodes
     else
@@ -687,12 +685,13 @@ elsif $opts[:mode] == "vaults"
   reGroom(do_deploys, do_nodes, vaults_only: true)
 elsif $opts[:mode] == "chefupgrade"
   chefUpgrade(do_deploys, do_nodes)
-elsif $opts[:mode] == "chefrun"
+elsif $opts[:mode] == "groomeronly"
   print_output = $opts[:verbose] || do_nodes.size == 1
   if $opts[:override_chef_runlist]
-    runCommand(do_deploys, do_nodes, chef_runlist: $opts[:override_chef_runlist], chefrun: true, print_output: print_output)
+#    runCommand(do_deploys, do_nodes, chef_runlist: $opts[:override_chef_runlist], groomeronly: true, print_output: print_output)
   else
-    runCommand(do_deploys, do_nodes, chefrun: true, print_output: print_output)
+#    runCommand(do_deploys, do_nodes, groomeronly: true, print_output: print_output)
+    reGroom(do_deploys, do_nodes, groomeronly: true)
   end
 elsif $opts[:mode] == "userdata" or $opts[:mode] == "awsmeta"
   # Need Google equiv and to select nodes correctly based on what cloud they're in
diff --git a/cloud-mu.gemspec b/cloud-mu.gemspec
index ca1b31332..ff097c411 100644
--- a/cloud-mu.gemspec
+++ b/cloud-mu.gemspec
@@ -17,8 +17,8 @@ end
 Gem::Specification.new do |s|
   s.name = 'cloud-mu'
-  s.version = '3.1.5'
-  s.date = '2020-03-03'
+  s.version = '3.1.6'
+  s.date = '2020-03-20'
   s.require_paths = ['modules']
   s.required_ruby_version = '>= 2.4'
   s.summary = "The eGTLabs Mu toolkit for unified cloud deployments"
diff --git a/cookbooks/mu-activedirectory/resources/domain.rb b/cookbooks/mu-activedirectory/resources/domain.rb
index ff1ed2531..a5e7a25a1 100644
--- a/cookbooks/mu-activedirectory/resources/domain.rb
+++ b/cookbooks/mu-activedirectory/resources/domain.rb
@@ -19,7 +19,7 @@
 attribute :restore_mode_password, :kind_of => String, :required => true
 attribute :site_name, :kind_of => String, :default => node['ad']['site_name'], :required => false
 attribute :computer_name, :kind_of => String, :default => node['ad']['computer_name']
-attribute :ntds_static_port, :kind_of => Fixnum, :default => node['ad']['ntds_static_port']
-attribute :ntfrs_static_port, :kind_of => Fixnum, :default => node['ad']['ntfrs_static_port']
-attribute :dfsr_static_port, :kind_of => Fixnum, :default => node['ad']['dfsr_static_port']
-attribute :netlogon_static_port, :kind_of => Fixnum, :default => node['ad']['netlogon_static_port']
+attribute :ntds_static_port, :kind_of => Integer, :default => node['ad']['ntds_static_port']
+attribute :ntfrs_static_port, :kind_of => Integer, :default => node['ad']['ntfrs_static_port']
+attribute :dfsr_static_port, :kind_of => Integer, :default => node['ad']['dfsr_static_port']
+attribute :netlogon_static_port, :kind_of => Integer, :default => node['ad']['netlogon_static_port']
diff --git a/cookbooks/mu-activedirectory/resources/domain_controller.rb b/cookbooks/mu-activedirectory/resources/domain_controller.rb
index 4f4cedf55..cd6bf803b 100644
--- a/cookbooks/mu-activedirectory/resources/domain_controller.rb
+++ b/cookbooks/mu-activedirectory/resources/domain_controller.rb
@@ -19,7 +19,7 @@
 attribute :restore_mode_password, :kind_of => String, :required => true
 attribute :site_name, :kind_of => String, :default => node['ad']['site_name'], :required => false
 attribute :computer_name, :kind_of => String, :default => node['ad']['computer_name']
-attribute :ntds_static_port, :kind_of => Fixnum, :default => node['ad']['ntds_static_port']
-attribute :ntfrs_static_port, :kind_of => Fixnum, :default => node['ad']['ntfrs_static_port']
-attribute :dfsr_static_port, :kind_of => Fixnum, :default => node['ad']['dfsr_static_port']
-attribute :netlogon_static_port, :kind_of => Fixnum, :default => node['ad']['netlogon_static_port']
+attribute :ntds_static_port, :kind_of => Integer, :default => node['ad']['ntds_static_port']
+attribute :ntfrs_static_port, :kind_of => Integer, :default => node['ad']['ntfrs_static_port']
+attribute :dfsr_static_port, :kind_of => Integer, :default => node['ad']['dfsr_static_port']
+attribute :netlogon_static_port, :kind_of => Integer, :default => node['ad']['netlogon_static_port']
diff --git a/cookbooks/mu-tools/recipes/eks.rb b/cookbooks/mu-tools/recipes/eks.rb
index fe5c879af..a414aa2a6 100644
--- a/cookbooks/mu-tools/recipes/eks.rb
+++ b/cookbooks/mu-tools/recipes/eks.rb
@@ -160,8 +160,8 @@
 opento.uniq.each { |src|
   [:tcp, :udp, :icmp].each { |proto|
-    execute "iptables -I INPUT -p #{proto} -s #{src}" do
-      not_if "iptables -L -n | tr -s ' ' | grep -- '#{proto} -- #{src.sub(/\/32$/, "")}' > /dev/null"
+    execute "iptables -w 30 -I INPUT -p #{proto} -s #{src}" do
+      not_if "iptables -w 30 -L -n | tr -s ' ' | grep -- '#{proto} -- #{src.sub(/\/32$/, "")}' > /dev/null"
     end
   }
 }
diff --git a/cookbooks/mu-tools/recipes/windows-client.rb b/cookbooks/mu-tools/recipes/windows-client.rb
index 9db44a976..b6fa4e562 100644
--- a/cookbooks/mu-tools/recipes/windows-client.rb
+++ b/cookbooks/mu-tools/recipes/windows-client.rb
@@ -26,20 +26,22 @@
     sshd_password = windows_vault[node['windows_sshd_password_field']]
 
+    admin_user = node['windows_admin_username'] || "Administrator"
+
     windows_version = node['platform_version'].to_i
     public_keys = Array.new
 
-    if windows_version == 10
+    if windows_version >= 10
       Chef::Log.info "version #{windows_version}, using openssh"
 
       include_recipe 'chocolatey'
 
       openssh_path = 'C:\Program Files\OpenSSH-Win64'
-      ssh_program_data = "#{ENV['ProgramData']}/ssh"
+      ssh_program_data = "#{ENV['ProgramData']}\\ssh"
 
-      ssh_dir = "C:/Users/Administrator/.ssh"
+      ssh_dir = "C:/Users/#{admin_user}/.ssh"
       authorized_keys = "#{ssh_dir}/authorized_keys"
 
@@ -86,7 +88,8 @@
         path ssh_program_data
         owner sshd_user
         rights :full_control, sshd_user
-        rights :full_control, 'Administrator'
+        rights :full_control, admin_user
+        notifies :run, 'ruby[find files to change ownership of]', :immediately
         notifies :run, 'powershell_script[Generate Host Key]', :immediately
       end
 
@@ -97,22 +100,22 @@
         notifies :create, "template[#{ssh_program_data}/sshd_config]", :immediately
       end
 
-      template "#{ssh_program_data}/sshd_config" do
+      directory "set file ownership" do
         action :nothing
+        path ssh_program_data
         owner sshd_user
-        source "sshd_config.erb"
         mode '0600'
-        cookbook "mu-tools"
-        notifies :run, 'ruby[find files to change ownership of]', :immediately
+        rights :full_control, sshd_user
+        deny_rights :full_control, admin_user
       end
 
-      directory "set file ownership" do
+      template "#{ssh_program_data}/sshd_config" do
         action :nothing
-        path ssh_program_data
         owner sshd_user
+        source "sshd_config.erb"
         mode '0600'
-        rights :full_control, sshd_user
-        deny_rights :full_control, 'Administrator'
+        cookbook "mu-tools"
+        notifies :run, 'ruby[find files to change ownership of]', :immediately
       end
 
       windows_service 'sshd' do
@@ -120,26 +123,26 @@
       end
 
       group 'sshusers' do
-        members [sshd_user, 'Administrator']
+        members [sshd_user, admin_user]
       end
 
       ruby 'find files to change ownership of' do
        action :nothing
        code <<-EOH
-         files = Dir.entries ssh_program_data
+         files = Dir.entries '#{ssh_program_data}'
          puts files
        EOH
       end
 
-      log 'files in ssh' do
-        message files.join
-        level :info
-      end
-
+#      log 'files in ssh' do
+#        message files.join
+#        level :info
+#      end
+#
       files.each do |file|
         file "#{ssh_program_data}#{file}" do
           owner sshd_user
-          deny_rights :full_control, 'Administrator'
+          deny_rights :full_control, admin_user
         end
       end
 
@@ -150,7 +153,7 @@
       end
 
       file authorized_keys do
-        owner 'Administrator'
+        owner admin_user
         content public_key
       end
 
@@ -323,7 +326,7 @@
 #          sensitive true
 #        end
 #      end
-#    end
+    end
 
     else
diff --git a/extras/clean-stock-amis b/extras/clean-stock-amis
index ed41b5e36..7893ea019 100644
--- a/extras/clean-stock-amis
+++ b/extras/clean-stock-amis
@@ -18,37 +18,43 @@ require 'json'
 require File.realpath(File.expand_path(File.dirname(__FILE__)+"/../bin/mu-load-config.rb"))
 require 'mu'
 
-credentials = if ARGV[0] and !ARGV[0].empty?
-  ARGV[0]
-else
-  nil
+$opts = Optimist::options do
+  banner <<-EOS
+#{$0} [-c credentials] [-i imagename]
+  EOS
+  opt :credentials, "Use these AWS credentials from mu.yaml instead of the default set", :required => false, :type => :string
+  opt :image, "Purge a specific image, instead of just scrubbing old ones", :required => false, :type => :string
 end
 
 filters = [
   {
     name: "owner-id",
-    values: [MU::Cloud::AWS.credToAcct(credentials)]
+    values: [MU::Cloud::AWS.credToAcct($opts[:credentials])]
   }
 ]
 
 MU::Cloud::AWS.listRegions.each { |r|
-  images = MU::Cloud::AWS.ec2(region: r, credentials: credentials).describe_images(
+  images = MU::Cloud::AWS.ec2(region: r, credentials: $opts[:credentials]).describe_images(
     filters: filters + [{ "name" => "state", "values" => ["available"]}]
   ).images
   images.each { |ami|
-    if (DateTime.now.to_time - DateTime.parse(ami.creation_date).to_time) > 15552000 and ami.name.match(/^MU-(PROD|DEV)/)
-      snaps = []
-      ami.block_device_mappings.each { |dev|
-        if !dev.ebs.nil?
-          snaps << dev.ebs.snapshot_id
-        end
-      }
-      MU.log "Deregistering #{ami.name} (#{ami.creation_date})", MU::WARN, details: snaps
-      MU::Cloud::AWS.ec2(region: r, credentials: credentials).deregister_image(image_id: ami.image_id)
-      snaps.each { |snap_id|
-        MU::Cloud::AWS.ec2(region: r, credentials: credentials).delete_snapshot(snapshot_id: snap_id)
-      }
-    end
+    if ($opts[:image] and ami.name == $opts[:image]) or
+       ((DateTime.now.to_time - DateTime.parse(ami.creation_date).to_time) > 15552000 and ami.name.match(/^MU-(PROD|DEV)/))
+      snaps = []
+      ami.block_device_mappings.each { |dev|
+        if !dev.ebs.nil?
+          snaps << dev.ebs.snapshot_id
+        end
+      }
+      MU.log "Deregistering #{ami.name}, #{r} (#{ami.creation_date})", MU::WARN, details: snaps
+      begin
+        MU::Cloud::AWS.ec2(region: r, credentials: $opts[:credentials]).deregister_image(image_id: ami.image_id)
+      rescue Aws::EC2::Errors::InvalidAMIIDUnavailable
+      end
+      snaps.each { |snap_id|
+        MU::Cloud::AWS.ec2(region: r, credentials: $opts[:credentials]).delete_snapshot(snapshot_id: snap_id)
+      }
+    end
   }
 }
diff --git a/extras/image-generators/AWS/win2k12.yaml b/extras/image-generators/AWS/win2k12.yaml
index 8fd91b555..06433741b 100644
--- a/extras/image-generators/AWS/win2k12.yaml
+++ b/extras/image-generators/AWS/win2k12.yaml
@@ -12,6 +12,8 @@
   groomer: Ansible
   run_list:
   - mu-windows
+  ansible_vars:
+    mu_build_image: true
   create_image:
     image_then_destroy: true
     public: true
diff --git a/extras/image-generators/AWS/win2k16.yaml b/extras/image-generators/AWS/win2k16.yaml
index 34c0a9e19..f53e0067a 100644
--- a/extras/image-generators/AWS/win2k16.yaml
+++ b/extras/image-generators/AWS/win2k16.yaml
@@ -12,6 +12,8 @@
   groomer: Ansible
   run_list:
   - mu-windows
+  ansible_vars:
+    mu_build_image: true
   create_image:
     image_then_destroy: true
     public: true
diff --git a/extras/image-generators/AWS/win2k19.yaml b/extras/image-generators/AWS/win2k19.yaml
index 3988d6ce3..6b9d8e22d 100644
--- a/extras/image-generators/AWS/win2k19.yaml
+++ b/extras/image-generators/AWS/win2k19.yaml
@@ -12,6 +12,8 @@
   groomer: Ansible
   run_list:
   - mu-windows
+  ansible_vars:
+    mu_build_image: true
   create_image:
     image_then_destroy: true
     public: true
diff --git a/modules/Gemfile.lock b/modules/Gemfile.lock
index c0b09a4d1..760bab09c 100644
--- a/modules/Gemfile.lock
+++ b/modules/Gemfile.lock
@@ -10,7 +10,7 @@ GIT
 PATH
   remote: ..
   specs:
-    cloud-mu (3.1.5)
+    cloud-mu (3.1.6)
       addressable (~> 2.5)
       aws-sdk-core (< 3)
       azure_sdk (~> 0.52)
@@ -54,7 +54,7 @@ GEM
       public_suffix (>= 2.0.2, < 4.0)
     ast (2.4.0)
     aws-eventstream (1.0.3)
-    aws-sdk-core (2.11.456)
+    aws-sdk-core (2.11.470)
       aws-sigv4 (~> 1.0)
       jmespath (~> 1.0)
     aws-sigv4 (1.1.1)
@@ -164,7 +164,7 @@ GEM
       ms_rest_azure (~> 0.11.0)
     azure_mgmt_container_registry (0.18.3)
       ms_rest_azure (~> 0.11.1)
-    azure_mgmt_container_service (0.20.0)
+    azure_mgmt_container_service (0.20.1)
       ms_rest_azure (~> 0.11.1)
     azure_mgmt_cosmosdb (0.21.0)
       ms_rest_azure (~> 0.11.1)
@@ -172,7 +172,7 @@ GEM
       ms_rest_azure (~> 0.11.1)
     azure_mgmt_customer_insights (0.17.2)
       ms_rest_azure (~> 0.11.0)
-    azure_mgmt_data_factory (0.18.0)
+    azure_mgmt_data_factory (0.18.1)
       ms_rest_azure (~> 0.11.1)
     azure_mgmt_data_migration (0.18.0)
       ms_rest_azure (~> 0.11.1)
@@ -312,7 +312,7 @@ GEM
       ms_rest_azure (~> 0.11.1)
     azure_mgmt_stor_simple8000_series (0.17.2)
       ms_rest_azure (~> 0.11.0)
-    azure_mgmt_storage (0.19.2)
+    azure_mgmt_storage (0.19.3)
       ms_rest_azure (~> 0.11.1)
     azure_mgmt_storagecache (0.17.1)
       ms_rest_azure (~> 0.11.1)
@@ -330,7 +330,7 @@ GEM
       ms_rest_azure (~> 0.11.1)
     azure_mgmt_web (0.17.5)
       ms_rest_azure (~> 0.11.1)
-    azure_sdk (0.52.0)
+    azure_sdk (0.52.1)
       azure-storage (~> 0.14.0.preview)
       azure_cognitiveservices_anomalydetector (~> 0.17.0)
       azure_cognitiveservices_autosuggest (~> 0.17.1)
@@ -380,11 +380,11 @@ GEM
       azure_mgmt_consumption (~> 0.18.0)
       azure_mgmt_container_instance (~> 0.17.4)
       azure_mgmt_container_registry (~> 0.18.3)
-      azure_mgmt_container_service (~> 0.20.0)
+      azure_mgmt_container_service (~> 0.20.1)
       azure_mgmt_cosmosdb (~> 0.21.0)
       azure_mgmt_cost_management (~> 0.17.0)
       azure_mgmt_customer_insights (~> 0.17.2)
-      azure_mgmt_data_factory (~> 0.18.0)
+      azure_mgmt_data_factory (~> 0.18.1)
       azure_mgmt_data_migration (~> 0.18.0)
       azure_mgmt_databox (~> 0.17.0)
       azure_mgmt_datalake_analytics (~> 0.17.2)
@@ -454,7 +454,7 @@ GEM
       azure_mgmt_sql (~> 0.19.0)
       azure_mgmt_sqlvirtualmachine (~> 0.18.1)
       azure_mgmt_stor_simple8000_series (~> 0.17.2)
-      azure_mgmt_storage (~> 0.19.2)
+      azure_mgmt_storage (~> 0.19.3)
       azure_mgmt_storagecache (~> 0.17.1)
       azure_mgmt_storagesync (~> 0.18.0)
       azure_mgmt_stream_analytics (~> 0.17.2)
@@ -563,8 +563,7 @@ GEM
       cucumber-tag_expressions (~> 2.0, >= 2.0.2)
     cucumber-gherkin (10.0.0)
       cucumber-messages (~> 10.0, >= 10.0.1)
-    cucumber-messages (10.0.1)
-      json (~> 2.3, >= 2.3.0)
+    cucumber-messages (10.0.3)
       protobuf-cucumber (~> 3.10, >= 3.10.8)
     cucumber-tag_expressions (2.0.2)
     daemons (1.3.1)
@@ -628,7 +627,6 @@ GEM
     ipaddress (0.8.3)
     jaro_winkler (1.5.4)
     jmespath (1.4.0)
-    json (2.3.0)
     json-schema (2.8.1)
       addressable (>= 2.4)
     jwt (2.2.1)
@@ -649,7 +647,7 @@ GEM
     mixlib-cli (1.7.0)
     mixlib-config (3.0.6)
       tomlrb
-    mixlib-install (3.11.26)
+    mixlib-install (3.12.1)
       mixlib-shellout
       mixlib-versioning
       thor
@@ -657,15 +655,15 @@ GEM
     mixlib-shellout (2.4.4)
     mixlib-versioning (1.2.12)
     molinillo (0.6.6)
-    ms_rest (0.7.5)
+    ms_rest (0.7.6)
       concurrent-ruby (~> 1.0)
-      faraday (~> 0.9)
+      faraday (>= 0.9, < 2.0.0)
       timeliness (~> 0.3.10)
-    ms_rest_azure (0.11.1)
+    ms_rest_azure (0.11.2)
       concurrent-ruby (~> 1.0)
-      faraday (~> 0.9)
+      faraday (>= 0.9, < 2.0.0)
       faraday-cookie_jar (~> 0.0.6)
-      ms_rest (~> 0.7.4)
+      ms_rest (~> 0.7.6)
       unf_ext (= 0.0.7.2)
     multi_json (1.14.1)
     multipart-post (2.1.1)
@@ -687,7 +685,7 @@ GEM
       mini_portile2 (~> 2.4.0)
     nori (2.6.0)
     numerizer (0.1.1)
-    octokit (4.16.0)
+    octokit (4.17.0)
       faraday (>= 0.9)
       sawyer (~> 0.8.0, >= 0.5.3)
     ohai (14.14.0)
@@ -734,7 +732,7 @@ GEM
       rspec-mocks (~> 3.9.0)
     rspec-core (3.9.1)
       rspec-support (~> 3.9.1)
-    rspec-expectations (3.9.0)
+    rspec-expectations (3.9.1)
       diff-lcs (>= 1.2.0, < 2.0)
       rspec-support (~> 3.9.0)
     rspec-its (1.3.0)
@@ -755,11 +753,12 @@ GEM
       rexml
       ruby-progressbar (~> 1.7)
       unicode-display_width (>= 1.4.0, < 1.7)
-    ruby-graphviz (1.2.4)
+    ruby-graphviz (1.2.5)
+      rexml
     ruby-progressbar (1.10.1)
    ruby-wmi (0.4.0)
     rubyntlm (0.6.2)
-    rubyzip (2.2.0)
+    rubyzip (2.3.0)
     rufus-lru (1.1.0)
     sawyer (0.8.2)
       addressable (>= 2.3.5)
@@ -781,7 +780,7 @@ GEM
     solve (4.0.3)
       molinillo (~> 0.6)
       semverse (>= 1.1, < 4.0)
-    specinfra (2.82.10)
+    specinfra (2.82.12)
       net-scp
       net-ssh (>= 2.7)
       net-telnet (= 0.1.1)
@@ -826,7 +825,7 @@ GEM
       winrm (~> 2.0)
     wmi-lite (1.0.5)
     yard (0.9.24)
-    zeitwerk (2.2.2)
+    zeitwerk (2.3.0)
 
 PLATFORMS
   ruby
diff --git a/modules/mommacat.ru b/modules/mommacat.ru
index 53cab65d7..2fdf2a6c6 100644
--- a/modules/mommacat.ru
+++ b/modules/mommacat.ru
@@ -51,7 +51,7 @@ Signal.trap("URG") do
 end
 
 begin
-  MU::MommaCat.syncMonitoringConfig(false)
+  MU::Master.syncMonitoringConfig(false)
 rescue StandardError => e
   MU.log e.inspect, MU::ERR, details: e.backtrace
   # ...but don't die!
diff --git a/modules/mu.rb b/modules/mu.rb
index 6a3561dd7..cf5d0e68c 100644
--- a/modules/mu.rb
+++ b/modules/mu.rb
@@ -273,8 +273,8 @@ def initialize(*args, &block)
   # Wrapper class for fatal Exceptions. Gives our internals something to
   # inherit that will log an error message appropriately before bubbling up.
   class MuError < StandardError
-    def initialize(message = nil)
-      MU.log message, MU::ERR, details: caller[2] if !message.nil?
+    def initialize(message = nil, silent: false)
+      MU.log message, MU::ERR, details: caller[2] if !message.nil? and !silent
       if MU.verbosity == MU::Logger::SILENT
         super ""
       else
@@ -286,8 +286,8 @@ def initialize(message = nil)
   # Wrapper class for temporary Exceptions. Gives our internals something to
   # inherit that will log a notice message appropriately before bubbling up.
   class MuNonFatal < StandardError
-    def initialize(message = nil)
-      MU.log message, MU::NOTICE if !message.nil?
+    def initialize(message = nil, silent: false)
+      MU.log message, MU::NOTICE if !message.nil? and !silent
       if MU.verbosity == MU::Logger::SILENT
         super ""
       else
@@ -598,9 +598,10 @@ def self.summary
   end
 
   # Shortcut to invoke {MU::Logger#log}
-  def self.log(msg, level = MU::INFO, details: nil, html: false, verbosity: nil, color: true)
+  def self.log(msg, level = MU::INFO, shorthand_details = nil, details: nil, html: false, verbosity: nil, color: true)
     return if (level == MU::DEBUG and verbosity and verbosity <= MU::Logger::LOUD)
     return if verbosity and verbosity == MU::Logger::SILENT
+    details ||= shorthand_details
 
     if (level == MU::ERR or
         level == MU::WARN or
diff --git a/modules/mu/adoption.rb b/modules/mu/adoption.rb
index a3f511e53..2d59e8431 100644
--- a/modules/mu/adoption.rb
+++ b/modules/mu/adoption.rb
@@ -30,7 +30,7 @@ class Incomplete < MU::MuNonFatal; end
     :omnibus => "Jam everything into one monolithic configuration"
   }
 
-  def initialize(clouds: MU::Cloud.supportedClouds, types: MU::Cloud.resource_types.keys, parent: nil, billing: nil, sources: nil, credentials: nil, group_by: :logical, savedeploys: false, diff: false, habitats: [])
+  def initialize(clouds: MU::Cloud.supportedClouds, types: MU::Cloud.resource_types.keys, parent: nil, billing: nil, sources: nil, credentials: nil, group_by: :logical, savedeploys: false, diff: false, habitats: [], scrub_mu_isms: false)
     @scraped = {}
     @clouds = clouds
     @types = types
@@ -45,6 +45,7 @@ def initialize(clouds: MU::Cloud.supportedClouds, types: MU::Cloud.resource_type
     @diff = diff
     @habitats = habitats
     @habitats ||= []
+    @scrub_mu_isms = scrub_mu_isms
   end
 
   # Walk cloud providers with available credentials to discover resources
@@ -65,6 +66,11 @@ def scrapeClouds()
       cloudclass.listCredentials.each { |credset|
         next if @sources and !@sources.include?(credset)
 
+        cfg = cloudclass.credConfig(credset)
+        if cfg and cfg['restrict_to_habitats']
+          cfg['restrict_to_habitats'] << cfg['project'] if cfg['project']
+        end
+
         if @parent
           # TODO handle different inputs (cloud_id, etc)
           # TODO do something about vague matches
@@ -101,15 +107,21 @@ def scrapeClouds()
           allow_multi: true,
           habitats: @habitats.dup,
           dummy_ok: true,
-          debug: false,
-          flags: { "skip_provider_owned" => true }
+          skip_provider_owned: true,
+#          debug: false#,
         )
 
         if found and found.size > 0
+          if resclass.cfg_plural == "habitats"
+            found.reject! { |h| !cloudclass.listHabitats(credset).include?(h) }
+          end
           MU.log "Found #{found.size.to_s} raw #{resclass.cfg_plural} in #{cloud}"
           @scraped[type] ||= {}
           found.each { |obj|
+            if obj.habitat and !cloudclass.listHabitats(credset).include?(obj.habitat)
+              next
+            end
             # XXX apply any filters (e.g. MU-ID tags)
             @scraped[type][obj.cloud_id] = obj
           }
         end
@@ -190,6 +202,9 @@ def generateBaskets(prefix: "")
     groupings.each_pair { |appname, types|
       bok = { "appname" => prefix+appname }
+      if @scrub_mu_isms
+        bok["scrub_mu_isms"] = true
+      end
       if @target_creds
         bok["credentials"] = @target_creds
       end
@@ -333,7 +348,7 @@ def scrubSchemaDefaults(conf_chunk, schema_chunk, depth = 0, type: nil)
       deletia = []
       schema_chunk["properties"].each_pair { |key, subschema|
         next if !conf_chunk[key]
-        shortclass, _cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(key)
+        shortclass, _cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(key, false)
 
         if subschema["default_if"]
           subschema["default_if"].each { |cond|
diff --git a/modules/mu/cleanup.rb b/modules/mu/cleanup.rb
index 53fe7da5f..a0181d60e 100644
--- a/modules/mu/cleanup.rb
+++ b/modules/mu/cleanup.rb
@@ -30,6 +30,8 @@ class Cleanup
     @onlycloud = false
     @skipcloud = false
 
+    TYPES_IN_ORDER = ["Collection", "Endpoint", "Function", "ServerPool", "ContainerCluster", "SearchDomain", "Server", "MsgQueue", "Database", "CacheCluster", "StoragePool", "LoadBalancer", "NoSQLDB", "FirewallRule", "Alarm", "Notifier", "Log", "VPC", "Role", "Group", "User", "Bucket", "DNSZone", "Collection"]
+
     # Purge all resources associated with a deployment.
     # @param deploy_id [String]: The identifier of the deployment to remove (typically seen in the MU-ID tag on a resource).
     # @param noop [Boolean]: Do not delete resources, merely list what would be deleted.
@@ -54,14 +56,7 @@ def self.run(deploy_id, noop: false, skipsnapshots: false, onlycloud: false, ver
       @noop = true
     end
 
-    if MU.mu_user != "mu"
-      MU.setVar("dataDir", Etc.getpwnam(MU.mu_user).dir+"/.mu/var")
-    else
-      MU.setVar("dataDir", MU.mainDataDir)
-    end
-
-
-    types_in_order = ["Collection", "Endpoint", "Function", "ServerPool", "ContainerCluster", "SearchDomain", "Server", "MsgQueue", "Database", "CacheCluster", "StoragePool", "LoadBalancer", "NoSQLDB", "FirewallRule", "Alarm", "Notifier", "Log", "VPC", "Role", "Group", "User", "Bucket", "DNSZone", "Collection"]
+    MU.setVar("dataDir", (MU.mu_user == "mu" ? MU.mainDataDir : Etc.getpwnam(MU.mu_user).dir+"/.mu/var"))
 
     # Load up our deployment metadata
     if !mommacat.nil?
@@ -82,172 +77,24 @@ def self.run(deploy_id, noop: false, skipsnapshots: false, onlycloud: false, ver
       rescue StandardError => e
         MU.log "Can't load a deploy record for #{deploy_id} (#{e.inspect}), cleaning up resources by guesswork", MU::WARN, details: e.backtrace
         MU.setVar("deploy_id", deploy_id)
-      end
     end
 
-    regionsused = @mommacat.regionsUsed if @mommacat
-    credsused = @mommacat.credsUsed if @mommacat
-    habitatsused = @mommacat.habitatsUsed if @mommacat
+    @regionsused = @mommacat.regionsUsed if @mommacat
+    @credsused = @mommacat.credsUsed if @mommacat
+    @habitatsused = @mommacat.habitatsUsed if @mommacat
 
     if !@skipcloud
-      creds = {}
-      MU::Cloud.availableClouds.each { |cloud|
-        cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud)
-        if $MU_CFG[cloud.downcase] and $MU_CFG[cloud.downcase].size > 0
-          creds[cloud] ||= {}
-          cloudclass.listCredentials.each { |credset|
-            next if credsets and credsets.size > 0 and !credsets.include?(credset)
-            next if credsused and credsused.size > 0 and !credsused.include?(credset)
-            MU.log "Will scan #{cloud} with credentials #{credset}"
-            creds[cloud][credset] = cloudclass.listRegions(credentials: credset)
-          }
-        else
-          if cloudclass.hosted?
-            creds[cloud] ||= {}
-            creds[cloud]["#default"] = cloudclass.listRegions
-          end
-        end
-      }
+      creds = listUsedCredentials(credsets)
 
-      parent_thread_id = Thread.current.object_id
       cloudthreads = []
-      keyname = "deploy-#{MU.deploy_id}"
+      had_failures = false
 
       creds.each_pair { |provider, credsets_outer|
         cloudthreads << Thread.new(provider, credsets_outer) { |cloud, credsets_inner|
-          MU.dupGlobals(parent_thread_id)
           Thread.abort_on_exception = false
-          cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud)
-          habitatclass = Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get("Habitat")
-          credsets_inner.each_pair { |credset, acct_regions|
-            next if credsused and !credsused.include?(credset)
-            global_vs_region_semaphore = Mutex.new
-            global_done = {}
-            regionthreads = []
-            acct_regions.each { |r|
-              if regionsused
-                if regionsused.size > 0
-                  next if !regionsused.include?(r)
-                else
-                  next if r != cloudclass.myRegion(credset)
-                end
-              end
-              if regions and !regions.empty?
-                next if !regions.include?(r)
-                MU.log "Checking for #{cloud}/#{credset} resources from #{MU.deploy_id} in #{r}...", MU::NOTICE
-              end
-              regionthreads << Thread.new {
-                MU.dupGlobals(parent_thread_id)
-                Thread.abort_on_exception = false
-                MU.setVar("curRegion", r)
-                projects = []
-                if habitats
-                  projects = habitats
-                else
-                  if $MU_CFG and $MU_CFG[cloud.downcase] and
-                     $MU_CFG[cloud.downcase][credset] and
-                     $MU_CFG[cloud.downcase][credset]["project"]
-# XXX GCP credential schema needs an array for projects
-                    projects << $MU_CFG[cloud.downcase][credset]["project"]
-                  end
-                  begin
-                    projects.concat(cloudclass.listProjects(credset))
-                  rescue NoMethodError
-                  end
-                end
-
-                if projects == []
-                  projects << "" # dummy
-                  MU.log "Checking for #{cloud}/#{credset} resources from #{MU.deploy_id} in #{r}", MU::NOTICE
-                end
-                projects.uniq!
-
-                # We do these in an order that unrolls dependent resources
-                # sensibly, and we hit :Collection twice because AWS
-                # CloudFormation sometimes fails internally.
-                projectthreads = []
-                projects.each { |project|
-                  if habitats and !habitats.empty? and project != ""
-                    next if !habitats.include?(project)
-                  end
-                  if habitatsused and !habitatsused.empty? and project != ""
-                    next if !habitatsused.include?(project)
-                  end
-                  next if !habitatclass.isLive?(project, credset)
-
-                  projectthreads << Thread.new {
-                    MU.dupGlobals(parent_thread_id)
-                    MU.setVar("curRegion", r)
-                    Thread.abort_on_exception = false
-                    if project != ""
-                      MU.log "Checking for #{cloud}/#{credset} resources from #{MU.deploy_id} in #{r}, project #{project}", MU::NOTICE
-                    end
-
-                    MU.dupGlobals(parent_thread_id)
-                    flags = {
-                      "project" => project,
-                      "onlycloud" => @onlycloud,
-                      "skipsnapshots" => @skipsnapshots,
-                    }
-                    types_in_order.each { |t|
-                      begin
-                        skipme = false
-                        global_vs_region_semaphore.synchronize {
-                          MU::Cloud.loadCloudType(cloud, t)
-                          if Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get(t).isGlobal?
-                            global_done[project] ||= []
-                            if !global_done[project].include?(t)
-                              global_done[project] << t
-                              flags['global'] = true
-                            else
-                              skipme = true
-                            end
-                          end
-                        }
-                        next if skipme
-                      rescue MU::Cloud::MuDefunctHabitat, MU::Cloud::MuCloudResourceNotImplemented => e
-                        next
-                      rescue MU::MuError, NoMethodError => e
-                        MU.log "While checking mu/clouds/#{cloud.downcase}/#{cloudclass.cfg_name} for global-ness in cleanup: "+e.message, MU::WARN
-                        next
-                      rescue ::Aws::EC2::Errors::AuthFailure, ::Google::Apis::ClientError => e
-                        MU.log e.message+" in "+r, MU::ERR
-                        next
-                      end
-
-                      begin
-                        if !self.call_cleanup(t, credset, cloud, flags, r)
-                          had_failures = true
-                        end
-                      rescue MU::Cloud::MuDefunctHabitat, MU::Cloud::MuCloudResourceNotImplemented => e
-                        next
-                      end
-                    } # types_in_order.each { |t|
-                  } # projects.each { |project|
-                  projectthreads.each do |t|
-                    t.join
-                  end
-
-                  # XXX move to MU::AWS
-                  if cloud == "AWS"
-                    resp = MU::Cloud::AWS.ec2(region: r, credentials: credset).describe_key_pairs(
-                      filters: [{name: "key-name", values: [keyname]}]
-                    )
-                    resp.data.key_pairs.each { |keypair|
-                      MU.log "Deleting key pair #{keypair.key_name} from #{r}"
-                      MU::Cloud::AWS.ec2(region: r, credentials: credset).delete_key_pair(key_name: keypair.key_name) if !@noop
-                    }
-                  end
-              } # regionthreads << Thread.new {
-            } # acct_regions.each { |r|
-            regionthreads.each do |t|
-              t.join
-            end
-
-          } # credsets.each_pair { |credset, acct_regions|
+          cleanCloud(cloud, habitats, regions, credsets_inner)
         } # cloudthreads << Thread.new(provider, credsets) { |cloud, credsets_outer|
       cloudthreads.each do |t|
        t.join
@@ -259,22 +106,19 @@ def self.run(deploy_id, noop: false, skipsnapshots: false, onlycloud: false, ver
       # once they're all done.
       creds.each_pair { |provider, credsets_inner|
         credsets_inner.keys.each { |credset|
-          next if credsused and !credsused.include?(credset)
+          next if @credsused and !@credsused.include?(credset)
           ["Habitat", "Folder"].each { |t|
             flags = {
               "onlycloud" => @onlycloud,
              "skipsnapshots" => @skipsnapshots
            }
-            if !self.call_cleanup(t, credset, provider, flags, nil)
+            if !call_cleanup(t, credset, provider, flags, nil)
               had_failures = true
             end
           }
         }
       }
 
-      MU::Cloud::Google.removeDeploySecretsAndRoles(MU.deploy_id)
-# XXX port AWS equivalent behavior and add a MU::Cloud wrapper
-
       creds.each_pair { |provider, credsets_inner|
         cloudclass = Object.const_get("MU").const_get("Cloud").const_get(provider)
         credsets_inner.keys.each { |c|
@@ -284,44 +128,16 @@ def self.run(deploy_id, noop: false, skipsnapshots: false, onlycloud: false, ver
     end
 
     # Scrub any residual Chef records with matching tags
-    if !@onlycloud and (@mommacat.nil? or @mommacat.numKittens(types: ["Server", "ServerPool"]) > 0) and !(Gem.paths and Gem.paths.home and !Dir.exist?("/opt/mu/lib"))
-      begin
-        MU::Groomer::Chef.loadChefLib
-        if File.exist?(Etc.getpwuid(Process.uid).dir+"/.chef/knife.rb")
-          Chef::Config.from_file(Etc.getpwuid(Process.uid).dir+"/.chef/knife.rb")
-        end
-        deadnodes = []
-        Chef::Config[:environment] = MU.environment
-        q = Chef::Search::Query.new
-        begin
-          q.search("node", "tags_MU-ID:#{MU.deploy_id}").each { |item|
-            next if item.is_a?(Integer)
-            item.each { |node|
-              deadnodes << node.name
-            }
-          }
-        rescue Net::HTTPServerException
-        end
-
-        begin
-          q.search("node", "name:#{MU.deploy_id}-*").each { |item|
-            next if item.is_a?(Integer)
-            item.each { |node|
-              deadnodes << node.name
-            }
-          }
-        rescue Net::HTTPServerException
-        end
-        MU.log "Missed some Chef resources in node cleanup, purging now", MU::NOTICE if deadnodes.size > 0
-        deadnodes.uniq.each { |node|
-          MU::Groomer::Chef.cleanup(node, [], noop)
-        }
-      rescue LoadError
-      end
+    if !@onlycloud and (@mommacat.nil? or @mommacat.numKittens(types: ["Server", "ServerPool"]) > 0) and !@noop
+      MU.supportedGroomers.each { |g|
+        groomer = MU::Groomer.loadGroomer(g)
+        groomer.cleanup(MU.deploy_id, @noop)
+      }
     end
 
     if had_failures
       MU.log "Had cleanup failures, exiting", MU::ERR
+      File.unlink("#{deploy_dir}/.cleanup") if !@noop
       exit 1
     end
 
@@ -329,99 +145,174 @@ def self.run(deploy_id, noop: false, skipsnapshots: false, onlycloud: false, ver
       @mommacat.purge!
     end
 
-    myhome = Etc.getpwuid(Process.uid).dir
-    sshdir = "#{myhome}/.ssh"
-    sshconf = "#{sshdir}/config"
-    ssharchive = "#{sshdir}/archive"
-
-    Dir.mkdir(sshdir, 0700) if !Dir.exist?(sshdir) and !@noop
-    Dir.mkdir(ssharchive, 0700) if !Dir.exist?(ssharchive) and !@noop
+    if !@onlycloud
+      MU::Master.purgeDeployFromSSH(MU.deploy_id, noop: @noop)
+    end
 
-    keyname = "deploy-#{MU.deploy_id}"
-    if File.exist?("#{sshdir}/#{keyname}")
-      MU.log "Moving #{sshdir}/#{keyname} to #{ssharchive}/#{keyname}"
-      if !@noop
-        File.rename("#{sshdir}/#{keyname}", "#{ssharchive}/#{keyname}")
-      end
+    if !@noop and !@skipcloud and (@mommacat.nil? or @mommacat.numKittens(types: ["Server", "ServerPool"]) > 0)
+#      MU::Master.syncMonitoringConfig
     end
 
-    if File.exist?(sshconf) and File.open(sshconf).read.match(/\/deploy\-#{MU.deploy_id}$/)
-      MU.log "Expunging #{MU.deploy_id} from #{sshconf}"
-      if !@noop
-        FileUtils.copy(sshconf, "#{ssharchive}/config-#{MU.deploy_id}")
-        File.open(sshconf, File::CREAT|File::RDWR, 0600) { |f|
-          f.flock(File::LOCK_EX)
-          newlines = Array.new
-          delete_block = false
-          f.readlines.each { |line|
-            if line.match(/^Host #{MU.deploy_id}\-/)
-              delete_block = true
-            elsif line.match(/^Host /)
-              delete_block = false
-            end
-            newlines << line if !delete_block
-          }
-          f.rewind
-          f.truncate(0)
-          f.puts(newlines)
-          f.flush
-          f.flock(File::LOCK_UN)
-        }
-      end
-    end
-
-    # XXX refactor with above? They're similar, ish.
-    hostsfile = "/etc/hosts"
-    if File.open(hostsfile).read.match(/ #{MU.deploy_id}\-/)
-      if Process.uid == 0
-        MU.log "Expunging traces of #{MU.deploy_id} from #{hostsfile}"
-        if !@noop
-          FileUtils.copy(hostsfile, "#{hostsfile}.cleanup-#{deploy_id}")
-          File.open(hostsfile, File::CREAT|File::RDWR, 0644) { |f|
-            f.flock(File::LOCK_EX)
-            newlines = Array.new
-            f.readlines.each { |line|
-              newlines << line if !line.match(/ #{MU.deploy_id}\-/)
-            }
-            f.rewind
-            f.truncate(0)
-            f.puts(newlines)
-            f.flush
-            f.flock(File::LOCK_UN)
-          }
-        end
-      else
-        MU.log "Residual /etc/hosts entries for #{MU.deploy_id} must be removed by root user", MU::WARN
-      end
-    end
-
-    if !@noop and !@skipcloud
-      if $MU_CFG['aws'] and $MU_CFG['aws']['account_number']
-        MU::Cloud::AWS.s3(region: MU.myRegion).delete_object(
-          bucket: MU.adminBucketName,
-          key: "#{MU.deploy_id}-secret"
-        )
-      end
-      if $MU_CFG['google'] and $MU_CFG['google']['project']
-        begin
-          MU::Cloud::Google.storage.delete_object(
-            MU.adminBucketName,
-            "#{MU.deploy_id}-secret"
-          )
-        rescue ::Google::Apis::ClientError => e
-          raise e if !e.message.match(/^notFound: /)
-        end
-      end
-      if MU.myCloud == "AWS"
-        MU::Cloud::AWS.openFirewallForClients # XXX add the other clouds, or abstract
-      end
-    end
-
-    if !@noop and !@skipcloud and (@mommacat.nil? or @mommacat.numKittens(types: ["Server", "ServerPool"]) > 0)
-#      MU::MommaCat.syncMonitoringConfig
-    end
+  end
+
+  def self.listUsedCredentials(credsets)
+    creds = {}
+    MU::Cloud.availableClouds.each { |cloud|
+      cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud)
+      if $MU_CFG[cloud.downcase] and $MU_CFG[cloud.downcase].size > 0
+        creds[cloud] ||= {}
+        cloudclass.listCredentials.each { |credset|
+          next if credsets and credsets.size > 0 and !credsets.include?(credset)
+          next if @credsused and @credsused.size > 0 and !@credsused.include?(credset)
+          MU.log "Will scan #{cloud} with credentials #{credset}"
+          creds[cloud][credset] = cloudclass.listRegions(credentials: credset)
+        }
+      else
+        if cloudclass.hosted?
+          creds[cloud] ||= {}
+          creds[cloud]["#default"] = cloudclass.listRegions
+        end
+      end
+    }
+    creds
+  end
+  private_class_method :listUsedCredentials
+
+  def self.cleanCloud(cloud, habitats, regions, credsets)
+    cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud)
+    credsets.each_pair { |credset, acct_regions|
+      next if @credsused and !@credsused.include?(credset)
+      global_vs_region_semaphore = Mutex.new
+      global_done = {}
+      regionthreads = []
+      acct_regions.each { |r|
+        if @regionsused
+          if @regionsused.size > 0
+            next if !@regionsused.include?(r)
+          else
+            next if r != cloudclass.myRegion(credset)
+          end
+        end
+        if regions and !regions.empty?
+          next if !regions.include?(r)
+          MU.log "Checking for #{cloud}/#{credset} resources from #{MU.deploy_id} in #{r}...", MU::NOTICE
+        end
+        regionthreads << Thread.new {
+          Thread.abort_on_exception = false
+          MU.setVar("curRegion", r)
+          cleanRegion(cloud, credset, r, global_vs_region_semaphore, global_done, habitats)
+        } # regionthreads << Thread.new {
+      } # acct_regions.each { |r|
+      regionthreads.each do |t|
+        t.join
+      end
+    }
+  end
+  private_class_method :cleanCloud
+
+  def self.cleanRegion(cloud, credset, region, global_vs_region_semaphore, global_done, habitats)
+    had_failures = false
+    cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud)
+    habitatclass = Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get("Habitat")
+
+    projects = []
+    if habitats
+      projects = habitats
+    else
+      if $MU_CFG and $MU_CFG[cloud.downcase] and
+         $MU_CFG[cloud.downcase][credset] and
+         $MU_CFG[cloud.downcase][credset]["project"]
+# XXX GCP credential schema needs an array for projects
+        projects << $MU_CFG[cloud.downcase][credset]["project"]
+      end
+      begin
+        projects.concat(cloudclass.listHabitats(credset))
+      rescue NoMethodError
+      end
+    end
+
+    if projects == []
+      projects << "" # dummy
+      MU.log "Checking for #{cloud}/#{credset} resources from #{MU.deploy_id} in #{region}", MU::NOTICE
+    end
+    projects.uniq!
+
+    # We do these in an order that unrolls dependent resources
+    # sensibly, and we hit :Collection twice because AWS
+    # CloudFormation sometimes fails internally.
+    projectthreads = []
+    projects.each { |project|
+      if habitats and !habitats.empty? and project != ""
+        next if !habitats.include?(project)
+      end
+      if @habitatsused and !@habitatsused.empty? and project != ""
+        next if !@habitatsused.include?(project)
+      end
+      next if !habitatclass.isLive?(project, credset)
+
+      projectthreads << Thread.new {
+        Thread.abort_on_exception = false
+        if !cleanHabitat(cloud, credset, region, project, global_vs_region_semaphore, global_done)
+          had_failures = true
+        end
+      } # TYPES_IN_ORDER.each { |t|
+    } # projects.each { |project|
+    projectthreads.each do |t|
+      t.join
+    end
+
+    had_failures
+  end
+  private_class_method :cleanRegion
+
+  def self.cleanHabitat(cloud, credset, region, habitat, global_vs_region_semaphore, global_done)
+    had_failures = false
+    if habitat != ""
+      MU.log "Checking for #{cloud}/#{credset} resources from #{MU.deploy_id} in #{region}, habitat #{habitat}", MU::NOTICE
+    end
+    flags = {
+      "habitat" => habitat,
+      "onlycloud" => @onlycloud,
+      "skipsnapshots" => @skipsnapshots,
+    }
+    TYPES_IN_ORDER.each { |t|
+      begin
+        skipme = false
+        global_vs_region_semaphore.synchronize {
+          MU::Cloud.loadCloudType(cloud, t)
+          if Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get(t).isGlobal?
+            global_done[habitat] ||= []
+            if !global_done[habitat].include?(t)
+              global_done[habitat] << t
+              flags['global'] = true
+            else
+              skipme = true
+            end
+          end
+        }
+        next if skipme
+      rescue MU::Cloud::MuDefunctHabitat, MU::Cloud::MuCloudResourceNotImplemented
+        next
+      rescue MU::MuError, NoMethodError => e
+        MU.log "While checking mu/clouds/#{cloud.downcase}/#{cloudclass.cfg_name} for global-ness in cleanup: "+e.message, MU::WARN
+        next
+      rescue ::Aws::EC2::Errors::AuthFailure, ::Google::Apis::ClientError => e
+        MU.log e.message+" in "+region, MU::ERR
+        next
+      end
+
+      begin
+        if !call_cleanup(t, credset, cloud, flags, region)
+          had_failures = true
+        end
+      rescue MU::Cloud::MuDefunctHabitat, MU::Cloud::MuCloudResourceNotImplemented
+        next
+      end
+    }
+    had_failures = true
+  end
+  private_class_method :cleanHabitat
 
     # Wrapper for dynamically invoking resource type cleanup methods.
     # @param type [String]:
@@ -444,24 +335,21 @@ def self.call_cleanup(type, credset, provider, flags, region)
           flags['known'] << found.cloud_id
         end
       end
-#      begin
-        resclass = Object.const_get("MU").const_get("Cloud").const_get(type)
-
-        resclass.cleanup(
-          noop: @noop,
-          ignoremaster: @ignoremaster,
-          region: region,
-          cloud: provider,
-          flags: flags,
-          credentials: credset
-        )
-#      rescue ::Seahorse::Client::NetworkingError => e
-#        MU.log "Service not available in AWS region #{r}, skipping", MU::DEBUG, details: e.message
-#      end
+      resclass = Object.const_get("MU").const_get("Cloud").const_get(type)
+
+      resclass.cleanup(
+        noop: @noop,
+        ignoremaster: @ignoremaster,
+        region: region,
+        cloud: provider,
+        flags: flags,
+        credentials: credset
+      )
     else
       true
     end
-  end
+  end
+  private_class_method :call_cleanup
+
 end #class
 end #module
diff --git a/modules/mu/cloud.rb b/modules/mu/cloud.rb
index 5a844d50d..e56259c40 100644
--- a/modules/mu/cloud.rb
+++ b/modules/mu/cloud.rb
@@ -49,7 +49,7 @@ class MuDefunctHabitat < StandardError;
     generic_instance_methods = [:create, :notify, :mu_name, :cloud_id, :config]
 
     # Class methods which the base of a cloud implementation must implement
-    generic_class_methods_toplevel = [:required_instance_methods, :myRegion, :listRegions, :listAZs, :hosted?, :hosted_config, :config_example, :writeDeploySecret, :listCredentials, :credConfig, :listInstanceTypes, :adminBucketName, :adminBucketUrl, :habitat]
+    generic_class_methods_toplevel = [:required_instance_methods, :myRegion, :listRegions, :listAZs, :hosted?, :hosted_config, :config_example, :writeDeploySecret, :listCredentials, :credConfig, :listInstanceTypes, :adminBucketName, :adminBucketUrl, :listHabitats, :habitat, :virtual?]
 
     # Public attributes which will be available on all instantiated cloud resource objects
     #
@@ -529,7 +529,7 @@ def self.getStockImage(cloud = MU::Config.defaultCloud, platform: nil, region: n
             images.deep_merge!(YAML.load(response))
             break
           end
-        rescue StandardError
+        rescue StandardError => e
           if fail_hard
             raise MuError, "Failed to fetch stock images from #{base_url}/#{cloud}.yaml (#{e.message})"
           else
@@ -644,9 +644,16 @@ def self.resource_types;
     # Shorthand lookup for resource type names. Given any of the shorthand class name, configuration name (singular or plural), or full class name, return all four as a set.
     # @param type [String]: A string that looks like our short or full class name or singular or plural configuration names.
+    # @param assert [Boolean]: Raise an exception if the type isn't valid
     # @return [Array]: Class name (Symbol), singular config name (String), plural config name (String), full class name (Object)
-    def self.getResourceNames(type)
-      return [nil, nil, nil, nil, {}] if !type
+    def self.getResourceNames(type, assert = true)
+      if !type
+        if assert
+          raise MuError, "nil resource type requested in getResourceNames"
+        else
+          return [nil, nil, nil, nil, {}]
+        end
+      end
 
       @@resource_types.each_pair { |name, cloudclass|
         if name == type.to_sym or
            cloudclass[:cfg_name] == type or
@@ -656,6 +663,10 @@ def self.getResourceNames(type)
           return [type.to_sym, cloudclass[:cfg_name], cloudclass[:cfg_plural], Object.const_get("MU").const_get("Cloud").const_get(name), cloudclass]
         end
       }
+      if assert
+        raise MuError, "Invalid resource type #{type} requested in getResourceNames"
+      end
+
       [nil, nil, nil, nil, {}]
     end
 
@@ -684,6 +695,14 @@ def self.supportedClouds
       @@supportedCloudList
     end
 
+    # Raise an exception if the cloud provider specified isn't valid
+    def self.assertSupportedCloud(cloud)
+      if cloud.nil? or !supportedClouds.include?(cloud.to_s)
+        raise MuError, "Cloud provider #{cloud} is not supported"
+      end
+      Object.const_get("MU").const_get("Cloud").const_get(cloud.to_s)
+    end
+
     # List of known/supported Cloud providers for which we have at least one
     # set of credentials configured.
     # @return [Array<String>]
@@ -701,6 +720,14 @@ def self.availableClouds
       available
     end
 
+    # Raise an exception if the cloud provider specified isn't valid or we
+    # don't have any credentials configured for it.
+    def self.assertAvailableCloud(cloud)
+      if cloud.nil? or !availableClouds.include?(cloud.to_s)
+        raise MuError, "Cloud provider #{cloud} is not available"
+      end
+    end
+
     # Load the container class for each cloud we know about, and inject autoload
     # code for each of its supported resource type classes.
     failed = []
@@ -823,20 +850,20 @@ def self.loadCloudType(cloud, type)
       @cloud_class_cache[cloud] = {} if !@cloud_class_cache.has_key?(cloud)
       begin
         cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud)
-        myclass = Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get(type)
-        @@resource_types[type.to_sym][:class].each { |class_method|
+        myclass = Object.const_get("MU").const_get("Cloud").const_get(cloud).const_get(shortclass)
+        @@resource_types[shortclass.to_sym][:class].each { |class_method|
           if !myclass.respond_to?(class_method) or myclass.method(class_method).owner.to_s != "#<Class:#{myclass}>"
-            raise MuError, "MU::Cloud::#{cloud}::#{type} has not implemented required class method #{class_method}"
+            raise MuError, "MU::Cloud::#{cloud}::#{shortclass} has not implemented required class method #{class_method}"
           end
         }
-        @@resource_types[type.to_sym][:instance].each { |instance_method|
+        @@resource_types[shortclass.to_sym][:instance].each { |instance_method|
           if !myclass.public_instance_methods.include?(instance_method)
-            raise MuCloudResourceNotImplemented, "MU::Cloud::#{cloud}::#{type} has not implemented required instance method #{instance_method}"
+            raise MuCloudResourceNotImplemented, "MU::Cloud::#{cloud}::#{shortclass} has not implemented required instance method #{instance_method}"
           end
         }
         cloudclass.required_instance_methods.each { |instance_method|
           if !myclass.public_instance_methods.include?(instance_method)
-            MU.log "MU::Cloud::#{cloud}::#{type} has not implemented required instance method #{instance_method}, will declare as attr_accessor", MU::DEBUG
+            MU.log "MU::Cloud::#{cloud}::#{shortclass} has not implemented required instance method #{instance_method}, will declare as attr_accessor", MU::DEBUG
           end
         }
 
@@ -844,7 +871,7 @@ def self.loadCloudType(cloud, type)
         return myclass
       rescue NameError => e
         @cloud_class_cache[cloud][type] = nil
-        raise MuCloudResourceNotImplemented, "The '#{type}' resource is not supported in cloud #{cloud} (tried MU::#{cloud}::#{type})", e.backtrace
+        raise MuCloudResourceNotImplemented, "The '#{type}' resource is not supported in cloud #{cloud} (tried MU::Cloud::#{cloud}::#{shortclass})", e.backtrace
       end
     end
 
@@ -867,6 +894,8 @@ def self.const_missing(symbol)
       Object.const_get("MU").const_get("Cloud").const_get(name).class_eval {
         attr_reader :cloudclass
         attr_reader :cloudobj
+        attr_reader :credentials
+        attr_reader :config
         attr_reader :destroyed
         attr_reader :delayed_save
 
@@ -921,14 +950,27 @@ def to_s
         # @return [String]: Our new +deploy_id+
         def intoDeploy(mommacat, force: false)
           if force or (!@deploy)
-            MU.log "Inserting #{self} (#{self.object_id}) into #{mommacat.deploy_id}", MU::DEBUG
+            MU.log "Inserting #{self} [#{self.object_id}] into #{mommacat.deploy_id} as a #{@config['name']}", MU::DEBUG
+            @deploy = mommacat
+            @deploy.addKitten(@cloudclass.cfg_plural, @config['name'], self)
             @deploy_id = @deploy.deploy_id
             @cloudobj.intoDeploy(mommacat, force: force) if @cloudobj
           end
           @deploy_id
         end
 
+        # Return the +virtual_name+ config field, if it is set.
+        # @param name [String]: If set, will only return a value if +virtual_name+ matches this string
+        # @return [String,nil]
+        def virtual_name(name = nil)
+          if @config and @config['virtual_name'] and
+             (!name or name == @config['virtual_name'])
+            return @config['virtual_name']
+          end
+          nil
+        end
+
         # @param mommacat [MU::MommaCat]: The deployment containing this cloud resource
         # @param mu_name [String]: Optional- specify the full Mu resource name of an existing resource to load, instead of creating a new one
         # @param cloud_id [String]: Optional- specify the cloud provider's identifier for an existing resource to load, instead of creating a new one
@@ -955,7 +997,6 @@ def initialize(**args)
           if my_cloud.nil? or !MU::Cloud.supportedClouds.include?(my_cloud)
             raise MuError, "Can't instantiate a MU::Cloud object without a valid cloud (saw '#{my_cloud}')"
           end
-
           @cloudclass = MU::Cloud.loadCloudType(my_cloud, self.class.shortname)
           @cloudparentclass = Object.const_get("MU").const_get("Cloud").const_get(my_cloud)
           @cloudobj = @cloudclass.new(
@@ -981,7 +1022,6 @@ def initialize(**args)
             MU.log "#{self} in #{@deploy.deploy_id} didn't generate a mu_name after being loaded/initialized, dependencies on this resource will probably be confused!", MU::ERR, details: [caller, args.keys]
           end
 
-
           # We are actually a child object invoking this via super() from its
           # own initialize(), so initialize all the attributes and instance
           # variables we know to be universal.
@@ -2021,17 +2061,18 @@ def getWinRMSession(max_retries = 40, retry_interval = 60, timeout: 30, winrm_re
             loglevel = retries > 4 ? MU::NOTICE : MU::DEBUG
             MU.log "Calling WinRM on #{@mu_name}", loglevel, details: opts
             opts = {
-              endpoint: 'https://'+@mu_name+':5986/wsman',
               retry_limit: winrm_retries,
               no_ssl_peer_verification: true, # XXX this should not be necessary; we get 'hostname "foo" does not match the server certificate' even when it clearly does match
               ca_trust_path: "#{MU.mySSLDir}/Mu_CA.pem",
               transport: :ssl,
               operation_timeout: timeout,
             }
-            if retries % 2 == 0
+            if retries % 2 == 0 # NTLM password over https
+              opts[:endpoint] = 'https://'+canonical_ip+':5986/wsman'
               opts[:user] = @config['windows_admin_username']
               opts[:password] = getWindowsAdminPassword
-            else
+            else # certificate auth over https
+              opts[:endpoint] = 'https://'+@mu_name+':5986/wsman'
               opts[:client_cert] = "#{MU.mySSLDir}/#{@mu_name}-winrm.crt"
               opts[:client_key] = "#{MU.mySSLDir}/#{@mu_name}-winrm.key"
             end
diff --git a/modules/mu/clouds/aws.rb b/modules/mu/clouds/aws.rb
index 0d30d5dd1..e1ef49515 100644
--- a/modules/mu/clouds/aws.rb
+++ b/modules/mu/clouds/aws.rb
@@ -33,6 +33,18 @@ class AWS
     module AdditionalResourceMethods
     end
 
+    # Is this a "real" cloud provider, or a stub like CloudFormation?
+    def self.virtual?
+ false + end + + # List all AWS projects available to our credentials + def self.listHabitats(credentials = nil) + cfg = credConfig(credentials) + return [] if !cfg or !cfg['account_number'] + [cfg['account_number']] + end + # A hook that is always called just before any of the instance methods of # our resource implementations gets invoked, so that we can ensure that # repetitive setup tasks (like resolving +:resource_group+ for Azure @@ -344,6 +356,27 @@ def self.initDeploy(deploy) # etc) # @param deploy_id [String] def self.cleanDeploy(deploy_id, credentials: nil, noop: false) + + if !noop + MU.log "Deleting s3://#{adminBucketName(credentials)}/#{deploy_id}-secret" + MU::Cloud::AWS.s3(credentials: credentials).delete_object( + bucket: adminBucketName(credentials), + key: "#{deploy_id}-secret" + ) + listRegions(credentials: credentials).each { |r| + resp = MU::Cloud::AWS.ec2(region: r, credentials: credentials).describe_key_pairs( + filters: [{name: "key-name", values: ["deploy-#{deploy_id}"]}] + ) + resp.data.key_pairs.each { |keypair| + MU.log "Deleting key pair #{keypair.key_name} from #{r}" + MU::Cloud::AWS.ec2(region: r, credentials: credentials).delete_key_pair(key_name: keypair.key_name) + } + } + + end + if hosted? + MU::Cloud::AWS.openFirewallForClients + end end # Plant a Mu deploy secret into a storage bucket somewhere so our kittens can consume it @@ -1394,7 +1427,7 @@ class AmazonEndpoint # Create an AWS API client # @param region [String]: Amazon region so we know what endpoint to use # @param api [String]: Which API are we wrapping? - def initialize(region: MU.curRegion, api: "EC2", credentials: nil) + def initialize(region: nil, api: "EC2", credentials: nil) @cred_obj = MU::Cloud::AWS.loadCredentials(credentials) @credentials = MU::Cloud::AWS.credConfig(credentials, name_only: true) @@ -1403,6 +1436,8 @@ def initialize(region: MU.curRegion, api: "EC2", credentials: nil) end params = {} + region ||= MU::Cloud::AWS.credConfig(credentials)['region'] + region ||= MU.myRegion if region @region = region diff --git a/modules/mu/clouds/aws/container_cluster.rb b/modules/mu/clouds/aws/container_cluster.rb index 54f98953f..50743eb82 100644 --- a/modules/mu/clouds/aws/container_cluster.rb +++ b/modules/mu/clouds/aws/container_cluster.rb @@ -67,16 +67,15 @@ def create # soul-crushing, yet effective if e.message.match(/because (#{Regexp.quote(@config['region'])}[a-z]), the targeted availability zone, does not currently have sufficient capacity/) bad_az = Regexp.last_match(1) - deletia = nil + deletia = [] mySubnets.each { |subnet| - if subnet.az == bad_az - deletia = subnet.cloud_id - break - end + deletia << subnet.cloud_id if subnet.az == bad_az + } + raise e if deletia.empty? + MU.log "#{bad_az} does not have EKS capacity. Dropping unsupported subnets from ContainerCluster '#{@config['name']}' and retrying.", MU::NOTICE, details: deletia + deletia.each { |subnet| + params[:resources_vpc_config][:subnet_ids].delete(subnet) } - raise e if deletia.nil? - MU.log "#{bad_az} does not have EKS capacity.
Dropping #{deletia} from ContainerCluster '#{@config['name']}' and retrying.", MU::NOTICE - params[:resources_vpc_config][:subnet_ids].delete(deletia) end } @@ -1372,6 +1371,9 @@ def self.validateConfig(cluster, configurator) "name" => cluster["name"]+"pods", "phase" => "groom" } + if !MU::Master.kubectl + MU.log "Since I can't find a kubectl executable, you will have to handle all service account, user, and role bindings manually!", MU::WARN + end end if MU::Cloud::AWS.isGovCloud?(cluster["region"]) and cluster["flavor"] == "EKS" @@ -1470,6 +1472,11 @@ def self.validateConfig(cluster, configurator) end if cluster["flavor"] == "EKS" + + if !MU::Master.kubectl + MU.log "Without a kubectl executable, I cannot bind IAM roles to EKS worker nodes", MU::ERR + ok = false + end worker_pool["canned_iam_policies"] = [ "AmazonEKSWorkerNodePolicy", "AmazonEKS_CNI_Policy", @@ -1602,19 +1609,21 @@ def apply_kubernetes_resources raise MuError, "Failed to apply #{authmap_cmd}" if $?.exitstatus != 0 end - admin_user_cmd = %Q{#{MU::Master.kubectl} --kubeconfig "#{kube_conf}" apply -f "#{MU.myRoot}/extras/admin-user.yaml"} - admin_role_cmd = %Q{#{MU::Master.kubectl} --kubeconfig "#{kube_conf}" apply -f "#{MU.myRoot}/extras/admin-role-binding.yaml"} - MU.log "Configuring Kubernetes admin-user and role", MU::NOTICE, details: admin_user_cmd+"\n"+admin_role_cmd - %x{#{admin_user_cmd}} - %x{#{admin_role_cmd}} - - if @config['kubernetes_resources'] - MU::Master.applyKubernetesResources( - @config['name'], - @config['kubernetes_resources'], - kubeconfig: kube_conf, - outputdir: @deploy.deploy_dir - ) + if MU::Master.kubectl + admin_user_cmd = %Q{#{MU::Master.kubectl} --kubeconfig "#{kube_conf}" apply -f "#{MU.myRoot}/extras/admin-user.yaml"} + admin_role_cmd = %Q{#{MU::Master.kubectl} --kubeconfig "#{kube_conf}" apply -f "#{MU.myRoot}/extras/admin-role-binding.yaml"} + MU.log "Configuring Kubernetes admin-user and role", MU::NOTICE, details: admin_user_cmd+"\n"+admin_role_cmd + %x{#{admin_user_cmd}} + %x{#{admin_role_cmd}} + + if @config['kubernetes_resources'] + MU::Master.applyKubernetesResources( + @config['name'], + @config['kubernetes_resources'], + kubeconfig: kube_conf, + outputdir: @deploy.deploy_dir + ) + end end MU.log %Q{How to interact with your EKS cluster\nkubectl --kubeconfig "#{kube_conf}" get all\nkubectl --kubeconfig "#{kube_conf}" create -f some_k8s_deploy.yml\nkubectl --kubeconfig "#{kube_conf}" get nodes}, MU::SUMMARY diff --git a/modules/mu/clouds/aws/role.rb b/modules/mu/clouds/aws/role.rb index 4d8058727..b87535f73 100644 --- a/modules/mu/clouds/aws/role.rb +++ b/modules/mu/clouds/aws/role.rb @@ -1186,7 +1186,7 @@ def self.genPolicyDocument(policies, deploy_obj: nil, bucket_style: false) statement["Resource"] << id+"/*" end else - raise MuError, "Couldn't find a #{target["entity_type"]} named #{target["identifier"]} when generating IAM policy" + raise MuError, "Couldn't find a #{target["type"]} named #{target["identifier"]} when generating IAM policy" end else target["identifier"] += target["path"] if target["path"] diff --git a/modules/mu/clouds/aws/vpc.rb b/modules/mu/clouds/aws/vpc.rb index e8ac32d64..961bb8196 100644 --- a/modules/mu/clouds/aws/vpc.rb +++ b/modules/mu/clouds/aws/vpc.rb @@ -1270,7 +1270,11 @@ def self.getDefaultSg(vpc_id, region: MU.curRegion, credentials: nil) def peerWith(peer) peer_ref = MU::Config::Ref.get(peer['vpc']) peer_obj = peer_ref.kitten - peer_id = peer_ref.cloud_id + peer_id = peer_ref.kitten.cloud_id + if peer_id == @cloud_id + MU.log "#{@mu_name} attempted 
to peer with itself (#{@cloud_id})", MU::ERR, details: peer + raise MuError, "#{@mu_name} attempted to peer with itself (#{@cloud_id})" + end if peer_obj and peer_obj.config['peers'] peer_obj.config['peers'].each { |peerpeer| diff --git a/modules/mu/clouds/azure.rb b/modules/mu/clouds/azure.rb index 6c8a68a84..6a260bd33 100644 --- a/modules/mu/clouds/azure.rb +++ b/modules/mu/clouds/azure.rb @@ -47,6 +47,11 @@ def self.genGUID guid_chunks.join("-") end + # List all Azure subscriptions available to our credentials + def self.listHabitats(credentials = nil) + [] + end + # A hook that is always called just before any of the instance methods of # our resource implementations gets invoked, so that we can ensure that # repetitive setup tasks (like resolving +:resource_group+ for Azure @@ -77,6 +82,11 @@ def self.required_instance_methods [:resource_group] end + # Is this a "real" cloud provider, or a stub like CloudFormation? + def self.virtual? + false + end + # Stub class to represent Azure's resource identifiers, which look like: # /subscriptions/3d20ddd8-4652-4074-adda-0d127ef1f0e0/resourceGroups/mu/providers/Microsoft.Network/virtualNetworks/mu-vnet # Various API calls need chunks of this in different contexts, and this diff --git a/modules/mu/clouds/cloudformation.rb b/modules/mu/clouds/cloudformation.rb index 072cf14b6..cec5bd605 100644 --- a/modules/mu/clouds/cloudformation.rb +++ b/modules/mu/clouds/cloudformation.rb @@ -28,6 +28,16 @@ def self.required_instance_methods @@cloudformation_mode = false + # Is this a "real" cloud provider, or a stub like CloudFormation? + def self.virtual? + true + end + + # List all AWS projects available to our credentials + def self.listHabitats(credentials = nil) + MU::Cloud::AWS.listHabitats(credentials) + end + # Return what we think of as a cloud object's habitat. In AWS, this means # the +account_number+ in which it's resident. If this is not applicable, # such as for a {Habitat} or {Folder}, returns nil. diff --git a/modules/mu/clouds/google.rb b/modules/mu/clouds/google.rb index f61f00930..9ba5f56b6 100644 --- a/modules/mu/clouds/google.rb +++ b/modules/mu/clouds/google.rb @@ -52,6 +52,11 @@ def self.required_instance_methods [:url] end + # Is this a "real" cloud provider, or a stub like CloudFormation? + def self.virtual? + false + end + # Most of our resource implementation +find+ methods have to mangle their # args to make sure they've extracted a project or location argument from # other available information. This does it for them.
@@ -337,6 +342,7 @@ def self.initDeploy(deploy) # etc) # @param deploy_id [String] def self.cleanDeploy(deploy_id, credentials: nil, noop: false) + removeDeploySecretsAndRoles(deploy_id, noop: noop, credentials: credentials) end # Plant a Mu deploy secret into a storage bucket somewhere so our kittens can consume it @@ -548,7 +554,7 @@ def self.get_machine_credentials(scopes, credentials = nil) begin listRegions(credentials: credentials) listInstanceTypes(credentials: credentials) - listProjects(credentials) + listHabitats(credentials) rescue ::Google::Apis::ClientError MU.log "Found machine credentials #{@@svc_account_name}, but these don't appear to have sufficient permissions or scopes", MU::WARN, details: scopes @@authorizers.delete(credentials) @@ -701,12 +707,20 @@ def self.defaultFolder(credentials = nil) end # List all Google Cloud Platform projects available to our credentials - def self.listProjects(credentials = nil) + def self.listHabitats(credentials = nil) cfg = credConfig(credentials) - return [] if !cfg or !cfg['project'] + return [] if !cfg + if cfg['restrict_to_habitats'].is_a?(Array) + habitats = cfg['restrict_to_habitats'].dup + habitats << cfg['project'] if cfg['project'] + return habitats.uniq + end result = MU::Cloud::Google.resource_manager(credentials: credentials).list_projects result.projects.reject! { |p| p.lifecycle_state == "DELETE_REQUESTED" } - result.projects.map { |p| p.project_id } + allprojects = result.projects.map { |p| p.project_id } + if cfg['ignore_habitats'].is_a?(Array) + allprojects.reject! { |p| cfg['ignore_habitats'].include?(p) } + end + allprojects end @@regions = {} diff --git a/modules/mu/clouds/google/bucket.rb b/modules/mu/clouds/google/bucket.rb index effbe72ec..29626ca0e 100644 --- a/modules/mu/clouds/google/bucket.rb +++ b/modules/mu/clouds/google/bucket.rb @@ -145,9 +145,9 @@ def self.quality # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server # @return [void] def self.cleanup(noop: false, ignoremaster: false, credentials: nil, flags: {}) - flags["project"] ||= MU::Cloud::Google.defaultProject(credentials) + flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) - resp = MU::Cloud::Google.storage(credentials: credentials).list_buckets(flags['project']) + resp = MU::Cloud::Google.storage(credentials: credentials).list_buckets(flags['habitat']) if resp and resp.items resp.items.each { |bucket| if bucket.labels and bucket.labels["mu-id"] == MU.deploy_id.downcase and (ignoremaster or bucket.labels['mu-master-ip'] == MU.mu_public_ip.gsub(/\./, "_")) diff --git a/modules/mu/clouds/google/container_cluster.rb b/modules/mu/clouds/google/container_cluster.rb index ca258d6df..aeb8e9a5a 100644 --- a/modules/mu/clouds/google/container_cluster.rb +++ b/modules/mu/clouds/google/container_cluster.rb @@ -474,7 +474,6 @@ def groom MU.log %Q{How to interact with your GKE cluster\nkubectl --kubeconfig "#{kube_conf}" get events --all-namespaces\nkubectl --kubeconfig "#{kube_conf}" get all\nkubectl --kubeconfig "#{kube_conf}" create -f some_k8s_deploy.yml\nkubectl --kubeconfig "#{kube_conf}" get nodes}, MU::SUMMARY end - # Locate an existing ContainerCluster or ContainerClusters and return an array containing matching GCP resource descriptors for those that match.
# @return [Array>]: The cloud provider's complete descriptions of matching ContainerClusters def self.find(**args) @@ -747,15 +746,15 @@ def self.quality # @return [void] def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {}) - flags["project"] ||= MU::Cloud::Google.defaultProject(credentials) - return if !MU::Cloud::Google::Habitat.isLive?(flags["project"], credentials) + flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) + return if !MU::Cloud::Google::Habitat.isLive?(flags["habitat"], credentials) clusters = [] # Make sure we catch regional *and* zone clusters - found = MU::Cloud::Google.container(credentials: credentials).list_project_location_clusters("projects/#{flags['project']}/locations/#{region}") + found = MU::Cloud::Google.container(credentials: credentials).list_project_location_clusters("projects/#{flags['habitat']}/locations/#{region}") clusters.concat(found.clusters) if found and found.clusters MU::Cloud::Google.listAZs(region).each { |az| - found = MU::Cloud::Google.container(credentials: credentials).list_project_location_clusters("projects/#{flags['project']}/locations/#{az}") + found = MU::Cloud::Google.container(credentials: credentials).list_project_location_clusters("projects/#{flags['habitat']}/locations/#{az}") clusters.concat(found.clusters) if found and found.clusters } @@ -1097,7 +1096,7 @@ def self.validateConfig(cluster, configurator) } if !match MU.log "No version matching #{cluster['kubernetes']['version']} available, will try floating minor revision", MU::WARN - cluster['kubernetes']['version'].sub!(/^(\d+\.\d+\.).*/i, '\1') + cluster['kubernetes']['version'].sub!(/^(\d+\.\d+)\..*/i, '\1') master_versions.each { |v| if v.match(/^#{Regexp.quote(cluster['kubernetes']['version'])}/) match = true @@ -1145,6 +1144,10 @@ def self.validateConfig(cluster, configurator) cluster['instance_type'] = MU::Cloud::Google::Server.validateInstanceType(cluster["instance_type"], cluster["region"], project: cluster['project'], credentials: cluster['credentials']) ok = false if cluster['instance_type'].nil? 
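+          # GKE can create the cluster itself without kubectl, but the post-creation service account, user, and role bindings are applied through the CLI, so warn up front when it's missing.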
+ if !MU::Master.kubectl + MU.log "Since I can't find a kubectl executable, you will have to handle all service account, user, and role bindings manually!", MU::WARN + end + ok end @@ -1236,7 +1239,7 @@ def writeKubeConfig # Take this opportunity to ensure that the 'client' service account # used by certificate authentication exists and has appropriate # privilege - if @username and @password + if @username and @password and MU::Master.kubectl File.open(client_binding, "w"){ |k| k.puts <<-EOF kind: ClusterRoleBinding diff --git a/modules/mu/clouds/google/database.rb b/modules/mu/clouds/google/database.rb index 2521eabe3..d4520db4a 100644 --- a/modules/mu/clouds/google/database.rb +++ b/modules/mu/clouds/google/database.rb @@ -108,13 +108,13 @@ def self.quality # @param region [String]: The cloud provider region in which to operate # @return [void] def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {}) - flags["project"] ||= MU::Cloud::Google.defaultProject(credentials) + flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) -# instances = MU::Cloud::Google.sql(credentials: credentials).list_instances(flags['project'], filter: %Q{userLabels.mu-id:"#{MU.deploy_id.downcase}"}) +# instances = MU::Cloud::Google.sql(credentials: credentials).list_instances(flags['habitat'], filter: %Q{userLabels.mu-id:"#{MU.deploy_id.downcase}"}) # if instances and instances.items # instances.items.each { |instance| # MU.log "Deleting Cloud SQL instance #{instance.name}" -# MU::Cloud::Google.sql(credentials: credentials).delete_instance(flags['project'], instance.name) if !noop +# MU::Cloud::Google.sql(credentials: credentials).delete_instance(flags['habitat'], instance.name) if !noop # } # end end diff --git a/modules/mu/clouds/google/firewall_rule.rb b/modules/mu/clouds/google/firewall_rule.rb index 25dafb863..1a963d0a2 100644 --- a/modules/mu/clouds/google/firewall_rule.rb +++ b/modules/mu/clouds/google/firewall_rule.rb @@ -208,8 +208,8 @@ def self.quality # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server # @return [void] def self.cleanup(noop: false, ignoremaster: false, credentials: nil, flags: {}) - flags["project"] ||= MU::Cloud::Google.defaultProject(credentials) - return if !MU::Cloud::Google::Habitat.isLive?(flags["project"], credentials) + flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) + return if !MU::Cloud::Google::Habitat.isLive?(flags["habitat"], credentials) filter = %Q{(labels.mu-id = "#{MU.deploy_id.downcase}")} if !ignoremaster and MU.mu_public_ip filter += %Q{ AND (labels.mu-master-ip = "#{MU.mu_public_ip.gsub(/\./, "_")}")} @@ -218,7 +218,7 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, flags: {}) MU::Cloud::Google.compute(credentials: credentials).delete( "firewall", - flags["project"], + flags["habitat"], nil, noop ) diff --git a/modules/mu/clouds/google/function.rb b/modules/mu/clouds/google/function.rb index e1e98753a..d57af699d 100644 --- a/modules/mu/clouds/google/function.rb +++ b/modules/mu/clouds/google/function.rb @@ -234,10 +234,10 @@ def self.quality # @param region [String]: The cloud provider region # @return [void] def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {}) - flags["project"] ||= MU::Cloud::Google.defaultProject(credentials) - return if !MU::Cloud::Google::Habitat.isLive?(flags["project"], credentials) + flags["habitat"] ||= 
MU::Cloud::Google.defaultProject(credentials) + return if !MU::Cloud::Google::Habitat.isLive?(flags["habitat"], credentials) # Make sure we catch regional *and* zone functions - found = MU::Cloud::Google::Function.find(credentials: credentials, region: region, project: flags["project"]) + found = MU::Cloud::Google::Function.find(credentials: credentials, region: region, project: flags["habitat"]) found.each_pair { |cloud_id, desc| if (desc.description and desc.description == MU.deploy_id) or (desc.labels and desc.labels["mu-id"] == MU.deploy_id.downcase and (ignoremaster or desc.labels["mu-master-ip"] == MU.mu_public_ip.gsub(/\./, "_"))) or diff --git a/modules/mu/clouds/google/loadbalancer.rb b/modules/mu/clouds/google/loadbalancer.rb index 053d1ba5b..3ce30df34 100644 --- a/modules/mu/clouds/google/loadbalancer.rb +++ b/modules/mu/clouds/google/loadbalancer.rb @@ -147,8 +147,8 @@ def self.quality # @param region [String]: The cloud provider region # @return [void] def self.cleanup(noop: false, ignoremaster: false, region: nil, credentials: nil, flags: {}) - flags["project"] ||= MU::Cloud::Google.defaultProject(credentials) - return if !MU::Cloud::Google::Habitat.isLive?(flags["project"], credentials) + flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) + return if !MU::Cloud::Google::Habitat.isLive?(flags["habitat"], credentials) filter = %Q{(labels.mu-id = "#{MU.deploy_id.downcase}")} if !ignoremaster and MU.mu_public_ip filter += %Q{ AND (labels.mu-master-ip = "#{MU.mu_public_ip.gsub(/\./, "_")}")} @@ -159,7 +159,7 @@ def self.cleanup(noop: false, ignoremaster: false, region: nil, credentials: nil ["forwarding_rule", "region_backend_service"].each { |type| MU::Cloud::Google.compute(credentials: credentials).delete( type, - flags["project"], + flags["habitat"], region, noop ) @@ -170,7 +170,7 @@ def self.cleanup(noop: false, ignoremaster: false, region: nil, credentials: nil ["global_forwarding_rule", "target_http_proxy", "target_https_proxy", "url_map", "backend_service", "health_check", "http_health_check", "https_health_check"].each { |type| MU::Cloud::Google.compute(credentials: credentials).delete( type, - flags["project"], + flags["habitat"], nil, noop ) diff --git a/modules/mu/clouds/google/role.rb b/modules/mu/clouds/google/role.rb index e74b35945..221e60511 100644 --- a/modules/mu/clouds/google/role.rb +++ b/modules/mu/clouds/google/role.rb @@ -731,25 +731,34 @@ def toKitten(**args) bindings[scopetype].each_pair { |scope_id, entity_types| # If we've been given a habitat filter, skip over bindings # that don't match it. - if scopetype == "projects" and args[:habitats] and - !args[:habitats].empty? and - !args[:habitats].include?(scope_id) - next + if scopetype == "projects" + if (args[:habitats] and !args[:habitats].empty? and + !args[:habitats].include?(scope_id)) or + !MU::Cloud::Google.listHabitats(@credentials).include?(scope_id) + next + end end entity_types.each_pair { |entity_type, entities| mu_entitytype = (entity_type == "serviceAccount" ? "user" : entity_type)+"s" entities.each { |entity| + foreign = if entity_type == "serviceAccount" and entity.match(/@(.*?)\.iam\.gserviceaccount\.com/) + !MU::Cloud::Google.listHabitats(@credentials).include?(Regexp.last_match[1]) + end entity_ref = if entity_type == "organizations" { "id" => ((org == my_org.name and @config['credentials']) ? 
@config['credentials'] : org) } elsif entity_type == "domain" { "id" => entity } else - MU::Config::Ref.get( - id: entity, - cloud: "Google", - type: mu_entitytype - ) + if foreign + { "id" => entity } + else + MU::Config::Ref.get( + id: entity, + cloud: "Google", + type: mu_entitytype + ) + end end refmap ||= {} refmap[entity_ref] ||= {} diff --git a/modules/mu/clouds/google/server.rb b/modules/mu/clouds/google/server.rb index 92966eab6..e228c192f 100644 --- a/modules/mu/clouds/google/server.rb +++ b/modules/mu/clouds/google/server.rb @@ -1016,7 +1016,6 @@ def getWindowsAdminPassword(use_cache: true) item: @config['windows_auth_vault']['item'], field: @config["windows_auth_vault"]["password_field"] ) -MU.log "RETURNINATING FROM CACHE", MU::WARN, details: win_admin_password return win_admin_password if win_admin_password rescue MU::Groomer::MuNoSuchSecret, MU::Groomer::RunError end @@ -1276,8 +1275,8 @@ def self.quality # @param region [String]: The cloud provider region # @return [void] def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {}) - flags["project"] ||= MU::Cloud::Google.defaultProject(credentials) - return if !MU::Cloud::Google::Habitat.isLive?(flags["project"], credentials) + flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) + return if !MU::Cloud::Google::Habitat.isLive?(flags["habitat"], credentials) # XXX make damn sure MU.deploy_id is set filter = %Q{(labels.mu-id = "#{MU.deploy_id.downcase}")} @@ -1288,13 +1287,12 @@ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credent MU::Cloud::Google.listAZs(region).each { |az| disks = [] resp = MU::Cloud::Google.compute(credentials: credentials).list_instances( - flags["project"], + flags["habitat"], az, filter: filter ) if !resp.items.nil? and resp.items.size > 0 resp.items.each { |instance| - saname = instance.tags.items.first.gsub(/[^a-z]/, "") # XXX this nonsense again MU.log "Terminating instance #{instance.name}" if !instance.disks.nil? and instance.disks.size > 0 instance.disks.each { |disk| @@ -1302,17 +1300,21 @@ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credent } end MU::Cloud::Google.compute(credentials: credentials).delete_instance( - flags["project"], + flags["habitat"], az, instance.name ) if !noop - MU.log "Removing service account #{saname}" - begin - MU::Cloud::Google.iam(credentials: credentials).delete_project_service_account( - "projects/#{flags["project"]}/serviceAccounts/#{saname}@#{flags["project"]}.iam.gserviceaccount.com" - ) if !noop - rescue ::Google::Apis::ClientError => e - raise e if !e.message.match(/^notFound: /) + if instance.service_accounts + instance.service_accounts.each { |sa| + MU.log "Removing service account #{sa.email}" + begin + MU::Cloud::Google.iam(credentials: credentials).delete_project_service_account( + "projects/#{flags["habitat"]}/serviceAccounts/#{sa.email}" + ) if !noop + rescue ::Google::Apis::ClientError => e + raise e if !e.message.match(/^notFound: /) + end + } end # XXX wait-loop on pending? 
# pp deletia @@ -1325,7 +1327,7 @@ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credent # XXX honor snapshotting MU::Cloud::Google.compute(credentials: credentials).delete( "disk", - flags["project"], + flags["habitat"], az, noop ) if !noop diff --git a/modules/mu/clouds/google/server_pool.rb b/modules/mu/clouds/google/server_pool.rb index 709ed7d27..0551282a8 100644 --- a/modules/mu/clouds/google/server_pool.rb +++ b/modules/mu/clouds/google/server_pool.rb @@ -432,8 +432,8 @@ def self.quality # @param region [String]: The cloud provider region # @return [void] def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {}) - flags["project"] ||= MU::Cloud::Google.defaultProject(credentials) - return if !MU::Cloud::Google::Habitat.isLive?(flags["project"], credentials) + flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) + return if !MU::Cloud::Google::Habitat.isLive?(flags["habitat"], credentials) filter = %Q{(labels.mu-id = "#{MU.deploy_id.downcase}")} if !ignoremaster and MU.mu_public_ip filter += %Q{ AND (labels.mu-master-ip = "#{MU.mu_public_ip.gsub(/\./, "_")}")} @@ -444,7 +444,7 @@ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credent ["region_autoscaler", "region_instance_group_manager"].each { |type| MU::Cloud::Google.compute(credentials: credentials).delete( type, - flags["project"], + flags["habitat"], region, noop ) @@ -452,7 +452,7 @@ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credent else MU::Cloud::Google.compute(credentials: credentials).delete( "instance_template", - flags["project"], + flags["habitat"], noop ) end diff --git a/modules/mu/clouds/google/user.rb b/modules/mu/clouds/google/user.rb index 270febc70..139876058 100644 --- a/modules/mu/clouds/google/user.rb +++ b/modules/mu/clouds/google/user.rb @@ -281,9 +281,9 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, flags: {}) end end - flags["project"] ||= MU::Cloud::Google.defaultProject(credentials) + flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) resp = MU::Cloud::Google.iam(credentials: credentials).list_project_service_accounts( - "projects/"+flags["project"] + "projects/"+flags["habitat"] ) if resp and resp.accounts and MU.deploy_id diff --git a/modules/mu/clouds/google/vpc.rb b/modules/mu/clouds/google/vpc.rb index 744bb18e5..38549a371 100644 --- a/modules/mu/clouds/google/vpc.rb +++ b/modules/mu/clouds/google/vpc.rb @@ -113,7 +113,7 @@ def trafficLogging(log_group_name: nil, resource_id: nil, resource_type: "VPC", # Describe this VPC # @return [Hash] def notify - base = MU.structToHash(cloud_desc) + base = MU.structToHash(cloud_desc, stringify_keys: true) base["cloud_id"] = @cloud_id base["project_id"] = habitat_id base.merge!(@config.to_h) @@ -301,14 +301,10 @@ def loadSubnets(use_cache: true) @deploy.deployment["vpcs"][@config['name']]["subnets"] and @deploy.deployment["vpcs"][@config['name']]["subnets"].size > 0 @deploy.deployment["vpcs"][@config['name']]["subnets"].each { |desc| - subnet = {} - subnet["ip_block"] = desc['ip_block'] - subnet["name"] = desc["name"] + subnet = desc.clone subnet['mu_name'] = @config['scrub_mu_isms'] ? 
@cloud_id+subnet['name'].downcase : MU::Cloud::Google.nameStr(@deploy.getResourceName(subnet['name'], max_length: 61)) - subnet["cloud_id"] = desc['cloud_id'] subnet["cloud_id"] ||= desc['self_link'].gsub(/.*?\/([^\/]+)$/, '\1') subnet["cloud_id"] ||= subnet['mu_name'] - subnet['az'] = desc["az"] subnet['az'] ||= desc["region"].gsub(/.*?\/([^\/]+)$/, '\1') @subnets << MU::Cloud::Google::VPC::Subnet.new(self, subnet, precache_description: false) } @@ -542,15 +538,15 @@ def self.quality # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server # @return [void] def self.cleanup(noop: false, ignoremaster: false, credentials: nil, flags: {}) - flags["project"] ||= MU::Cloud::Google.defaultProject(credentials) - return if !MU::Cloud::Google::Habitat.isLive?(flags["project"], credentials) + flags["habitat"] ||= MU::Cloud::Google.defaultProject(credentials) + return if !MU::Cloud::Google::Habitat.isLive?(flags["habitat"], credentials) filter = %Q{(labels.mu-id = "#{MU.deploy_id.downcase}")} if !ignoremaster and MU.mu_public_ip filter += %Q{ AND (labels.mu-master-ip = "#{MU.mu_public_ip.gsub(/\./, "_")}")} end MU.log "Placeholder: Google VPC artifacts do not support labels, so ignoremaster cleanup flag has no effect", MU::DEBUG, details: filter - purge_subnets(noop, project: flags['project'], credentials: credentials) + purge_subnets(noop, project: flags['habitat'], credentials: credentials) ["route", "network"].each { |type| # XXX tagged routes aren't showing up in list, and the networks that own them # fail to delete silently @@ -559,7 +555,7 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, flags: {}) begin MU::Cloud::Google.compute(credentials: credentials).delete( type, - flags["project"], + flags["habitat"], nil, noop ) @@ -569,13 +565,13 @@ def self.cleanup(noop: false, ignoremaster: false, credentials: nil, flags: {}) MU.log e.message, MU::WARN if e.message.match(/Failed to delete network (.+)/) network_name = Regexp.last_match[1] - fwrules = MU::Cloud::Google::FirewallRule.find(project: flags['project'], credentials: credentials) + fwrules = MU::Cloud::Google::FirewallRule.find(project: flags['habitat'], credentials: credentials) fwrules.reject! { |_name, desc| !desc.network.match(/.*?\/#{Regexp.quote(network_name)}$/) } fwrules.keys.each { |name| MU.log "Attempting to delete firewall rule #{name} so that VPC #{network_name} can be removed", MU::NOTICE - MU::Cloud::Google.compute(credentials: credentials).delete_firewall(flags['project'], name) + MU::Cloud::Google.compute(credentials: credentials).delete_firewall(flags['habitat'], name) } end end @@ -1120,7 +1116,7 @@ def defaultRoute # Describe this VPC Subnet # @return [Hash] def notify - MU.structToHash(cloud_desc) + MU.structToHash(cloud_desc, stringify_keys: true) end # Return the +self_link+ to this subnet diff --git a/modules/mu/config.rb b/modules/mu/config.rb index bd69e42da..404659c49 100644 --- a/modules/mu/config.rb +++ b/modules/mu/config.rb @@ -77,7 +77,7 @@ def self.manxify(config, remove_runtime_keys: false) if config.is_a?(Hash) newhash = {} config.each_pair { |key, val| - next if remove_runtime_keys and key.match(/^#MU_/) + next if remove_runtime_keys and (key.nil? or key.match(/^#MU_/)) next if val.is_a?(Array) and val.empty? 
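+      # Recurse into nested hashes and arrays so runtime-only keys are scrubbed at every depth of the config.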
newhash[key] = self.manxify(val, remove_runtime_keys: remove_runtime_keys) } diff --git a/modules/mu/config/container_cluster.rb b/modules/mu/config/container_cluster.rb index d46ac334d..9aa97bcb3 100644 --- a/modules/mu/config/container_cluster.rb +++ b/modules/mu/config/container_cluster.rb @@ -104,6 +104,11 @@ def self.validate(cluster, _configurator) cluster["min_size"] ||= [cluster["instance_count"], cluster["min_size"]].reject { |c| c.nil? }.min end + if cluster['kubernetes_resources'] and !MU::Master.kubectl + MU.log "Cannot apply kubernetes resources without a working kubectl executable", MU::ERR + ok = false + end + ok end diff --git a/modules/mu/config/doc_helpers.rb b/modules/mu/config/doc_helpers.rb index 56999de56..5ba4cd076 100644 --- a/modules/mu/config/doc_helpers.rb +++ b/modules/mu/config/doc_helpers.rb @@ -239,7 +239,7 @@ def self.printSchema(kitten_rb, class_hierarchy, schema, in_array = false, requi if class_hierarchy.size == 1 - _shortclass, cfg_name, cfg_plural, _classname = MU::Cloud.getResourceNames(name) + _shortclass, cfg_name, cfg_plural, _classname = MU::Cloud.getResourceNames(name, false) if cfg_name example_path = MU.myRoot+"/modules/mu/config/"+cfg_name+".yml" if File.exist?(example_path) diff --git a/modules/mu/config/ref.rb b/modules/mu/config/ref.rb index 21c647b76..1addece45 100644 --- a/modules/mu/config/ref.rb +++ b/modules/mu/config/ref.rb @@ -255,7 +255,7 @@ def kitten(mommacat = @mommacat, shallow: false) if @obj @deploy_id ||= @obj.deploy_id @id ||= @obj.cloud_id - @name ||= @obj.config['name'] + @name ||= @obj.config['name'] if @obj.config return @obj end @@ -266,6 +266,7 @@ def kitten(mommacat = @mommacat, shallow: false) @mommacat ||= mommacat @obj.intoDeploy(@mommacat) # make real sure these are set @deploy_id ||= mommacat.deploy_id + if !@name if @obj.config and @obj.config['name'] @name = @obj.config['name'] @@ -283,6 +284,7 @@ def kitten(mommacat = @mommacat, shallow: false) end if !@obj and !(@cloud == "Google" and @id and @type == "users" and MU::Cloud::Google::User.cannedServiceAcctName?(@id)) and !shallow + try_deploy_id = @deploy_id begin hab_arg = if @habitat.nil? @@ -300,22 +302,26 @@ def kitten(mommacat = @mommacat, shallow: false) @type, name: @name, cloud_id: @id, - deploy_id: @deploy_id, + deploy_id: try_deploy_id, region: @region, habitats: hab_arg, credentials: @credentials, dummy_ok: (["habitats", "folders", "users", "groups", "vpcs"].include?(@type)) ) @obj ||= found.first if found + rescue MU::MommaCat::MultipleMatches => e + if try_deploy_id.nil? and MU.deploy_id + MU.log "Attempting to narrow down #{@cloud} #{@type} to #{MU.deploy_id}", MU::NOTICE + try_deploy_id = MU.deploy_id + retry + else + raise e + end rescue ThreadError => e # Sometimes MommaCat calls us in a potential deadlock situation; # don't be the cause of a fatal error if so, we don't need this # object that badly. 
raise e if !e.message.match(/recursive locking/) -rescue SystemExit -# XXX this is temporary, to cope with some debug stuff that's in findStray -# for the nonce -return end end diff --git a/modules/mu/config/schema_helpers.rb b/modules/mu/config/schema_helpers.rb index dc016d171..9a3b874a5 100644 --- a/modules/mu/config/schema_helpers.rb +++ b/modules/mu/config/schema_helpers.rb @@ -276,15 +276,20 @@ def applySchemaDefaults(conf_chunk = config, schema_chunk = schema, depth = 0, s schema_chunk["properties"]["creation_style"] != "existing" schema_chunk["properties"].each_pair { |key, subschema| shortclass = if conf_chunk[key] - shortclass, _cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(key) + shortclass, _cfg_name, _cfg_plural, _classname = MU::Cloud.getResourceNames(key, false) shortclass else nil end new_val = applySchemaDefaults(conf_chunk[key], subschema, depth+1, conf_chunk, type: shortclass).dup - - conf_chunk[key] = Marshal.load(Marshal.dump(new_val)) if !new_val.nil? + if !new_val.nil? + begin + conf_chunk[key] = Marshal.load(Marshal.dump(new_val)) + rescue TypeError + conf_chunk[key] = new_val.clone + end + end } end elsif schema_chunk["type"] == "array" and conf_chunk.kind_of?(Array) diff --git a/modules/mu/config/server.rb b/modules/mu/config/server.rb index 0c7d0daea..159fde849 100644 --- a/modules/mu/config/server.rb +++ b/modules/mu/config/server.rb @@ -625,6 +625,13 @@ def self.validate(server, configurator) server['vault_access'] << {"vault" => "splunk", "item" => "admin_user"} ok = false if !MU::Config::Server.checkVaultRefs(server) + server['groomer'] ||= self.defaultGroomer + groomclass = MU::Groomer.loadGroomer(server['groomer']) + if !groomclass.available?(server['platform'].match(/^win/)) + MU.log "Groomer #{server['groomer']} for #{server['name']} is missing or has incomplete dependencies", MU::ERR + ok = false + end + if server["cloud"] != "Azure" server['dependencies'] << configurator.adminFirewallRuleset(vpc: server['vpc'], region: server['region'], cloud: server['cloud'], credentials: server['credentials']) end diff --git a/modules/mu/config/tail.rb b/modules/mu/config/tail.rb index 5fc2f7dc5..852a7cc26 100644 --- a/modules/mu/config/tail.rb +++ b/modules/mu/config/tail.rb @@ -133,6 +133,7 @@ def gsub(*args) # @param pseudo []: This is a pseudo-parameter, automatically provided, and not available as user input. # @param runtimecode []: Actual code to allow the cloud layer to interpret literally in its own idiom, e.g. '"Ref" : "AWS::StackName"' for CloudFormation def getTail(param, value: nil, prettyname: nil, cloudtype: "String", valid_values: [], description: nil, list_of: nil, prefix: "", suffix: "", pseudo: false, runtimecode: nil) + param = param.gsub(/[^a-z0-9_]/i, "_") if value.nil? if @@parameters.nil? 
or !@@parameters.has_key?(param) MU.log "Parameter '#{param}' (#{param.class.name}) referenced in config but not provided (#{caller[0]})", MU::DEBUG, details: @@parameters diff --git a/modules/mu/config/vpc.rb b/modules/mu/config/vpc.rb index 17a781489..bfacbb5eb 100644 --- a/modules/mu/config/vpc.rb +++ b/modules/mu/config/vpc.rb @@ -493,6 +493,7 @@ def self.validate(vpc, configurator) # See if we'll be able to create peering connections can_peer = false already_peered = false + if MU.myCloud == vpc["cloud"] and MU.myVPCObj if vpc['peers'] vpc['peers'].each { |peer| @@ -636,7 +637,7 @@ def self.resolvePeers(vpc, configurator) MU.log "VPC peering connections to non-local accounts must specify the vpc_id of the peer.", MU::ERR ok = false end - elsif !processReference(peer['vpc'], "vpcs", "vpc '#{vpc['name']}'", configurator, dflt_region: peer["vpc"]['region']) + elsif !processReference(peer['vpc'], "vpcs", vpc, configurator, dflt_region: peer["vpc"]['region']) ok = false end end @@ -735,8 +736,8 @@ def self.processReference(vpc_block, parent_type, parent, configurator, sibling_ vpc_block["subnet_pref"] = "all_private" if vpc_block["subnet_pref"] == "private" end - flags = {} - flags["subnet_pref"] = vpc_block["subnet_pref"] if !vpc_block["subnet_pref"].nil? +# flags = {} +# flags["subnet_pref"] = vpc_block["subnet_pref"] if !vpc_block["subnet_pref"].nil? hab_arg = if vpc_block['habitat'] if vpc_block['habitat'].is_a?(MU::Config::Ref) [vpc_block['habitat'].id] # XXX actually, findStray it @@ -770,9 +771,9 @@ def self.processReference(vpc_block, parent_type, parent, configurator, sibling_ tag_key: tag_key, tag_value: tag_value, region: vpc_block["region"], - flags: flags, habitats: hab_arg, - dummy_ok: true + dummy_ok: true, + subnet_pref: vpc_block["subnet_pref"] ) found.first if found and found.size == 1 @@ -799,7 +800,7 @@ def self.processReference(vpc_block, parent_type, parent, configurator, sibling_ @@reference_cache[vpc_block] ||= ext_vpc if ok end rescue StandardError => e - raise MuError, e.inspect, e.backtrace + raise MuError, e.inspect, caller + (e.backtrace || []) ensure if !ext_vpc and vpc_block['cloud'] != "CloudFormation" MU.log "Couldn't resolve VPC reference to a unique live VPC in #{parent_type} #{parent['name']} (called by #{caller[0]})", MU::ERR, details: vpc_block @@ -923,7 +924,14 @@ def self.processReference(vpc_block, parent_type, parent, configurator, sibling_ ext_vpc.subnets.each { |subnet| next if dflt_region and vpc_block["cloud"] == "Google" and subnet.az != dflt_region if subnet.private? and (vpc_block['subnet_pref'] != "all_public" and vpc_block['subnet_pref'] != "public") - private_subnets << { "subnet_id" => configurator.getTail("#{parent['name']} Private Subnet #{priv}", value: subnet.cloud_id, prettyname: "#{parent['name']} Private Subnet #{priv}", cloudtype: "AWS::EC2::Subnet::Id"), "az" => subnet.az } + private_subnets << { + "subnet_id" => configurator.getTail( + "#{parent['name']} Private Subnet #{priv}", + value: subnet.cloud_id, + prettyname: "#{parent['name']} Private Subnet #{priv}", + cloudtype: "AWS::EC2::Subnet::Id"), + "az" => subnet.az + } private_subnets_map[subnet.cloud_id] = subnet priv = priv + 1 elsif !subnet.private?
and vpc_block['subnet_pref'] != "all_private" and vpc_block['subnet_pref'] != "private" diff --git a/modules/mu/config/vpc.yml b/modules/mu/config/vpc.yml index a0542173f..9e0d5b4cb 100644 --- a/modules/mu/config/vpc.yml +++ b/modules/mu/config/vpc.yml @@ -1,7 +1,6 @@ <% if complexity == 'complex' %> name: <%= vpc_name %> create_nat_gateway: true -ip_block: 10.231.0.0/16 enable_traffic_logging: true region: us-east-2 availability_zones: diff --git a/modules/mu/defaults/AWS.yaml b/modules/mu/defaults/AWS.yaml index 8ebebf735..f6215c90e 100644 --- a/modules/mu/defaults/AWS.yaml +++ b/modules/mu/defaults/AWS.yaml @@ -73,56 +73,56 @@ ubuntu14: ap-southeast-1: ami-2855964b ap-southeast-2: ami-d19fc4b2 win2k12r2: &1 - us-east-1: ami-00f7cf8d57d29a8a7 - us-east-2: ami-0c14a2a9b1d88428d - ca-central-1: ami-0210e4efc4186f89d - us-west-2: ami-036681205605cba8c - us-west-1: ami-072d0f2b03f351e5c - eu-west-1: ami-061524b3efcc026da - eu-west-2: ami-0a7aeb2dae7c7154b - eu-west-3: ami-0b16adff6701f08bb - eu-north-1: ami-09bd34c6465aa914b - sa-east-1: ami-078221cae70b179c4 - eu-central-1: ami-047d37ec58a8469fb - ap-northeast-1: ami-0ce23ffef990003d2 - ap-south-1: ami-0106284f16a19651a - ap-northeast-2: ami-0518e43d0367f1a6d - ap-southeast-1: ami-0858019a829a5169d - ap-southeast-2: ami-0e0d7d3acb6427f53 + us-east-1: ami-003aea65bc2e7136a + us-east-2: ami-0163293e39ba504c2 + ca-central-1: ami-055689dd92f29d2aa + us-west-2: ami-0ce87dda2c9244e57 + us-west-1: ami-00d9cf64bd2fafa44 + eu-west-1: ami-026d7427b9fadad40 + eu-west-2: ami-036a22c0780551794 + eu-west-3: ami-05e3d9b79bdc10861 + eu-north-1: ami-063eb48504c7d73f1 + sa-east-1: ami-0a8c1829a5e650bc5 + eu-central-1: ami-0ea20cef52335b008 + ap-northeast-1: ami-08db2dc67228dbb90 + ap-south-1: ami-012241411db3f09c3 + ap-northeast-2: ami-0368c224de1d20502 + ap-southeast-1: ami-028ef74e1edc3943a + ap-southeast-2: ami-09e03eab1b1bc151b win2k16: &5 - us-east-1: ami-090dd1749dc0be91d - us-east-2: ami-09c9eeb95291e63d7 - ca-central-1: ami-0c63a53a15fca4238 - us-west-2: ami-0b8540e9a207143eb - eu-west-1: ami-067b5d8d85d3c8cf8 - us-west-1: ami-0a2dc7e2cf21ab3e9 - eu-west-2: ami-0cece465ae4b18027 - eu-west-3: ami-0cc5f29ed6e0e8a67 - eu-central-1: ami-02d4da18299373531 - sa-east-1: ami-021d5c906c1898430 - ap-northeast-1: ami-01129c98f812a3be7 - ap-south-1: ami-005c7910458e9541d - ap-northeast-2: ami-024840a881c449d78 - ap-southeast-2: ami-070af17644a596301 - ap-southeast-1: ami-0bc2e098a07140bc4 - eu-north-1: ami-03dcc78d77d2ee027 + us-east-1: ami-02801a2c8dcbfb883 + us-east-2: ami-0ca4f779a2a58a7ea + ca-central-1: ami-05d3854d9d6e9bcc5 + us-west-2: ami-091f4a88ce32d28b6 + eu-west-1: ami-0b938c9b23ed7d18c + us-west-1: ami-0fd744c3fbe8260f2 + eu-west-2: ami-071a89b959c5eda27 + eu-west-3: ami-0b206e3dbda9ff9eb + eu-central-1: ami-0dd9bdad31dd0d3ce + sa-east-1: ami-0d69b8d6c0f9a7bae + ap-northeast-1: ami-02eb4a6f519bc3190 + ap-south-1: ami-0666fd543ac8b5501 + ap-northeast-2: ami-01277c81f9b91cf77 + ap-southeast-2: ami-0426a246f9b0ccadd + ap-southeast-1: ami-07ecb0d55c2eb7247 + eu-north-1: ami-047811530583b6d08 win2k19: - us-east-1: ami-09946f18cbcdce65c - us-east-2: ami-02ab72768678bb7d0 - ca-central-1: ami-0fcf1b24169d88d7f - us-west-2: ami-025ca67c85e4e147d - eu-west-2: ami-08a0e09c469e6a557 - us-west-1: ami-05960eb854f91cbb8 - eu-west-1: ami-0f91d02c05561cf5b - eu-central-1: ami-0faafc220143c941c - eu-west-3: ami-0d2a0b6f21c7ce4a2 - eu-north-1: ami-0fecae116e9e331bf - sa-east-1: ami-050693ece618acaa5 - ap-northeast-2: ami-0e0ebd96765911d72 - ap-northeast-1: 
ami-0729606d0a4051499 - ap-southeast-1: ami-06ba249ee50ee9669 - ap-southeast-2: ami-03a051a6d0f2b79d5 - ap-south-1: ami-02c287a24c5e872f6 + us-east-1: ami-00820419bf212df7e + us-east-2: ami-0a7916b90aa4629d5 + ca-central-1: ami-0d704529661e19185 + us-west-2: ami-0ee6a198d7ac35eb1 + eu-west-2: ami-0f6ac1634bd7add92 + us-west-1: ami-039e3816b4cac1e27 + eu-west-1: ami-03a771d99091199b7 + eu-central-1: ami-03b648d5b45f51a4f + eu-west-3: ami-068839907c18c3a6e + eu-north-1: ami-0db851ee76f7deefb + sa-east-1: ami-0c2cc60c62159f87c + ap-northeast-2: ami-06bdf8ae9ae9add92 + ap-northeast-1: ami-02306d959c7f175b9 + ap-southeast-1: ami-0d5b4a3d73e0f471f + ap-southeast-2: ami-00fa88caff4f64937 + ap-south-1: ami-0b44feae4bb9f497a amazon: us-east-1: ami-b73b63a0 us-east-2: ami-58277d3d diff --git a/modules/mu/deploy.rb b/modules/mu/deploy.rb index ae7596447..5d7dda888 100644 --- a/modules/mu/deploy.rb +++ b/modules/mu/deploy.rb @@ -394,7 +394,7 @@ def run Thread.handle_interrupt(MU::Cloud::MuCloudResourceNotImplemented => :never) { begin Thread.handle_interrupt(MU::Cloud::MuCloudResourceNotImplemented => :immediate) { - MU.log "Cost calculator not available for this stack, as it uses a resource not implemented in Mu's CloudFormation layer.", MU::WARN, verbosity: MU::Logger::NORMAL + MU.log "Cost calculator not available for this stack, as it uses a resource not implemented in Mu's CloudFormation layer.", MU::DEBUG, verbosity: MU::Logger::NORMAL Thread.current.exit } ensure diff --git a/modules/mu/groomer.rb b/modules/mu/groomer.rb index 0cd6a0d9b..2cfb17af5 100644 --- a/modules/mu/groomer.rb +++ b/modules/mu/groomer.rb @@ -37,7 +37,7 @@ def self.requiredMethods # Class methods that any Groomer plugin must implement def self.requiredClassMethods - [:getSecret, :cleanup, :saveSecret, :deleteSecret] + [:getSecret, :cleanup, :saveSecret, :deleteSecret, :available?] end class Ansible; diff --git a/modules/mu/groomers/ansible.rb b/modules/mu/groomers/ansible.rb index 87b5c3c33..f08882db8 100644 --- a/modules/mu/groomers/ansible.rb +++ b/modules/mu/groomers/ansible.rb @@ -24,6 +24,10 @@ class Ansible class NoAnsibleExecError < MuError; end + # One or more Python dependencies missing + class AnsibleLibrariesError < MuError; + end + # Location in which we'll find our Ansible executables. This only applies # to full-grown Mu masters; minimalist gem installs will have to make do # with whatever Ansible executables they can find in $PATH. @@ -40,6 +44,10 @@ def initialize(node) @ansible_path = node.deploy.deploy_dir+"/ansible" @ansible_execs = MU::Groomer::Ansible.ansibleExecDir + if !MU::Groomer::Ansible.checkPythonDependencies(@server.windows?) + raise AnsibleLibrariesError, "One or more python dependencies not available" + end + if !@ansible_execs or @ansible_execs.empty? raise NoAnsibleExecError, "No Ansible executables found in visible paths" end @@ -54,6 +62,10 @@ def initialize(node) installRoles end + # Are Ansible executables and key libraries present and accounted for? + def self.available?(windows = false) + MU::Groomer::Ansible.checkPythonDependencies(windows) + end # Indicate whether our server has been bootstrapped with Ansible def haveBootstrapped? 
@@ -245,7 +257,7 @@ def run(purpose: "Ansible run", update_runlist: true, max_retries: 10, output: t "#{@server.config['name']}.yml" end - cmd = %Q{cd #{@ansible_path} && echo "#{purpose}" && #{@ansible_execs}/ansible-playbook -i hosts #{playbook} --limit=#{@server.mu_name} --vault-password-file #{pwfile} --timeout=30 --vault-password-file #{@ansible_path}/.vault_pw -u #{ssh_user}} + cmd = %Q{cd #{@ansible_path} && echo "#{purpose}" && #{@ansible_execs}/ansible-playbook -i hosts #{playbook} --limit=#{@server.windows? ? @server.canonicalIP : @server.mu_name} --vault-password-file #{pwfile} --timeout=30 --vault-password-file #{@ansible_path}/.vault_pw -u #{ssh_user}} retries = 0 begin @@ -294,7 +306,7 @@ def reinstall # Bootstrap our server with Ansible- basically, just make sure this node # is listed in our deployment's Ansible inventory. def bootstrap - @inventory.add(@server.config['name'], @server.mu_name) + @inventory.add(@server.config['name'], @server.windows? ? @server.canonicalIP : @server.mu_name) play = { "hosts" => @server.config['name'] } @@ -387,11 +399,18 @@ def saveDeployData allvars['deployment'] end + # Nuke everything associated with a deploy. Since we're just some files + # in the deploy directory, this doesn't have to do anything. + def self.cleanup(deploy_id, noop = false) +# deploy = MU::MommaCat.new(MU.deploy_id) +# inventory = Inventory.new(deploy) + end + # Expunge Ansible resources associated with a node. # @param node [String]: The Mu name of the node in question. # @param _vaults_to_clean [Array]: Dummy argument, part of this method's interface but not used by the Ansible layer # @param noop [Boolean]: Skip actual deletion, just state what we'd do - def self.cleanup(node, _vaults_to_clean = [], noop = false) + def self.purge(node, _vaults_to_clean = [], noop = false) deploy = MU::MommaCat.new(MU.deploy_id) inventory = Inventory.new(deploy) # ansible_path = deploy.deploy_dir+"/ansible" @@ -427,6 +446,50 @@ def self.encryptString(name, string) output end + # Hunt down and return a path for a Python executable + # @return [String] + def self.pythonExecDir + path = nil + + if File.exist?(BINDIR+"/python") + path = BINDIR + else + paths = [ansibleExecDir] + paths.concat(ENV['PATH'].split(/:/)) + paths << "/usr/bin" # not always in path, esp in pared-down Docker images + paths.reject! { |p| p.nil? } + paths.uniq.each { |bindir| + if File.exist?(bindir+"/python") + path = bindir + break + end + } + end + path + end + + # Make sure what's in our Python requirements.txt is reflected in the + # Python we're about to run for Ansible + def self.checkPythonDependencies(windows = false) + return nil if !ansibleExecDir + + shebang = File.readlines(ansibleExecDir+"/ansible-playbook").first + if !shebang or !shebang.start_with?("#!") + MU.log "Unable to extract a Python executable from #{ansibleExecDir}/ansible-playbook", MU::ERR + return false + end + execline = shebang.chomp.sub(/^#!/, '') + + require 'tempfile' + f = Tempfile.new("pythoncheck") + f.puts "import ansible" + f.puts "import winrm" if windows + f.close + + system(%Q{#{execline} #{f.path}}) + f.unlink + $?.exitstatus == 0 ?
true : false + end + # Hunt down and return a path for Ansible executables # @return [String] def self.ansibleExecDir @@ -434,7 +497,9 @@ def self.ansibleExecDir if File.exist?(BINDIR+"/ansible-playbook") path = BINDIR else - ENV['PATH'].split(/:/).each { |bindir| + paths = ENV['PATH'].split(/:/) + paths << "/usr/bin" + paths.uniq.each { |bindir| if File.exist?(bindir+"/ansible-playbook") path = bindir if !File.exist?(bindir+"/ansible-vault") diff --git a/modules/mu/groomers/chef.rb b/modules/mu/groomers/chef.rb index eace20cca..9f559c183 100644 --- a/modules/mu/groomers/chef.rb +++ b/modules/mu/groomers/chef.rb @@ -35,6 +35,12 @@ def const_missing(symbol) end } + # Are the Chef libraries present and accounted for? + def self.available?(windows = false) + loadChefLib + @chefloaded + end + @chefloaded = false @chefload_semaphore = Mutex.new # Autoload is too brain-damaged to get Chef's subclasses/submodules, so @@ -362,7 +368,7 @@ def run(purpose: "Chef run", update_runlist: true, max_retries: 5, output: true, } if resp.exitcode == 1 and output_lines.join("\n").match(/Chef Client finished/) - MU.log "resp.exit code 1" + MU.log output_lines.last elsif resp.exitcode != 0 raise MU::Cloud::BootstrapTempFail if resp.exitcode == 35 or output_lines.join("\n").match(/REBOOT_SCHEDULED| WARN: Reboot requested:|Rebooting server at a recipe's request|Chef::Exceptions::Reboot/) raise MU::Groomer::RunError, output_lines.slice(output_lines.length-50, output_lines.length).join("") @@ -619,15 +625,16 @@ def bootstrap kb.name_args = [@server.mu_name] kb.config[:manual] = true kb.config[:winrm_transport] = :ssl - kb.config[:host] = @server.mu_name kb.config[:winrm_port] = 5986 kb.config[:session_timeout] = timeout kb.config[:operation_timeout] = timeout if retries % 2 == 0 + kb.config[:host] = canonical_addr kb.config[:winrm_authentication_protocol] = :basic kb.config[:winrm_user] = @server.config['windows_admin_username'] kb.config[:winrm_password] = @server.getWindowsAdminPassword else + kb.config[:host] = @server.mu_name kb.config[:winrm_authentication_protocol] = :cert kb.config[:winrm_client_cert] = "#{MU.mySSLDir}/#{@server.mu_name}-winrm.crt" kb.config[:winrm_client_key] = "#{MU.mySSLDir}/#{@server.mu_name}-winrm.key" @@ -681,7 +688,7 @@ def bootstrap preClean(false) # it's ok for this to fail rescue StandardError => e end - MU::Groomer::Chef.cleanup(@server.mu_name, nodeonly: true) + MU::Groomer::Chef.purge(@server.mu_name, nodeonly: true) @config['forced_preclean'] = true @server.reboot if @server.windows? # *sigh* end @@ -798,12 +805,49 @@ def saveDeployData end end + def self.cleanup(deploy_id, noop = false) + return nil if deploy_id.nil? or deploy_id.empty? 
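+      # Query the Chef server for nodes tagged with this deploy's MU-ID, or named after it, and purge each match; a failed search here is non-fatal.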
+ begin + if File.exist?(Etc.getpwuid(Process.uid).dir+"/.chef/knife.rb") + ::Chef::Config.from_file(Etc.getpwuid(Process.uid).dir+"/.chef/knife.rb") + end + deadnodes = [] + ::Chef::Config[:environment] ||= MU.environment + q = ::Chef::Search::Query.new + begin + q.search("node", "tags_MU-ID:#{deploy_id}").each { |item| + next if item.is_a?(Integer) + item.each { |node| + deadnodes << node.name + } + } + rescue Net::HTTPServerException + end + + begin + q.search("node", "name:#{deploy_id}-*").each { |item| + next if item.is_a?(Integer) + item.each { |node| + deadnodes << node.name + } + } + rescue Net::HTTPServerException + end + MU.log "Missed some Chef resources in node cleanup, purging now", MU::NOTICE if deadnodes.size > 0 + deadnodes.uniq.each { |node| + MU::Groomer::Chef.purge(node, [], noop) + } + rescue LoadError + end + + end + # Expunge Chef resources associated with a node. # @param node [String]: The Mu name of the node in question. # @param vaults_to_clean [Array]: Some vaults to expunge # @param noop [Boolean]: Skip actual deletion, just state what we'd do # @param nodeonly [Boolean]: Just delete the node and its keys, but leave other artifacts - def self.cleanup(node, vaults_to_clean = [], noop = false, nodeonly: false) + def self.purge(node, vaults_to_clean = [], noop = false, nodeonly: false) loadChefLib MU.log "Deleting Chef resources associated with #{node}" if !nodeonly diff --git a/modules/mu/master.rb b/modules/mu/master.rb index 92bb3d1f9..791405b98 100644 --- a/modules/mu/master.rb +++ b/modules/mu/master.rb @@ -386,6 +386,7 @@ def self.kubectl best = nil best_version = nil paths.uniq.each { |path| + path.sub!(/^~/, MY_HOME) if File.exist?(path+"/kubectl") version = %x{#{path}/kubectl version --short --client}.chomp.sub(/.*Client version:\s+v/i, '') next if !$?.success? @@ -546,7 +547,7 @@ def self.addInstanceToEtcHosts(public_ip, chef_name = nil, system_name = nil) rescue Errno::ECONNRESET, Errno::ECONNREFUSED end if response != "ok" - MU.log "Error adding #{public_ip} to /etc/hosts via MommaCat request", MU::ERR + MU.log "Unable to add #{public_ip} to /etc/hosts via MommaCat request", MU::WARN end return end @@ -709,6 +710,77 @@ def self.removeHostFromSSHConfig(nodename, noop: false) end end + # Evict ssh keys associated with a particular deploy from our ssh config + # and key directory. 
+ # @param deploy_id [String] + # @param noop [Boolean] + def self.purgeDeployFromSSH(deploy_id, noop: false) + myhome = Etc.getpwuid(Process.uid).dir + sshdir = "#{myhome}/.ssh" + sshconf = "#{sshdir}/config" + ssharchive = "#{sshdir}/archive" + + Dir.mkdir(sshdir, 0700) if !Dir.exist?(sshdir) and !noop + Dir.mkdir(ssharchive, 0700) if !Dir.exist?(ssharchive) and !noop + + keyname = "deploy-#{deploy_id}" + if File.exist?("#{sshdir}/#{keyname}") + MU.log "Moving #{sshdir}/#{keyname} to #{ssharchive}/#{keyname}" + if !noop + File.rename("#{sshdir}/#{keyname}", "#{ssharchive}/#{keyname}") + end + end + if File.exist?(sshconf) and File.read(sshconf).match(/\/deploy\-#{deploy_id}$/) + MU.log "Expunging #{deploy_id} from #{sshconf}" + if !noop + FileUtils.copy(sshconf, "#{ssharchive}/config-#{deploy_id}") + File.open(sshconf, File::CREAT|File::RDWR, 0600) { |f| + f.flock(File::LOCK_EX) + newlines = Array.new + delete_block = false + f.readlines.each { |line| + if line.match(/^Host #{deploy_id}\-/) + delete_block = true + elsif line.match(/^Host /) + delete_block = false + end + newlines << line if !delete_block + } + f.rewind + f.truncate(0) + f.puts(newlines) + f.flush + f.flock(File::LOCK_UN) + } + end + end + # XXX refactor with above? They're similar, ish. + hostsfile = "/etc/hosts" + if File.read(hostsfile).match(/ #{deploy_id}\-/) + if Process.uid == 0 + MU.log "Expunging traces of #{deploy_id} from #{hostsfile}" + if !noop + FileUtils.copy(hostsfile, "#{hostsfile}.cleanup-#{deploy_id}") + File.open(hostsfile, File::CREAT|File::RDWR, 0644) { |f| + f.flock(File::LOCK_EX) + newlines = Array.new + f.readlines.each { |line| + newlines << line if !line.match(/ #{deploy_id}\-/) + } + f.rewind + f.truncate(0) + f.puts(newlines) + f.flush + f.flock(File::LOCK_UN) + } + end + else + MU.log "Residual /etc/hosts entries for #{deploy_id} must be removed by root user", MU::WARN + end + end + + end + # Ensure that the Nagios configuration local to the MU master has been # updated, and make sure Nagios has all of the ssh keys it needs to tunnel # to client nodes.
@@ -738,7 +810,7 @@ def self.syncMonitoringConfig(blocking = true) ssh_conf.puts " IdentityFile #{NAGIOS_HOME}/.ssh/id_rsa" ssh_conf.puts " StrictHostKeyChecking no" ssh_conf.close - FileUtils.cp("#{@myhome}/.ssh/id_rsa", "#{NAGIOS_HOME}/.ssh/id_rsa") + FileUtils.cp("#{Etc.getpwuid(Process.uid).dir}/.ssh/id_rsa", "#{NAGIOS_HOME}/.ssh/id_rsa") File.chown(Etc.getpwnam("nagios").uid, Etc.getpwnam("nagios").gid, "#{NAGIOS_HOME}/.ssh/id_rsa") threads = [] @@ -751,7 +823,7 @@ def self.syncMonitoringConfig(blocking = true) MU.log "Failed to extract ssh key name from #{deploy_id} in syncMonitoringConfig", MU::ERR if deploy.kittens.has_key?("servers") next end - FileUtils.cp("#{@myhome}/.ssh/#{deploy.ssh_key_name}", "#{NAGIOS_HOME}/.ssh/#{deploy.ssh_key_name}") + FileUtils.cp("#{Etc.getpwuid(Process.uid).dir}/.ssh/#{deploy.ssh_key_name}", "#{NAGIOS_HOME}/.ssh/#{deploy.ssh_key_name}") File.chown(Etc.getpwnam("nagios").uid, Etc.getpwnam("nagios").gid, "#{NAGIOS_HOME}/.ssh/#{deploy.ssh_key_name}") if deploy.kittens.has_key?("servers") deploy.kittens["servers"].values.each { |nodeclasses| diff --git a/modules/mu/mommacat.rb b/modules/mu/mommacat.rb index 94aaa940f..9282c0bf7 100644 --- a/modules/mu/mommacat.rb +++ b/modules/mu/mommacat.rb @@ -19,6 +19,7 @@ require 'securerandom' require 'timeout' require 'mu/mommacat/storage' +require 'mu/mommacat/search' require 'mu/mommacat/daemon' require 'mu/mommacat/naming' @@ -154,7 +155,7 @@ def initialize(deploy_id, if @mu_user == "root" @chef_user = "mu" else - @chef_user = @mu_user.dup.gsub(/\./, "") + @chef_user = @mu_user.dup.delete(".") @mu_user = "root" if @mu_user == "mu" end @kitten_semaphore = Mutex.new @@ -187,54 +188,10 @@ def initialize(deploy_id, end if create and !@no_artifacts - if !Dir.exist?(MU.dataDir+"/deployments") - MU.log "Creating #{MU.dataDir}/deployments", MU::DEBUG - Dir.mkdir(MU.dataDir+"/deployments", 0700) - end - path = File.expand_path(MU.dataDir+"/deployments")+"/"+@deploy_id - if !Dir.exist?(path) - MU.log "Creating #{path}", MU::DEBUG - Dir.mkdir(path, 0700) - end - if @original_config.nil? or !@original_config.is_a?(Hash) - raise DeployInitializeError, "New MommaCat repository requires config hash" - end - credsets = {} - - MU::Cloud.resource_types.values.each { |attrs| - if !@original_config[attrs[:cfg_plural]].nil? and @original_config[attrs[:cfg_plural]].size > 0 - @original_config[attrs[:cfg_plural]].each { |resource| - - credsets[resource['cloud']] ||= [] - credsets[resource['cloud']] << resource['credentials'] - @clouds[resource['cloud']] = 0 if !@clouds.has_key?(resource['cloud']) - @clouds[resource['cloud']] = @clouds[resource['cloud']] + 1 - - } - end - } - - @ssh_key_name, @ssh_private_key, @ssh_public_key = self.SSHKey - if !File.exist?(deploy_dir+"/private_key") - @private_key, @public_key = createDeployKey - end - MU.log "Creating deploy secret for #{MU.deploy_id}" - @deploy_secret = Password.random(256) - if !@original_config['scrub_mu_isms'] and !@no_artifacts - credsets.each_pair { |cloud, creds| - creds.uniq! - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) - creds.each { |credentials| - cloudclass.writeDeploySecret(@deploy_id, @deploy_secret, credentials: credentials) - } - } - end - if set_context_to_me - MU::MommaCat.setThreadContext(self) - end - + initDeployDirectory + setDeploySecret + MU::MommaCat.setThreadContext(self) if set_context_to_me save! 
- end @appname ||= MU.appname @@ -242,10 +199,8 @@ def initialize(deploy_id, @environment ||= MU.environment loadDeploy(set_context_to_me: set_context_to_me) - if !deploy_secret.nil? - if !authKey(deploy_secret) - raise DeployInitializeError, "Client request did not include a valid deploy authorization secret. Verify that userdata runs correctly?" - end + if !deploy_secret.nil? and !authKey(deploy_secret) + raise DeployInitializeError, "Client request did not include a valid deploy authorization secret. Verify that userdata runs correctly?" end @@ -257,86 +212,7 @@ def initialize(deploy_id, # deploy, IF it already exists, which is to say if we're loading an # existing deploy instead of creating a new one. if !create and @deployment and @original_config and !skip_resource_objects - - MU::Cloud.resource_types.each_pair { |res_type, attrs| - type = attrs[:cfg_plural] - if @deployment.has_key?(type) - - @deployment[type].each_pair { |res_name, data| - orig_cfg = nil - if @original_config.has_key?(type) - @original_config[type].each { |resource| - if resource["name"] == res_name - orig_cfg = resource - break - end - } - end - - # Some Server objects originated from ServerPools, get their - # configs from there - if type == "servers" and orig_cfg.nil? and - @original_config.has_key?("server_pools") - @original_config["server_pools"].each { |resource| - if resource["name"] == res_name - orig_cfg = resource - break - end - } - end - - if orig_cfg.nil? - MU.log "Failed to locate original config for #{attrs[:cfg_name]} #{res_name} in #{@deploy_id}", MU::WARN if !["firewall_rules", "databases", "storage_pools", "cache_clusters", "alarms"].include?(type) # XXX shaddap - next - end - - if orig_cfg['vpc'] and orig_cfg['vpc'].is_a?(Hash) - ref = if orig_cfg['vpc']['id'] and orig_cfg['vpc']['id'].is_a?(Hash) - orig_cfg['vpc']['id']['mommacat'] = self - MU::Config::Ref.get(orig_cfg['vpc']['id']) - else - orig_cfg['vpc']['mommacat'] = self - MU::Config::Ref.get(orig_cfg['vpc']) - end - orig_cfg['vpc'].delete('mommacat') - orig_cfg['vpc'] = ref if ref.kitten(shallow: true) - end - - begin - # Load up MU::Cloud objects for all our kittens in this deploy - orig_cfg['environment'] = @environment # not always set in old deploys - if attrs[:has_multiples] - data.keys.each { |mu_name| - attrs[:interface].new(mommacat: self, kitten_cfg: orig_cfg, mu_name: mu_name, delay_descriptor_load: delay_descriptor_load) - } - else - # XXX hack for old deployments, this can go away some day - if data['mu_name'].nil? or data['mu_name'].empty? - if res_type.to_s == "LoadBalancer" and !data['awsname'].nil? - data['mu_name'] = data['awsname'].dup - elsif res_type.to_s == "FirewallRule" and !data['group_name'].nil? - data['mu_name'] = data['group_name'].dup - elsif res_type.to_s == "Database" and !data['identifier'].nil? - data['mu_name'] = data['identifier'].dup.upcase - elsif res_type.to_s == "VPC" - # VPC names are deterministic, just generate the things - data['mu_name'] = getResourceName(data['name']) - end - end - if data['mu_name'].nil? 
- raise MuError, "Unable to find or guess a Mu name for #{res_type}: #{res_name} in #{@deploy_id}" - end - attrs[:interface].new(mommacat: self, kitten_cfg: orig_cfg, mu_name: data['mu_name'], cloud_id: data['cloud_id']) - end - rescue StandardError => e - if e.class != MU::Cloud::MuCloudResourceNotImplemented - MU.log "Failed to load an existing resource of type '#{type}' in #{@deploy_id}: #{e.inspect}", MU::WARN, details: e.backtrace - end - end - } - - end - } + loadObjects(delay_descriptor_load) end @initializing = false @@ -349,7 +225,7 @@ def initialize(deploy_id, def cloudsUsed seen = [] seen << @original_config['cloud'] if @original_config['cloud'] - MU::Cloud.resource_types.values.each { |attrs| + MU::Cloud.resource_types.each_value { |attrs| type = attrs[:cfg_plural] if @original_config[type] @original_config[type].each { |resource| @@ -369,18 +245,15 @@ def credsUsed # clouds = [] seen << @original_config['credentials'] if @original_config['credentials'] # defaultcloud = @original_config['cloud'] - MU::Cloud.resource_types.values.each { |attrs| + MU::Cloud.resource_types.each_value { |attrs| type = attrs[:cfg_plural] if @original_config[type] @original_config[type].each { |resource| if resource['credentials'] seen << resource['credentials'] else - cloudclass = if @original_config['cloud'] - Object.const_get("MU").const_get("Cloud").const_get(@original_config['cloud']) - else - Object.const_get("MU").const_get("Cloud").const_get(MU::Config.defaultCloud) - end + cloudconst = @original_config['cloud'] ? @original_config['cloud'] : MU::Config.defaultCloud + Object.const_get("MU").const_get("Cloud").const_get(cloudconst) seen << cloudclass.credConfig(name_only: true) end } @@ -404,7 +277,7 @@ def habitatsUsed end end - MU::Cloud.resource_types.values.each { |attrs| + MU::Cloud.resource_types.each_value { |attrs| type = attrs[:cfg_plural] if @original_config[type] @original_config[type].each { |resource| @@ -481,7 +354,7 @@ def numKittens(clouds: [], types: [], negate: false) end count = 0 - MU::Cloud.resource_types.values.each { |data| + MU::Cloud.resource_types.each_value { |data| next if @original_config[data[:cfg_plural]].nil? next if realtypes.size > 0 and (!negate and !realtypes.include?(data[:cfg_plural])) @original_config[data[:cfg_plural]].each { |resource| @@ -499,13 +372,13 @@ def removeKitten(object) raise MuError, "Nil arguments to removeKitten are not allowed" end @kitten_semaphore.synchronize { - MU::Cloud.resource_types.values.each { |attrs| + MU::Cloud.resource_types.each_value { |attrs| type = attrs[:cfg_plural] next if !@kittens.has_key?(type) tmplitter = @kittens[type].values.dup tmplitter.each { |nodeclass, data| if data.is_a?(Hash) - data.keys.each { |mu_name| + data.each_key { |mu_name| if data == object @kittens[type][nodeclass].delete(mu_name) return @@ -534,13 +407,12 @@ def addKitten(type, name, object) end _shortclass, _cfg_name, type, _classname, attrs = MU::Cloud.getResourceNames(type) - has_multiples = attrs[:has_multiples] object.intoDeploy(self) @kitten_semaphore.synchronize { @kittens[type] ||= {} @kittens[type][object.habitat] ||= {} - if has_multiples + if attrs[:has_multiples] @kittens[type][object.habitat][name] ||= {} @kittens[type][object.habitat][name][object.mu_name] = object else @@ -577,7 +449,7 @@ def saveNodeSecret(instance_id, raw_secret, type) loadDeploy(true) # make sure we're not trampling deployment data @secret_semaphore.synchronize { if @secrets[type].nil? 
- raise SecretError, "'#{type}' is not a valid secret type (valid types: #{@secrets.keys.to_s})" + raise SecretError, "'#{type}' is not a valid secret type (valid types: #{@secrets.keys.join(", ")})" end @secrets[type][instance_id] = encryptWithDeployKey(raw_secret) } @@ -593,7 +465,7 @@ def fetchSecret(instance_id, type, quiet: false) @secret_semaphore.synchronize { if @secrets[type].nil? return nil if quiet - raise SecretError, "'#{type}' is not a valid secret type (valid types: #{@secrets.keys.to_s})" + raise SecretError, "'#{type}' is not a valid secret type (valid types: #{@secrets.keys.join(", ")})" end if @secrets[type][instance_id].nil? return nil if quiet @@ -654,645 +526,88 @@ def SSHKey @@dummy_cache = {} - # Locate a resource that's either a member of another deployment, or of no - # deployment at all, and return a {MU::Cloud} object for it. - # @param cloud [String]: The Cloud provider to use. - # @param type [String]: The resource type. Can be the full class name, symbolic name, or Basket of Kittens configuration shorthand for the resource type. - # @param deploy_id [String]: The identifier of an outside deploy to search. - # @param name [String]: The name of the resource as defined in its 'name' Basket of Kittens field, typically used in conjunction with deploy_id. - # @param mu_name [String]: The fully-resolved and deployed name of the resource, typically used in conjunction with deploy_id. - # @param cloud_id [String]: A cloud provider identifier for this resource. - # @param region [String]: The cloud provider region - # @param tag_key [String]: A cloud provider tag to help identify the resource, used in conjunction with tag_value. - # @param tag_value [String]: A cloud provider tag to help identify the resource, used in conjunction with tag_key. - # @param allow_multi [Boolean]: Permit an array of matching resources to be returned (if applicable) instead of just one. - # @param dummy_ok [Boolean]: Permit return of a faked {MU::Cloud} object if we don't have enough information to identify a real live one. - # @param flags [Hash]: Other cloud or resource type specific options to pass to that resource's find() method - # @return [Array] - def self.findStray( - cloud, - type, - deploy_id: nil, - name: nil, - mu_name: nil, - cloud_id: nil, - credentials: nil, - region: nil, - tag_key: nil, - tag_value: nil, - allow_multi: false, - calling_deploy: MU.mommacat, - flags: {}, - habitats: [], - dummy_ok: false, - debug: false, - no_deploy_search: false - ) - start = Time.now - callstr = "findStray(cloud: #{cloud}, type: #{type}, deploy_id: #{deploy_id}, calling_deploy: #{calling_deploy.deploy_id if !calling_deploy.nil?}, name: #{name}, cloud_id: #{cloud_id}, tag_key: #{tag_key}, tag_value: #{tag_value}, credentials: #{credentials}, habitats: #{habitats ? habitats.to_s : "[]"}, dummy_ok: #{dummy_ok.to_s}, flags: #{flags.to_s}) from #{caller[0]}" -# callstack = caller.dup - - return nil if cloud == "CloudFormation" and !cloud_id.nil? - shortclass, _cfg_name, cfg_plural, classname, _attrs = MU::Cloud.getResourceNames(type) - if !MU::Cloud.supportedClouds.include?(cloud) or shortclass.nil? - MU.log "findStray was called with bogus cloud argument '#{cloud}'", MU::WARN, details: callstr - return nil - end - - begin - # TODO this is dumb as hell, clean this up.. 
and while we're at it - # .dup everything so we don't mangle referenced values from the caller - deploy_id = deploy_id.to_s if deploy_id.class.to_s == "MU::Config::Tail" - name = name.to_s if name.class.to_s == "MU::Config::Tail" - cloud_id = cloud_id.to_s if !cloud_id.nil? - mu_name = mu_name.to_s if mu_name.class.to_s == "MU::Config::Tail" - tag_key = tag_key.to_s if tag_key.class.to_s == "MU::Config::Tail" - tag_value = tag_value.to_s if tag_value.class.to_s == "MU::Config::Tail" - type = cfg_plural - resourceclass = MU::Cloud.loadCloudType(cloud, shortclass) - cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) - - credlist = if credentials - [credentials] - else - cloudclass.listCredentials - end - - if (tag_key and !tag_value) or (!tag_key and tag_value) - raise MuError, "Can't call findStray with only one of tag_key and tag_value set, must be both or neither" - end - # Help ourselves by making more refined parameters out of mu_name, if - # they weren't passed explicitly - if mu_name - if !tag_key and !tag_value - # XXX "Name" is an AWS-ism, perhaps those plugins should do this bit? - tag_key="Name" - tag_value=mu_name - end - # We can extract a deploy_id from mu_name if we don't have one already - if !deploy_id and mu_name - deploy_id = mu_name.sub(/^(\w+-\w+-\d{10}-[A-Z]{2})-/, '\1') - end - end - loglevel = debug ? MU::NOTICE : MU::DEBUG - - MU.log callstr, loglevel, details: caller - - # See if the thing we're looking for is a member of the deploy that's - # asking after it. - if !deploy_id.nil? and !calling_deploy.nil? and - calling_deploy.deploy_id == deploy_id and (!name.nil? or !mu_name.nil?) - handle = calling_deploy.findLitterMate(type: type, name: name, mu_name: mu_name, cloud_id: cloud_id, credentials: credentials) - return [handle] if !handle.nil? - end - - kittens = {} - # Search our other deploys for matching resources - if !no_deploy_search and (deploy_id or name or mu_name or cloud_id) - MU.log "findStray: searching my deployments (#{cfg_plural}, name: #{name}, deploy_id: #{deploy_id}, mu_name: #{mu_name}) - #{sprintf("%.2fs", (Time.now-start))}", loglevel - - # Check our in-memory cache of live deploys before resorting to - # metadata - littercache = nil - # Sometimes we're called inside a locked thread, sometimes not. Deal - # with locking gracefully. 
- begin - @@litter_semaphore.synchronize { - littercache = @@litters.dup - } - rescue ThreadError => e - raise e if !e.message.match(/recursive locking/) - littercache = @@litters.dup - end - - littercache.each_pair { |cur_deploy, momma| - next if deploy_id and deploy_id != cur_deploy - - straykitten = momma.findLitterMate(type: type, cloud_id: cloud_id, name: name, mu_name: mu_name, credentials: credentials, created_only: true) - if straykitten - MU.log "Found matching kitten #{straykitten.mu_name} in-memory - #{sprintf("%.2fs", (Time.now-start))}", loglevel - # Peace out if we found the exact resource we want - if cloud_id and straykitten.cloud_id.to_s == cloud_id.to_s - return [straykitten] - elsif mu_name and straykitten.mu_name == mu_name - return [straykitten] - else - kittens[straykitten.cloud_id] ||= straykitten - end - end - } - - mu_descs = MU::MommaCat.getResourceMetadata(cfg_plural, name: name, deploy_id: deploy_id, mu_name: mu_name) - MU.log "findStray: #{mu_descs.size.to_s} deploys had matches - #{sprintf("%.2fs", (Time.now-start))}", loglevel - - mu_descs.each_pair { |cur_deploy_id, matches| - MU.log "findStray: #{cur_deploy_id} had #{matches.size.to_s} initial matches - #{sprintf("%.2fs", (Time.now-start))}", loglevel - next if matches.nil? or matches.size == 0 - - momma = MU::MommaCat.getLitter(cur_deploy_id) - - straykitten = nil - - # If we found exactly one match in this deploy, use its metadata to - # guess at resource names we weren't told. - if matches.size > 1 and cloud_id - MU.log "findStray: attempting to narrow down multiple matches with cloud_id #{cloud_id} - #{sprintf("%.2fs", (Time.now-start))}", loglevel - straykitten = momma.findLitterMate(type: type, cloud_id: cloud_id, credentials: credentials, created_only: true) - elsif matches.size == 1 and name.nil? and mu_name.nil? - if cloud_id.nil? - straykitten = momma.findLitterMate(type: type, name: matches.first["name"], cloud_id: matches.first["cloud_id"], credentials: credentials) - else - MU.log "findStray: fetching single match with cloud_id #{cloud_id} - #{sprintf("%.2fs", (Time.now-start))}", loglevel - straykitten = momma.findLitterMate(type: type, name: matches.first["name"], cloud_id: cloud_id, credentials: credentials) - end -# elsif !flags.nil? and !flags.empty? # XXX eh, maybe later -# # see if we can narrow it down further with some flags -# filtered = [] -# matches.each { |m| -# f = resourceclass.find(cloud_id: m['mu_name'], flags: flags) -# filtered << m if !f.nil? and f.size > 0 -# MU.log "RESULT FROM find(cloud_id: #{m['mu_name']}, flags: #{flags})", MU::WARN, details: f -# } -# if filtered.size == 1 -# straykitten = momma.findLitterMate(type: type, name: matches.first["name"], cloud_id: filtered.first['cloud_id']) -# end - else - # There's more than one of this type of resource in the target - # deploy, so see if findLitterMate can narrow it down for us - straykitten = momma.findLitterMate(type: type, name: name, mu_name: mu_name, cloud_id: cloud_id, credentials: credentials) - end - - next if straykitten.nil? - straykitten.intoDeploy(momma) - - if straykitten.cloud_id.nil? 
- MU.log "findStray: kitten #{straykitten.mu_name} came back with nil cloud_id", MU::WARN - next - end - - kittens[straykitten.cloud_id] ||= straykitten - - # Peace out if we found the exact resource we want - if cloud_id and straykitten.cloud_id.to_s == cloud_id.to_s - return [straykitten] - # ...or if we've validated our one possible match - elsif !cloud_id and mu_descs.size == 1 and matches.size == 1 - return [straykitten] - elsif credentials and credlist.size == 1 and straykitten.credentials == credentials - return [straykitten] - end - } - - -# if !mu_descs.nil? and mu_descs.size > 0 and !deploy_id.nil? and !deploy_id.empty? and !mu_descs.first.empty? -# MU.log "I found descriptions that might match #{resourceclass.cfg_plural} name: #{name}, deploy_id: #{deploy_id}, mu_name: #{mu_name}, but couldn't isolate my target kitten", MU::WARN, details: caller -# puts File.read(deploy_dir(deploy_id)+"/deployment.json") -# end - - # We can't refine any further by asking the cloud provider... - if !cloud_id and !tag_key and !tag_value and kittens.size > 1 - if !allow_multi - raise MuError, "Multiple matches in MU::MommaCat.findStray where none allowed from deploy_id: '#{deploy_id}', name: '#{name}', mu_name: '#{mu_name}' (#{caller[0]})" - else - return kittens.values - end - end - end - - matches = [] - - found_the_thing = false - credlist.each { |creds| - break if found_the_thing - if cloud_id or (tag_key and tag_value) or !flags.empty? or allow_multi - - regions = begin - region ? [region] : cloudclass.listRegions(credentials: creds) - rescue NoMethodError # Not all cloud providers have regions - [nil] - end - - # ..not all resource types care about regions either - if resourceclass.isGlobal? - regions = [nil] - end - - # Decide what habitats (accounts/projects/subscriptions) we'll - # search, if applicable for this resource type. - habitats ||= [] - begin - if flags["project"] # backwards-compat - habitats << flags["project"] - end - if habitats.empty? - if resourceclass.canLiveIn.include?(nil) - habitats << nil - end - if resourceclass.canLiveIn.include?(:Habitat) - habitats.concat(cloudclass.listProjects(creds)) - end - end - rescue NoMethodError # we only expect this to work on Google atm - end - - if habitats.empty? - habitats << nil - end - habitats.uniq! - - habitat_threads = [] - desc_semaphore = Mutex.new - - cloud_descs = {} - habitats.each { |hab| - begin - habitat_threads.each { |t| t.join(0.1) } - habitat_threads.reject! { |t| t.nil? or !t.status } - sleep 1 if habitat_threads.size > 5 - end while habitat_threads.size > 5 - habitat_threads << Thread.new(hab) { |p| - MU.log "findStray: Searching #{p} (#{habitat_threads.size.to_s} habitat threads running) - #{sprintf("%.2fs", (Time.now-start))}", loglevel - cloud_descs[p] = {} - region_threads = [] - regions.each { |reg| region_threads << Thread.new(reg) { |r| - MU.log "findStray: Searching #{r} in #{p} (#{region_threads.size.to_s} region threads running) - #{sprintf("%.2fs", (Time.now-start))}", loglevel - MU.log "findStray: calling #{classname}.find(cloud_id: #{cloud_id}, region: #{r}, tag_key: #{tag_key}, tag_value: #{tag_value}, flags: #{flags}, credentials: #{creds}, project: #{p}) - #{sprintf("%.2fs", (Time.now-start))}", loglevel - found = resourceclass.find(cloud_id: cloud_id, region: r, tag_key: tag_key, tag_value: tag_value, flags: flags, credentials: creds, habitat: p) - MU.log "findStray: #{found ? 
found.size.to_s : "nil"} results - #{sprintf("%.2fs", (Time.now-start))}", loglevel - - if found - desc_semaphore.synchronize { - cloud_descs[p][r] = found - } - end - # Stop if you found the thing by a specific cloud_id - if cloud_id and found and !found.empty? - found_the_thing = true - Thread.exit - end - } } - begin - region_threads.each { |t| t.join(0.1) } - region_threads.reject! { |t| t.nil? or !t.status } - if region_threads.size > 0 - MU.log "#{region_threads.size.to_s} regions still running in #{p}", loglevel - sleep 3 - end - end while region_threads.size > 0 - } - } - begin - habitat_threads.each { |t| t.join(0.1) } - habitat_threads.reject! { |t| t.nil? or !t.status } - if habitat_threads.size > 0 - MU.log "#{habitat_threads.size.to_s} habitats still running", loglevel - sleep 3 - end - end while habitat_threads.size > 0 - - habitat_threads = [] - habitats.each { |hab| habitat_threads << Thread.new(hab) { |p| - region_threads = [] - regions.each { |reg| region_threads << Thread.new(reg) { |r| - next if cloud_descs[p][r].nil? - cloud_descs[p][r].each_pair { |kitten_cloud_id, descriptor| - - # We already have a MU::Cloud object for this guy, use it - if kittens.has_key?(kitten_cloud_id) - desc_semaphore.synchronize { - matches << kittens[kitten_cloud_id] - } - elsif kittens.size == 0 - if !dummy_ok - next - end - - # If we don't have a MU::Cloud object, manufacture a dummy - # one. Give it a fake name if we have to and have decided - # that's ok. Wild inferences from the cloud descriptor are - # ok to try here. - use_name = if (name.nil? or name.empty?) - if !dummy_ok - nil - elsif !mu_name.nil? - mu_name - # AWS-style tags - elsif descriptor.respond_to?(:tags) and - descriptor.tags.is_a?(Array) and - descriptor.tags.first.respond_to?(:key) and - descriptor.tags.map { |t| t.key }.include?("Name") - descriptor.tags.select { |t| t.key == "Name" }.first.value - else - try = nil - # Various GCP fields - [:display_name, :name, (resourceclass.cfg_name+"_name").to_sym].each { |field| - if descriptor.respond_to?(field) and descriptor.send(field).is_a?(String) - try = descriptor.send(field) - break - end - - } - try ||= if !tag_value.nil? - tag_value - else - kitten_cloud_id - end - try - end - else - name - end - if use_name.nil? - MU.log "Found cloud provider data for #{cloud} #{type} #{kitten_cloud_id}, but without a name I can't manufacture a proper #{type} object to return - #{sprintf("%.2fs", (Time.now-start))}", loglevel, details: caller - next - end - cfg = { - "name" => use_name, - "cloud" => cloud, - "credentials" => creds - } - if !r.nil? and !resourceclass.isGlobal? - cfg["region"] = r - end - - if !p.nil? and resourceclass.canLiveIn.include?(:Habitat) - cfg["project"] = p - end - # If we can at least find the config from the deploy this will - # belong with, use that, even if it's an ungroomed resource. - if !calling_deploy.nil? and - !calling_deploy.original_config.nil? and - !calling_deploy.original_config[type+"s"].nil? 
- calling_deploy.original_config[type+"s"].each { |s| - if s["name"] == use_name - cfg = s.dup - break - end - } - - newkitten = resourceclass.new(mommacat: calling_deploy, kitten_cfg: cfg, cloud_id: kitten_cloud_id) - desc_semaphore.synchronize { - matches << newkitten - } - else - if !@@dummy_cache[cfg_plural] or !@@dummy_cache[cfg_plural][cfg.to_s] - MU.log "findStray: Generating dummy '#{resourceclass.to_s}' cloudobj with name: #{use_name}, cloud_id: #{kitten_cloud_id.to_s} - #{sprintf("%.2fs", (Time.now-start))}", loglevel, details: cfg - resourceclass.new(mu_name: use_name, kitten_cfg: cfg, cloud_id: kitten_cloud_id.to_s, from_cloud_desc: descriptor) - desc_semaphore.synchronize { - @@dummy_cache[cfg_plural] ||= {} - @@dummy_cache[cfg_plural][cfg.to_s] = resourceclass.new(mu_name: use_name, kitten_cfg: cfg, cloud_id: kitten_cloud_id.to_s, from_cloud_desc: descriptor) - MU.log "findStray: Finished generating dummy '#{resourceclass.to_s}' cloudobj - #{sprintf("%.2fs", (Time.now-start))}", loglevel - } - end - desc_semaphore.synchronize { - matches << @@dummy_cache[cfg_plural][cfg.to_s] - } - end - end - } - } } - MU.log "findStray: tying up #{region_threads.size.to_s} region threads - #{sprintf("%.2fs", (Time.now-start))}", loglevel - region_threads.each { |t| - t.join - } - } } - MU.log "findStray: tying up #{habitat_threads.size.to_s} habitat threads - #{sprintf("%.2fs", (Time.now-start))}", loglevel - habitat_threads.each { |t| - t.join - } - end - } - rescue StandardError => e - MU.log e.inspect, MU::ERR, details: e.backtrace - end - MU.log "findStray: returning #{matches ? matches.size.to_s : "0"} matches - #{sprintf("%.2fs", (Time.now-start))}", loglevel - - matches - end - - # Return the resource object of another member of this deployment - # @param type [String,Symbol]: The type of resource - # @param name [String]: The name of the resource as defined in its 'name' Basket of Kittens field - # @param mu_name [String]: The fully-resolved and deployed name of the resource - # @param cloud_id [String]: The cloud provider's unique identifier for this resource - # @param created_only [Boolean]: Only return the littermate if its cloud_id method returns a value - # @param return_all [Boolean]: Return a Hash of matching objects indexed by their mu_name, instead of a single match. Only valid for resource types where has_multiples is true. - # @return [MU::Cloud] - def findLitterMate(type: nil, name: nil, mu_name: nil, cloud_id: nil, created_only: false, return_all: false, credentials: nil, habitat: nil, debug: false, indent: "") - shortclass, cfg_name, cfg_plural, classname, attrs = MU::Cloud.getResourceNames(type) - type = cfg_plural - has_multiples = attrs[:has_multiples] - - loglevel = debug ? MU::NOTICE : MU::DEBUG - - argstring = [:type, :name, :mu_name, :cloud_id, :created_only, :credentials, :habitat, :has_multiples].reject { |a| - binding.local_variable_get(a).nil? - }.map { |v| - v.to_s+": "+binding.local_variable_get(v).to_s - }.join(", ") - - # Fun times: if we specified a habitat, which we may also have done by - # its shorthand sibling name, let's... call ourselves first to make sure - # we're fishing for the right thing. 
- if habitat - if habitat.is_a?(MU::Config::Ref) and habitat.id - habitat = habitat.id - else - MU.log indent+"findLitterMate(#{argstring}): Attempting to resolve habitat name #{habitat}", loglevel - realhabitat = findLitterMate(type: "habitat", name: habitat, debug: debug, credentials: credentials, indent: indent+" ") - if realhabitat and realhabitat.mu_name - MU.log indent+"findLitterMate: Resolved habitat name #{habitat} to #{realhabitat.mu_name}", loglevel, details: [realhabitat.mu_name, realhabitat.cloud_id, realhabitat.config.keys] - habitat = realhabitat.cloud_id - elsif debug - MU.log indent+"findLitterMate(#{argstring}): Failed to resolve habitat name #{habitat}", MU::WARN - end - end - end - - - @kitten_semaphore.synchronize { - if !@kittens.has_key?(type) - if debug - MU.log indent+"NO SUCH KEY #{type} findLitterMate(#{argstring})", MU::WARN, details: @kittens.keys - end - return nil - end - MU.log indent+"START findLitterMate(#{argstring}), caller: #{caller[2]}", loglevel, details: @kittens[type].keys.map { |hab| hab.to_s+": "+@kittens[type][hab].keys.join(", ") } - matches = [] - - @kittens[type].each { |habitat_group, sib_classes| - next if habitat and habitat_group != habitat and !habitat_group.nil? - sib_classes.each_pair { |sib_class, data| - virtual_name = nil - - if !has_multiples and data and !data.is_a?(Hash) and data.config and data.config.is_a?(Hash) and data.config['virtual_name'] and name == data.config['virtual_name'] - virtual_name = data.config['virtual_name'] - elsif !name.nil? and name != sib_class - next - end - if has_multiples - if !name.nil? - if return_all - MU.log indent+"MULTI-MATCH RETURN_ALL findLitterMate(#{argstring})", loglevel, details: data.keys - return data.dup - end - if data.size == 1 and (cloud_id.nil? or data.values.first.cloud_id == cloud_id) - return data.values.first - elsif mu_name.nil? and cloud_id.nil? - MU.log indent+"#{@deploy_id}: Found multiple matches in findLitterMate based on #{type}: #{name}, and not enough info to narrow down further. Returning an arbitrary result. Caller: #{caller[2]}", MU::WARN, details: data.keys - return data.values.first - end - end - data.each_pair { |sib_mu_name, obj| - if (!mu_name.nil? and mu_name == sib_mu_name) or - (!cloud_id.nil? and cloud_id == obj.cloud_id) or - (!credentials.nil? and credentials == obj.credentials) - if !created_only or !obj.cloud_id.nil? - if return_all - MU.log indent+"MULTI-MATCH RETURN_ALL findLitterMate(#{argstring})", loglevel, details: data.keys - return data.dup - else - MU.log indent+"MULTI-MATCH findLitterMate(#{argstring})", loglevel, details: data.keys - return obj - end - end - end - } - else - - MU.log indent+"CHECKING AGAINST findLitterMate #{habitat_group}/#{type}/#{sib_class} data.cloud_id: #{data.cloud_id}, data.credentials: #{data.credentials}, sib_class: #{sib_class}, virtual_name: #{virtual_name}", loglevel, details: argstring - - data_cloud_id = data.cloud_id.nil? ? nil : data.cloud_id.to_s - - MU.log indent+"(name.nil? or sib_class == name or virtual_name == name)", loglevel, details: (name.nil? or sib_class == name or virtual_name == name).to_s - MU.log indent+"(cloud_id.nil? or cloud_id[#{cloud_id.class.name}:#{cloud_id.to_s}] == data_cloud_id[#{data_cloud_id.class.name}:#{data_cloud_id}])", loglevel, details: (cloud_id.nil? or cloud_id == data_cloud_id).to_s - MU.log indent+"(credentials.nil? or data.credentials.nil? 
or credentials[#{credentials.class.name}:#{credentials}] == data.credentials[#{data.credentials.class.name}:#{data.credentials}])", loglevel, details: (credentials.nil? or data.credentials.nil? or credentials == data.credentials).to_s - - if (name.nil? or sib_class == name.to_s or virtual_name == name.to_s) and - (cloud_id.nil? or cloud_id.to_s == data_cloud_id) and - (credentials.nil? or data.credentials.nil? or credentials.to_s == data.credentials.to_s) - MU.log indent+"OUTER MATCH PASSED, NEED !created_only (#{created_only.to_s}) or !data_cloud_id.nil? (#{data_cloud_id})", loglevel, details: (cloud_id.nil? or cloud_id == data_cloud_id).to_s - if !created_only or !data_cloud_id.nil? - MU.log indent+"SINGLE MATCH findLitterMate(#{argstring})", loglevel, details: [data.mu_name, data_cloud_id, data.config.keys] - matches << data - end - end - end - } - } - - return matches.first if matches.size == 1 - if return_all and matches.size > 1 - return matches - end - } - - MU.log indent+"NO MATCH findLitterMate(#{argstring})", loglevel - - return nil - end - # Add or remove a resource's metadata to this deployment's structure and # flush it to disk. # @param type [String]: The type of resource (e.g. *server*, *database*). # @param key [String]: The name field of this resource. + # @param mu_name [String]: The mu_name of this resource. # @param data [Hash]: The resource's metadata. + # @param triggering_node [MU::Cloud]: A cloud object calling this notify, usually on behalf of itself # @param remove [Boolean]: Remove this resource from the deploy structure, instead of adding it. # @return [void] def notify(type, key, data, mu_name: nil, remove: false, triggering_node: nil, delayed_save: false) return if @no_artifacts - MU::MommaCat.lock("deployment-notification") - if !@need_deploy_flush or @deployment.nil? or @deployment.empty? - loadDeploy(true) # make sure we're saving the latest and greatest - end + begin + MU::MommaCat.lock("deployment-notification") - _shortclass, _cfg_name, cfg_plural, _classname, attrs = MU::Cloud.getResourceNames(type) - has_multiples = false + if !@need_deploy_flush or @deployment.nil? or @deployment.empty? + loadDeploy(true) # make sure we're saving the latest and greatest + end - # it's not always the case that we're logging data for a legal resource - # type, though that's what we're usually for - if cfg_plural - type = cfg_plural - has_multiples = attrs[:has_multiples] - end + _shortclass, _cfg_name, type, _classname, attrs = MU::Cloud.getResourceNames(type, false) + has_multiples = attrs[:has_multiples] ? true : false - if mu_name.nil? - if !data.nil? and !data["mu_name"].nil? - mu_name = data["mu_name"] + mu_name ||= if !data.nil? and !data["mu_name"].nil? + data["mu_name"] elsif !triggering_node.nil? and !triggering_node.mu_name.nil? - mu_name = triggering_node.mu_name + triggering_node.mu_name end if mu_name.nil? and has_multiples - MU.log "MU::MommaCat.notify called to modify deployment struct for a type (#{type}) with :has_multiples, but no mu_name available to look under #{key}. Call was #{caller[0]}", MU::WARN, details: data - MU::MommaCat.unlock("deployment-notification") + MU.log "MU::MommaCat.notify called to modify deployment struct for a type (#{type}) with :has_multiples, but no mu_name available to look under #{key}. Call was #{caller(1..1)}", MU::WARN, details: data return end - end - @need_deploy_flush = true + @need_deploy_flush = true - if !remove - if data.nil? 
- MU.log "MU::MommaCat.notify called to modify deployment struct, but no data provided", MU::WARN - MU::MommaCat.unlock("deployment-notification") - return - end - @notify_semaphore.synchronize { - @deployment[type] ||= {} - } - if has_multiples + if !remove + if data.nil? + MU.log "MU::MommaCat.notify called to modify deployment struct, but no data provided", MU::WARN + return + end @notify_semaphore.synchronize { - @deployment[type][key] ||= {} + @deployment[type] ||= {} } - # fix has_multiples classes that weren't tiered correctly - if @deployment[type][key].is_a?(Hash) and @deployment[type][key].has_key?("mu_name") - olddata = @deployment[type][key].dup - @deployment[type][key][olddata["mu_name"]] = olddata - end - @deployment[type][key][mu_name] = data - MU.log "Adding to @deployment[#{type}][#{key}][#{mu_name}]", MU::DEBUG, details: data - else - @deployment[type][key] = data - MU.log "Adding to @deployment[#{type}][#{key}]", MU::DEBUG, details: data - end - save!(key) if !delayed_save - else - have_deploy = true - if @deployment[type].nil? or @deployment[type][key].nil? - if has_multiples - MU.log "MU::MommaCat.notify called to remove #{type} #{key} #{mu_name} deployment struct, but no such data exist", MU::DEBUG + @notify_semaphore.synchronize { + @deployment[type][key] ||= {} + } + @deployment[type][key][mu_name] = data + MU.log "Adding to @deployment[#{type}][#{key}][#{mu_name}]", MU::DEBUG, details: data else - MU.log "MU::MommaCat.notify called to remove #{type} #{key} deployment struct, but no such data exist", MU::DEBUG + @deployment[type][key] = data + MU.log "Adding to @deployment[#{type}][#{key}]", MU::DEBUG, details: data + end + save!(key) if !delayed_save + else + have_deploy = true + if @deployment[type].nil? or @deployment[type][key].nil? + MU.log "MU::MommaCat.notify called to remove #{type} #{key}#{has_multiples ? " "+mu_name : ""} deployment struct, but no such data exist", MU::DEBUG + return end - MU::MommaCat.unlock("deployment-notification") - return - end + if have_deploy + @notify_semaphore.synchronize { + if has_multiples + MU.log "Removing @deployment[#{type}][#{key}][#{mu_name}]", MU::DEBUG, details: @deployment[type][key][mu_name] + @deployment[type][key].delete(mu_name) + end - if have_deploy - @notify_semaphore.synchronize { - if has_multiples - MU.log "Removing @deployment[#{type}][#{key}][#{mu_name}]", MU::DEBUG, details: @deployment[type][key][mu_name] - @deployment[type][key].delete(mu_name) - if @deployment[type][key].size == 0 + if @deployment[type][key].empty? or !has_multiples + MU.log "Removing @deployment[#{type}][#{key}]", MU::DEBUG, details: @deployment[type][key] @deployment[type].delete(key) end - else - MU.log "Removing @deployment[#{type}][#{key}]", MU::DEBUG, details: @deployment[type][key] - @deployment[type].delete(key) - end - if @deployment[type].size == 0 - @deployment.delete(type) - end - } - end - save! if !delayed_save + if @deployment[type].empty? + @deployment.delete(type) + end + } + end + save! if !delayed_save + end + ensure + MU::MommaCat.unlock("deployment-notification") end - - MU::MommaCat.unlock("deployment-notification") end # Send a Slack notification to a deployment's administrators. @@ -1331,13 +646,13 @@ def sendAdminMail(subject, msg: "", kitten: nil, data: nil, debug: false) to << "#{admin['name']} <#{admin['email']}>" } end - message = < To: #{to.join(",")} Subject: #{subject} #{msg} -MESSAGE_END +MAIL_HEAD_END if !kitten.nil? 
and kitten.kind_of?(MU::Cloud) message = message + "\n\n**** #{kitten}:\n" if !kitten.report.nil? @@ -1425,124 +740,58 @@ def signSSLCert(csr_path, sans = []) MU::Master::SSL.sign(csr_path, sans, for_user: MU.mu_user) end - # Make sure deployment data is synchronized to/from each node in the + # Make sure deployment data is synchronized to/from each +Server+ in the # currently-loaded deployment. + # @param nodeclasses [Array] + # @param triggering_node [String,MU::Cloud::Server] + # @param save_only [Boolean] def syncLitter(nodeclasses = [], triggering_node: nil, save_only: false) -# XXX take some config logic to decide what nodeclasses to hit? like, make -# inferences from dependencies or something? - - return if MU.syncLitterThread + return if MU.syncLitterThread # don't run recursively by accident return if !Dir.exist?(deploy_dir) - svrs = MU::Cloud.resource_types[:Server][:cfg_plural] # legibility shorthand - if !triggering_node.nil? and nodeclasses.size > 0 - nodeclasses.reject! { |n| n == triggering_node.to_s } - return if nodeclasses.size == 0 - end - - @kitten_semaphore.synchronize { - if @kittens.nil? or - @kittens[svrs].nil? - MU.log "No #{svrs} as yet available in #{@deploy_id}", MU::DEBUG, details: @kittens - return - end + if !triggering_node.nil? and triggering_node.is_a?(MU::Cloud::Server) + triggering_node = triggering_node.mu_name + end - MU.log "Updating these node classes in #{@deploy_id}", MU::DEBUG, details: nodeclasses - } + siblings = findLitterMate(type: "server", return_all: true) + return if siblings.nil? or siblings.empty? update_servers = [] - if nodeclasses.nil? or nodeclasses.size == 0 - litter = findLitterMate(type: "server", return_all: true) - return if litter.nil? - litter.each_pair { |mu_name, node| - if !triggering_node.nil? and ( - (triggering_node.is_a?(MU::Cloud::Server) and mu_name == triggering_node.mu_name) or - (triggering_node.is_a?(String) and mu_name == triggering_node) - ) - next - end - - if !node.groomer.nil? - update_servers << node - end - } - else - litter = {} - nodeclasses.each { |nodeclass| - mates = findLitterMate(type: "server", name: nodeclass, return_all: true) - litter.merge!(mates) if mates - } - litter.each_pair { |mu_name, node| - if !triggering_node.nil? and ( - (triggering_node.is_a?(MU::Cloud::Server) and mu_name == triggering_node.mu_name) or - (triggering_node.is_a?(String) and mu_name == triggering_node) - ) - next - end - - if !node.deploydata or !node.deploydata.keys.include?('nodename') - details = node.deploydata ? node.deploydata.keys : nil - MU.log "#{mu_name} deploy data is missing (possibly retired or mid-bootstrap), so not syncing it", MU::WARN, details: details - else - update_servers << node - end - } - end - return if update_servers.size == 0 - - MU.log "Updating these nodes in #{@deploy_id}", MU::DEBUG, details: update_servers.map { |n| n.mu_name } - - update_servers.each { |node| - # Not clear where this pollution comes from, but let's stick a temp - # fix in here. - if node.deploydata['nodename'] != node.mu_name and - !node.deploydata['nodename'].nil? and !node.deploydata['nodename'].emty? - MU.log "Node #{node.mu_name} had wrong or missing nodename (#{node.deploydata['nodename']}), correcting", MU::WARN - node.deploydata['nodename'] = node.mu_name - if @deployment[svrs] and @deployment[svrs][node.config['name']] and - @deployment[svrs][node.config['name']][node.mu_name] - @deployment[svrs][node.config['name']][node.mu_name]['nodename'] = node.mu_name - end - save! 
- end + siblings.each_pair { |mu_name, node| + next if mu_name == triggering_node or node.groomer.nil? + next if nodeclasses.size > 0 and !nodeclasses.include?(node.config['name']) + if !node.deploydata or !node.deploydata['nodename'] + MU.log "#{mu_name} deploy data is missing (possibly retired or mid-bootstrap), so not syncing it", MU::NOTICE + next + end + + if @deployment["servers"][node.config['name']][node.mu_name].nil? or + @deployment["servers"][node.config['name']][node.mu_name] != node.deploydata + @deployment["servers"][node.config['name']][node.mu_name] = node.deploydata + elsif !save_only + # Don't bother running grooms on nodes that don't need to be updated, + # unless we're just going to do a save. + next + end + update_servers << node } - # Merge everyone's deploydata together - if !save_only - skip = [] - update_servers.each { |node| - if node.mu_name.nil? or node.deploydata.nil? or node.config.nil? - MU.log "Missing mu_name #{node.mu_name}, deploydata, or config from #{node} in syncLitter", MU::ERR, details: node.deploydata - next - end + return if update_servers.empty? - if !@deployment[svrs][node.config['name']].has_key?(node.mu_name) or @deployment[svrs][node.config['name']][node.mu_name] != node.deploydata - @deployment[svrs][node.config['name']][node.mu_name] = node.deploydata - else - skip << node - end - } - update_servers = update_servers - skip - end + MU.log "Updating nodes in #{@deploy_id}", MU::DEBUG, details: update_servers.map { |n| n.mu_name } - return if MU.inGem? || update_servers.size < 1 threads = [] - parent_thread_id = Thread.current.object_id update_servers.each { |sibling| threads << Thread.new { Thread.abort_on_exception = true - MU.dupGlobals(parent_thread_id) Thread.current.thread_variable_set("name", "sync-"+sibling.mu_name.downcase) MU.setVar("syncLitterThread", true) begin - if sibling.config['groom'].nil? or sibling.config['groom'] - sibling.groomer.saveDeployData - sibling.groomer.run(purpose: "Synchronizing sibling kittens") if !save_only - end + sibling.groomer.saveDeployData + sibling.groomer.run(purpose: "Synchronizing sibling kittens") if !save_only rescue MU::Groomer::RunError => e - MU.log "Sync of #{sibling.mu_name} failed: #{e.inspect}", MU::WARN + MU.log "Sync of #{sibling.mu_name} failed", MU::WARN, details: e.inspect end - MU.purgeGlobals } } diff --git a/modules/mu/mommacat/naming.rb b/modules/mu/mommacat/naming.rb index e337260db..edb3c4045 100644 --- a/modules/mu/mommacat/naming.rb +++ b/modules/mu/mommacat/naming.rb @@ -19,6 +19,34 @@ module MU # the normal synchronous deploy sequence invoked by *mu-deploy*. class MommaCat + # Given a cloud provider's native descriptor for a resource, make some + # reasonable guesses about what the thing's name should be. + def self.guessName(desc, resourceclass, cloud_id: nil, tag_value: nil) + if desc.respond_to?(:tags) and + desc.tags.is_a?(Array) and + desc.tags.first.respond_to?(:key) and + desc.tags.map { |t| t.key }.include?("Name") + desc.tags.select { |t| t.key == "Name" }.first.value + else + try = nil + # Various GCP fields + [:display_name, :name, (resourceclass.cfg_name+"_name").to_sym].each { |field| + if desc.respond_to?(field) and desc.send(field).is_a?(String) + try = desc.send(field) + break + end + + } + try ||= if !tag_value.nil? + tag_value + else + cloud_id + end + try + end + + end + # Generate a three-character string which can be used to unique-ify the # names of resources which might potentially collide, e.g. 
Windows local # hostnames, Amazon Elastic Load Balancers, or server pool instances. diff --git a/modules/mu/mommacat/search.rb b/modules/mu/mommacat/search.rb new file mode 100644 index 000000000..bb380732e --- /dev/null +++ b/modules/mu/mommacat/search.rb @@ -0,0 +1,463 @@ +# Copyright:: Copyright (c) 2020 eGlobalTech, Inc., all rights reserved +# +# Licensed under the BSD-3 license (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License in the root of the project or at +# +# http://egt-labs.com/mu/LICENSE.html +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module MU + + # MommaCat is in charge of managing metadata about resources we've created, + # as well as orchestrating amongst them and bootstrapping nodes outside of + # the normal synchronous deploy sequence invoked by *mu-deploy*. + class MommaCat + + @@desc_semaphore = Mutex.new + + # A search which returned multiple matches, but is not allowed to + class MultipleMatches < MuError + def initialize(message = nil) + super(message, silent: true) + end + end + + # Locate a resource that's either a member of another deployment, or of no + # deployment at all, and return a {MU::Cloud} object for it. + # @param cloud [String]: The Cloud provider to use. + # @param type [String]: The resource type. Can be the full class name, symbolic name, or Basket of Kittens configuration shorthand for the resource type. + # @param deploy_id [String]: The identifier of an outside deploy to search. + # @param name [String]: The name of the resource as defined in its 'name' Basket of Kittens field, typically used in conjunction with deploy_id. + # @param mu_name [String]: The fully-resolved and deployed name of the resource, typically used in conjunction with deploy_id. + # @param cloud_id [String]: A cloud provider identifier for this resource. + # @param region [String]: The cloud provider region + # @param tag_key [String]: A cloud provider tag to help identify the resource, used in conjunction with tag_value. + # @param tag_value [String]: A cloud provider tag to help identify the resource, used in conjunction with tag_key. + # @param allow_multi [Boolean]: Permit an array of matching resources to be returned (if applicable) instead of just one. + # @param dummy_ok [Boolean]: Permit return of a faked {MU::Cloud} object if we don't have enough information to identify a real live one. + # @return [Array] + def self.findStray(cloud, type, + dummy_ok: false, + no_deploy_search: false, + allow_multi: false, + deploy_id: nil, + name: nil, + mu_name: nil, + cloud_id: nil, + credentials: nil, + region: nil, + tag_key: nil, + tag_value: nil, + calling_deploy: MU.mommacat, + habitats: [], + **flags + ) + _shortclass, _cfg_name, type, _classname, _attrs = MU::Cloud.getResourceNames(type, true) + + cloudclass = MU::Cloud.assertSupportedCloud(cloud) + return nil if cloudclass.virtual? + + if (tag_key and !tag_value) or (!tag_key and tag_value) + raise MuError, "Can't call findStray with only one of tag_key and tag_value set, must be both or neither" + end + + credlist = credentials ? 
[credentials] : cloudclass.listCredentials + + # Help ourselves by making more refined parameters out of mu_name, if + # they weren't passed explicitly + if mu_name + # We can extract a deploy_id from mu_name if we don't have one already + deploy_id ||= mu_name.sub(/^(\w+-\w+-\d{10}-[A-Z]{2})-/, '\1') + if !tag_key and !tag_value + tag_key = "Name" + tag_value = mu_name + end + end + + # See if the thing we're looking for is a member of the deploy that's + # asking after it. + if !deploy_id.nil? and !calling_deploy.nil? and + calling_deploy.deploy_id == deploy_id and (!name.nil? or !mu_name.nil?) + kitten = calling_deploy.findLitterMate(type: type, name: name, mu_name: mu_name, cloud_id: cloud_id, credentials: credentials) + return [kitten] if !kitten.nil? + end + + # See if we have it in deployment metadata generally + kittens = {} + if !no_deploy_search and (deploy_id or name or mu_name or cloud_id) + kittens = search_my_deploys(type, deploy_id: deploy_id, name: name, mu_name: mu_name, cloud_id: cloud_id, credentials: credentials) + return kittens.values if kittens.size == 1 + + # We can't refine any further by asking the cloud provider... + if kittens.size > 1 and !allow_multi and + !cloud_id and !tag_key and !tag_value + raise MultipleMatches, "Multiple matches in MU::MommaCat.findStray where none allowed from #{cloud}, #{type}, name: #{name}, mu_name: #{mu_name}, cloud_id: #{cloud_id}, credentials: #{credentials}, habitats: #{habitats} (#{caller(1..1)})" + end + end + + if !cloud_id and !(tag_key and tag_value) and (name or mu_name or deploy_id) + return kittens.values + end + matches = [] + + credlist.each { |creds| + cloud_descs = search_cloud_provider(type, cloud, habitats, region, cloud_id: cloud_id, tag_key: tag_key, tag_value: tag_value, credentials: creds, flags: flags) + + cloud_descs.each_pair.each { |p, regions| + regions.each_pair.each { |r, results| + results.each_pair { |kitten_cloud_id, descriptor| + # We already have a MU::Cloud object for this guy, use it + if kittens.has_key?(kitten_cloud_id) + matches << kittens[kitten_cloud_id] + elsif dummy_ok and kittens.empty? +# XXX this is why this was threaded + matches << generate_dummy_object(type, cloud, name, mu_name, kitten_cloud_id, descriptor, r, p, tag_value, calling_deploy, creds) + end + } + } + } + } + + matches + end + + # Return the resource object of another member of this deployment + # @param type [String,Symbol]: The type of resource + # @param name [String]: The name of the resource as defined in its 'name' Basket of Kittens field + # @param mu_name [String]: The fully-resolved and deployed name of the resource + # @param cloud_id [String]: The cloud provider's unique identifier for this resource + # @param created_only [Boolean]: Only return the littermate if its cloud_id method returns a value + # @param return_all [Boolean]: Return a Hash of matching objects indexed by their mu_name, instead of a single match. Only valid for resource types where has_multiples is true. + # @return [MU::Cloud] + def findLitterMate(type: nil, name: nil, mu_name: nil, cloud_id: nil, created_only: false, return_all: false, credentials: nil, habitat: nil, **flags) + _shortclass, _cfg_name, type, _classname, attrs = MU::Cloud.getResourceNames(type) + + # If we specified a habitat, which we may also have done by its shorthand + # sibling name, or a Ref. Convert to something we can use. + habitat = resolve_habitat(habitat, credentials: credentials) + + nofilter = (mu_name.nil? and cloud_id.nil? and credentials.nil?) 
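+        # A candidate matches when no identifying arguments were given at
+        # all (nofilter), or when it lines up with at least one of mu_name,
+        # cloud_id, or credentials and contradicts none of them; created_only
+        # additionally requires a populated cloud_id.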
+ + does_match = Proc.new { |obj| + + (!created_only or !obj.cloud_id.nil?) and (nofilter or ( + (mu_name and obj.mu_name and mu_name.to_s == obj.mu_name) or + (cloud_id and obj.cloud_id and cloud_id.to_s == obj.cloud_id.to_s) or + (credentials and obj.credentials and credentials.to_s == obj.credentials.to_s) and + !( + (mu_name and obj.mu_name and mu_name.to_s != obj.mu_name) or + (cloud_id and obj.cloud_id and cloud_id.to_s != obj.cloud_id.to_s) or + (credentials and obj.credentials and credentials.to_s != obj.credentials.to_s) + ) + )) + } + + @kitten_semaphore.synchronize { + return nil if !@kittens.has_key?(type) + matches = [] + + @kittens[type].each { |habitat_group, sib_classes| + next if habitat and habitat_group and habitat_group != habitat + sib_classes.each_pair { |sib_class, cloud_objs| + if attrs[:has_multiples] + next if !name.nil? and name != sib_class or cloud_objs.empty? + if !name.nil? + if return_all + return cloud_objs.dup + elsif cloud_objs.size == 1 and does_match.call(cloud_objs.values.first) + return cloud_objs.values.first + end + end + + cloud_objs.each_value { |obj| + if does_match.call(obj) + return (return_all ? cloud_objs.clone : obj.clone) + end + } + # has_multiples is false + elsif (name.nil? and does_match.call(cloud_objs)) or [sib_class, cloud_objs.virtual_name(name)].include?(name.to_s) + matches << cloud_objs.clone + end + } + } + + return matches.first if matches.size == 1 + + return matches if return_all and matches.size > 1 + } + + return nil + end + + + private + + def resolve_habitat(habitat, credentials: nil, debug: false) + return nil if habitat.nil? + if habitat.is_a?(MU::Config::Ref) and habitat.id + return habitat.id + else + realhabitat = findLitterMate(type: "habitat", name: habitat, credentials: credentials) + if realhabitat and realhabitat.mu_name + return realhabitat.cloud_id + elsif debug + MU.log "Failed to resolve habitat name #{habitat}", MU::WARN + end + end + end + + def self.generate_dummy_object(type, cloud, name, mu_name, cloud_id, desc, region, habitat, tag_value, calling_deploy, credentials) + resourceclass = MU::Cloud.loadCloudType(cloud, type) + + use_name = if (name.nil? or name.empty?) + if !mu_name.nil? + mu_name + else + guessName(desc, resourceclass, cloud_id: cloud_id, tag_value: tag_value) + end + else + name + end + + if use_name.nil? + return + end + + cfg = { + "name" => use_name, + "cloud" => cloud, + "credentials" => credentials + } + if !region.nil? and !resourceclass.isGlobal? + cfg["region"] = region + end + + if resourceclass.canLiveIn.include?(:Habitat) and habitat + cfg["project"] = habitat + end + + # If we can at least find the config from the deploy this will + # belong with, use that, even if it's an ungroomed resource. + if !calling_deploy.nil? and + !calling_deploy.original_config.nil? and + !calling_deploy.original_config[type+"s"].nil? 
+ calling_deploy.original_config[type+"s"].each { |s| + if s["name"] == use_name + cfg = s.dup + break + end + } + + return resourceclass.new(mommacat: calling_deploy, kitten_cfg: cfg, cloud_id: cloud_id) + else + if !@@dummy_cache[type] or !@@dummy_cache[type][cfg.to_s] + newobj = resourceclass.new(mu_name: use_name, kitten_cfg: cfg, cloud_id: cloud_id, from_cloud_desc: desc) + @@desc_semaphore.synchronize { + @@dummy_cache[type] ||= {} + @@dummy_cache[type][cfg.to_s] = newobj + } + end + return @@dummy_cache[type][cfg.to_s] + end + end + private_class_method :generate_dummy_object + + def self.search_cloud_provider(type, cloud, habitats, region, cloud_id: nil, tag_key: nil, tag_value: nil, credentials: nil, flags: nil) + cloudclass = MU::Cloud.assertSupportedCloud(cloud) + resourceclass = MU::Cloud.loadCloudType(cloud, type) + + # Decide what regions we'll search, if applicable for this resource + # type. + regions = if resourceclass.isGlobal? + [nil] + else + region ? [region] : cloudclass.listRegions(credentials: credentials) + end + + # Decide what habitats (accounts/projects/subscriptions) we'll + # search, if applicable for this resource type. + habitats ||= [] + if habitats.empty? + if resourceclass.canLiveIn.include?(nil) + habitats << nil + end + if resourceclass.canLiveIn.include?(:Habitat) + habitats.concat(cloudclass.listHabitats(credentials)) + end + end + habitats << nil if habitats.empty? + habitats.uniq! + + cloud_descs = {} + + thread_waiter = Proc.new { |threads, threshold| + begin + threads.each { |t| t.join(0.1) } + threads.reject! { |t| t.nil? or !t.alive? or !t.status } + sleep 1 if threads.size > threshold + end while threads.size > threshold + } + + habitat_threads = [] + found_the_thing = false + habitats.each { |hab| + break if found_the_thing + thread_waiter.call(habitat_threads, 5) + + habitat_threads << Thread.new(hab) { |habitat| + cloud_descs[habitat] = {} + region_threads = [] + regions.each { |reg| + break if found_the_thing + region_threads << Thread.new(reg) { |r| + found = resourceclass.find(cloud_id: cloud_id, region: r, tag_key: tag_key, tag_value: tag_value, credentials: credentials, habitat: habitat, flags: flags) + + if found + @@desc_semaphore.synchronize { + cloud_descs[habitat][r] = found + } + end + # Stop if you found the thing by a specific cloud_id + if cloud_id and found and !found.empty? + found_the_thing = true + end + } + } + thread_waiter.call(region_threads, 0) + } + } + thread_waiter.call(habitat_threads, 0) + + cloud_descs + end + private_class_method :search_cloud_provider + + def self.search_my_deploys(type, deploy_id: nil, name: nil, mu_name: nil, cloud_id: nil, credentials: nil) + kittens = {} + _shortclass, _cfg_name, type, _classname, attrs = MU::Cloud.getResourceNames(type, true) + + # Check our in-memory cache of live deploys before resorting to + # metadata + littercache = nil + # Sometimes we're called inside a locked thread, sometimes not. Deal + # with locking gracefully. + begin + @@litter_semaphore.synchronize { + littercache = @@litters.dup + } + rescue ThreadError => e + raise e if !e.message.match(/recursive locking/) + littercache = @@litters.dup + end + + # First, see what we have in deploys that already happen to be loaded in + # memory. 
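+      # While we're in here, opportunistically refresh the on-disk metadata
+      # cache entry for each deploy we already have live in memory.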
+        littercache.each_pair { |cur_deploy, momma|
+          next if deploy_id and deploy_id != cur_deploy
+
+          @@deploy_struct_semaphore.synchronize {
+            @@deploy_cache[cur_deploy] = {
+              "mtime" => Time.now,
+              "data" => momma.deployment
+            }
+          }
+
+          straykitten = momma.findLitterMate(type: type, cloud_id: cloud_id, name: name, mu_name: mu_name, credentials: credentials, created_only: true)
+          if straykitten
+            MU.log "Found matching kitten #{straykitten.mu_name} in-memory", MU::DEBUG
+            # Peace out if we found the exact resource we want
+            if cloud_id and straykitten.cloud_id.to_s == cloud_id.to_s
+              return { straykitten.cloud_id => straykitten }
+            elsif mu_name and straykitten.mu_name == mu_name
+              return { straykitten.cloud_id => straykitten }
+            else
+              kittens[straykitten.cloud_id] ||= straykitten
+            end
+          end
+        }
+
+        # Now go rifle metadata from any other deploys we have on disk, if they
+        # weren't already there in memory.
+        cacheDeployMetadata(deploy_id) # freshen up @@deploy_cache
+        mu_descs = {}
+        if deploy_id.nil?
+          @@deploy_cache.each_key { |deploy|
+            next if littercache[deploy]
+            next if !@@deploy_cache[deploy].has_key?('data')
+            next if !@@deploy_cache[deploy]['data'].has_key?(type)
+            if !name.nil?
+              next if @@deploy_cache[deploy]['data'][type][name].nil?
+              mu_descs[deploy] ||= []
+              mu_descs[deploy] << @@deploy_cache[deploy]['data'][type][name].dup
+            else
+              mu_descs[deploy] ||= []
+              mu_descs[deploy].concat(@@deploy_cache[deploy]['data'][type].values)
+            end
+          }
+        elsif !@@deploy_cache[deploy_id].nil?
+          if !@@deploy_cache[deploy_id]['data'].nil? and
+             !@@deploy_cache[deploy_id]['data'][type].nil?
+            if !name.nil? and !@@deploy_cache[deploy_id]['data'][type][name].nil?
+              mu_descs[deploy_id] ||= []
+              mu_descs[deploy_id] << @@deploy_cache[deploy_id]['data'][type][name].dup
+            else
+              mu_descs[deploy_id] = @@deploy_cache[deploy_id]['data'][type].values
+            end
+          end
+        end
+
+        mu_descs.each_pair { |deploy, matches|
+          next if matches.nil? or matches.size == 0
+          momma = MU::MommaCat.getLitter(deploy)
+
+          # If we found exactly one match in this deploy, use its metadata to
+          # guess at resource names we weren't told.
+          straykitten = if matches.size > 1 and cloud_id
+            momma.findLitterMate(type: type, cloud_id: cloud_id, credentials: credentials, created_only: true)
+          elsif matches.size == 1 and (!attrs[:has_multiples] or matches.first.size == 1) and name.nil? and mu_name.nil?
+            actual_data = attrs[:has_multiples] ? matches.first.values.first : matches.first
+            if cloud_id.nil?
+              momma.findLitterMate(type: type, name: (actual_data["name"] || actual_data["MU_NODE_CLASS"]), cloud_id: actual_data["cloud_id"], credentials: credentials)
+            else
+              momma.findLitterMate(type: type, name: (actual_data["name"] || actual_data["MU_NODE_CLASS"]), cloud_id: cloud_id, credentials: credentials)
+            end
+          else
+            # There's more than one of this type of resource in the target
+            # deploy, so see if findLitterMate can narrow it down for us
+            momma.findLitterMate(type: type, name: name, mu_name: mu_name, cloud_id: cloud_id, credentials: credentials)
+          end
+
+          next if straykitten.nil?
+          straykitten.intoDeploy(momma)
+
+          if straykitten.cloud_id.nil?
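+            # (A nil cloud_id can't serve as a key in the kittens hash this
+            # method returns, so warn and skip rather than hand back a match
+            # we can't index.)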
+ MU.log "findStray: kitten #{straykitten.mu_name} came back with nil cloud_id", MU::WARN + next + end + next if cloud_id and straykitten.cloud_id.to_s != cloud_id.to_s + + # Peace out if we found the exact resource we want + if (cloud_id and straykitten.cloud_id.to_s == cloud_id.to_s) or + (mu_descs.size == 1 and matches.size == 1) or + (credentials and straykitten.credentials == credentials) +# XXX strictly speaking this last check is only valid if findStray is searching +# exactly one set of credentials + + return { straykitten.cloud_id => straykitten } + end + + kittens[straykitten.cloud_id] ||= straykitten + } + + kittens + end + private_class_method :search_my_deploys + + end #class +end #module diff --git a/modules/mu/mommacat/storage.rb b/modules/mu/mommacat/storage.rb index f0cae938a..d340acdc9 100644 --- a/modules/mu/mommacat/storage.rb +++ b/modules/mu/mommacat/storage.rb @@ -332,32 +332,17 @@ def save!(triggering_node = nil, force: false, origin: nil) Dir.mkdir(deploy_dir, 0700) end - if !origin.nil? - o_file = File.new("#{deploy_dir}/origin.json", File::CREAT|File::TRUNC|File::RDWR, 0600) - o_file.puts JSON.pretty_generate(origin) - o_file.close - end - - if !@private_key.nil? - privkey = File.new("#{deploy_dir}/private_key", File::CREAT|File::TRUNC|File::RDWR, 0600) - privkey.puts @private_key - privkey.close - end - - if !@public_key.nil? - pubkey = File.new("#{deploy_dir}/public_key", File::CREAT|File::TRUNC|File::RDWR, 0600) - pubkey.puts @public_key - pubkey.close - end + writeFile("origin.json", JSON.pretty_generate(origin)) if !origin.nil? + writeFile("private_key", @private_key) if !@private_key.nil? + writeFile("public_key", @public_key) if !@public_key.nil? if !@deployment.nil? and @deployment.size > 0 @deployment['handle'] = MU.handle if @deployment['handle'].nil? and !MU.handle.nil? - @deployment['public_key'] = @public_key - @deployment['timestamp'] ||= @timestamp - @deployment['seed'] ||= @seed - @deployment['appname'] ||= @appname - @deployment['handle'] ||= @handle - @deployment['ssh_public_key'] ||= @ssh_public_key if @ssh_public_key + [:public_key, :timestamp, :seed, :appname, :handle, :ssh_public_key].each { |var| + value = instance_variable_get(("@"+var.to_s).to_sym) + @deployment[var.to_s] = value if value + } + begin # XXX doing this to trigger JSON errors before stomping the stored # file... @@ -380,36 +365,15 @@ def save!(triggering_node = nil, force: false, origin: nil) end if !@original_config.nil? and @original_config.is_a?(Hash) - config = File.new("#{deploy_dir}/basket_of_kittens.json", File::CREAT|File::TRUNC|File::RDWR, 0600) - config.puts JSON.pretty_generate(MU::Config.manxify(@original_config)) - config.close + writeFile("basket_of_kittens.json", JSON.pretty_generate(MU::Config.manxify(@original_config))) end - if !@ssh_private_key.nil? - key = File.new("#{deploy_dir}/node_ssh.key", File::CREAT|File::TRUNC|File::RDWR, 0600) - key.puts @ssh_private_key - key.close - end - if !@ssh_public_key.nil? - key = File.new("#{deploy_dir}/node_ssh.pub", File::CREAT|File::TRUNC|File::RDWR, 0600) - key.puts @ssh_public_key - key.close - end - if !@ssh_key_name.nil? - key = File.new("#{deploy_dir}/ssh_key_name", File::CREAT|File::TRUNC|File::RDWR, 0600) - key.puts @ssh_key_name - key.close - end - if !@environment.nil? - env = File.new("#{deploy_dir}/environment_name", File::CREAT|File::TRUNC|File::RDWR, 0600) - env.puts @environment - env.close - end - if !@deploy_secret.nil? 
- secret = File.new("#{deploy_dir}/deploy_secret", File::CREAT|File::TRUNC|File::RDWR, 0600) - secret.print @deploy_secret - secret.close - end + writeFile("node_ssh.key", @ssh_private_key) if !@ssh_private_key.nil? + writeFile("node_ssh.pub", @ssh_public_key) if !@ssh_public_key.nil? + writeFile("ssh_key_name", @ssh_key_name) if !@ssh_key_name.nil? + writeFile("environment_name", @environment) if !@environment.nil? + writeFile("deploy_secret", @deploy_secret) if !@deploy_secret.nil? + if !@secrets.nil? secretdir = "#{deploy_dir}/secrets" if !Dir.exist?(secretdir) @@ -418,9 +382,7 @@ def save!(triggering_node = nil, force: false, origin: nil) end @secrets.each_pair { |type, servers| servers.each_pair { |server, svr_secret| - key = File.new("#{secretdir}/#{type}.#{server}", File::CREAT|File::TRUNC|File::RDWR, 0600) - key.puts svr_secret - key.close + writeFile("secrets/#{type}.#{server}", svr_secret) } } end @@ -430,146 +392,64 @@ def save!(triggering_node = nil, force: false, origin: nil) syncLitter(@deployment['servers'].keys, triggering_node: triggering_node, save_only: true) if @deployment.has_key?("servers") end - # Find one or more resources by their Mu resource name, and return - # MommaCat objects for their containing deploys, their BoK config data, - # and their deployment data. - # - # @param type [String]: The type of resource, e.g. "vpc" or "server." - # @param name [String]: The Mu resource class, typically the name field of a Basket of Kittens resource declaration. - # @param mu_name [String]: The fully-expanded Mu resource name, e.g. MGMT-PROD-2015040115-FR-ADMGMT2 - # @param deploy_id [String]: The deployment to search. Will search all deployments if not specified. - # @return [Hash,Array] - def self.getResourceMetadata(type, name: nil, deploy_id: nil, use_cache: true, mu_name: nil) - if type.nil? - raise MuError, "Can't call getResourceMetadata without a type argument" - end - _shortclass, _cfg_name, type, _classname = MU::Cloud.getResourceNames(type) - - # first, check our in-memory deploys, which may or may not have been - # written to disk yet. - littercache = nil - begin - @@litter_semaphore.synchronize { - littercache = @@litters.dup - } - rescue ThreadError => e - # already locked by a parent caller and this is a read op, so this is ok - raise e if !e.message.match(/recursive locking/) - littercache = @@litters.dup - end - littercache.each_pair { |deploy, momma| - @@deploy_struct_semaphore.synchronize { - @deploy_cache[deploy] = { - "mtime" => Time.now, - "data" => momma.deployment - } - } - } - + # Read all of our +deployment.json+ files in and stick them in a hash. Used + # by search routines that just need to skim this data without loading + # entire {MU::MommaCat} objects. + def self.cacheDeployMetadata(deploy_id = nil, use_cache: false) deploy_root = File.expand_path(MU.dataDir+"/deployments") MU::MommaCat.deploy_struct_semaphore.synchronize { - if Dir.exist?(deploy_root) - Dir.entries(deploy_root).each { |deploy| - this_deploy_dir = deploy_root+"/"+deploy - next if deploy == "." or deploy == ".." or !Dir.exist?(this_deploy_dir) - next if deploy_id and deploy_id != deploy - - if !File.size?(this_deploy_dir+"/deployment.json") - MU.log "#{this_deploy_dir}/deployment.json doesn't exist, skipping when loading cache", MU::DEBUG - next - end - if @deploy_cache[deploy].nil? 
or !use_cache - @deploy_cache[deploy] = Hash.new - elsif @deploy_cache[deploy]['mtime'] == File.mtime("#{this_deploy_dir}/deployment.json") - MU.log "Using cached copy of deploy #{deploy} from #{@deploy_cache[deploy]['mtime']}", MU::DEBUG + @@deploy_cache ||= {} + return if !Dir.exist?(deploy_root) - next - end + Dir.entries(deploy_root).each { |deploy| + this_deploy_dir = deploy_root+"/"+deploy + this_deploy_file = this_deploy_dir+"/deployment.json" - @deploy_cache[deploy] = Hash.new if !@deploy_cache.has_key?(deploy) - MU.log "Caching deploy #{deploy}", MU::DEBUG - lock = File.open("#{this_deploy_dir}/deployment.json", File::RDONLY) - lock.flock(File::LOCK_EX) - @deploy_cache[deploy]['mtime'] = File.mtime("#{this_deploy_dir}/deployment.json") + if deploy == "." or deploy == ".." or !Dir.exist?(this_deploy_dir) or + (deploy_id and deploy_id != deploy) or + !File.size?(this_deploy_file) or + (use_cache and @@deploy_cache[deploy] and @@deploy_cache[deploy]['mtime'] == File.mtime(this_deploy_file)) + next + end - begin - @deploy_cache[deploy]['data'] = JSON.parse(File.read("#{this_deploy_dir}/deployment.json")) - lock.flock(File::LOCK_UN) - - next if @deploy_cache[deploy].nil? or @deploy_cache[deploy]['data'].nil? - # Populate some generable entries that should be in the deploy - # data. Also, bounce out if we realize we've found exactly what - # we needed already. - MU::Cloud.resource_types.values.each { |attrs| - - next if @deploy_cache[deploy]['data'][attrs[:cfg_plural]].nil? - if !attrs[:has_multiples] - @deploy_cache[deploy]['data'][attrs[:cfg_plural]].each_pair { |nodename, data| -# XXX we don't actually store node names for some resources, need to farm them -# and fix metadata -# if !mu_name.nil? and nodename == mu_name -# return { deploy => [data] } -# end - } - else - @deploy_cache[deploy]['data'][attrs[:cfg_plural]].each_pair { |node_class, nodes| - next if nodes.nil? or !nodes.is_a?(Hash) - nodes.each_pair { |nodename, data| - next if !data.is_a?(Hash) - data['#MU_NODE_CLASS'] = node_class - if !data.has_key?("cloud") # XXX kludge until old metadata gets fixed - data["cloud"] = MU::Config.defaultCloud - end - data['#MU_NAME'] = nodename - if !mu_name.nil? and nodename == mu_name - return {deploy => [data]} if deploy_id && deploy == deploy_id - end - } + @@deploy_cache[deploy] ||= {} + + MU.log "Caching deploy #{deploy}", MU::DEBUG + lock = File.open(this_deploy_file, File::RDONLY) + lock.flock(File::LOCK_EX) + @@deploy_cache[deploy]['mtime'] = File.mtime(this_deploy_file) + + begin + @@deploy_cache[deploy]['data'] = JSON.parse(File.read(this_deploy_file)) + next if @@deploy_cache[deploy]['data'].nil? + # Populate some generable entries that should be in the deploy + # data. Also, bounce out if we realize we've found exactly what + # we needed already. + MU::Cloud.resource_types.values.each { |attrs| + + next if @@deploy_cache[deploy]['data'][attrs[:cfg_plural]].nil? + if attrs[:has_multiples] + @@deploy_cache[deploy]['data'][attrs[:cfg_plural]].each_pair { |node_class, nodes| + next if nodes.nil? 
or !nodes.is_a?(Hash) + nodes.each_pair { |nodename, data| + next if !data.is_a?(Hash) + data['#MU_NODE_CLASS'] ||= node_class + data['#MU_NAME'] ||= nodename + data["cloud"] ||= MU::Config.defaultCloud } - end - } - rescue JSON::ParserError => e - raise MuError, "JSON parse failed on #{this_deploy_dir}/deployment.json\n\n"+File.read("#{this_deploy_dir}/deployment.json") - end + } + end + } + rescue JSON::ParserError + raise MuError, "JSON parse failed on #{this_deploy_file}\n\n"+File.read(this_deploy_file) + ensure lock.flock(File::LOCK_UN) lock.close - } - end - } - - matches = {} - - if deploy_id.nil? - @deploy_cache.each_key { |deploy| - next if !@deploy_cache[deploy].has_key?('data') - next if !@deploy_cache[deploy]['data'].has_key?(type) - if !name.nil? - next if @deploy_cache[deploy]['data'][type][name].nil? - matches[deploy] ||= [] - matches[deploy] << @deploy_cache[deploy]['data'][type][name].dup - else - matches[deploy] ||= [] - matches[deploy].concat(@deploy_cache[deploy]['data'][type].values) end } - return matches - elsif !@deploy_cache[deploy_id].nil? - if !@deploy_cache[deploy_id]['data'].nil? and - !@deploy_cache[deploy_id]['data'][type].nil? - if !name.nil? - if !@deploy_cache[deploy_id]['data'][type][name].nil? - matches[deploy_id] ||= [] - matches[deploy_id] << @deploy_cache[deploy_id]['data'][type][name].dup - else - return matches # nothing, actually - end - else - matches[deploy_id] = @deploy_cache[deploy_id]['data'][type].values - end - end - end + } - return matches + @@deploy_cache end # Get the deploy directory @@ -604,6 +484,128 @@ def self.deploy_exists?(deploy_id) end private + + def writeFile(filename, contents) + file = File.new("#{deploy_dir}/#{filename}", File::CREAT|File::TRUNC|File::RDWR, 0600) + file.puts contents + file.close + end + + # Helper for +initialize+ + def setDeploySecret + credsets = {} + MU::Cloud.resource_types.values.each { |attrs| + if !@original_config[attrs[:cfg_plural]].nil? and @original_config[attrs[:cfg_plural]].size > 0 + @original_config[attrs[:cfg_plural]].each { |resource| + + credsets[resource['cloud']] ||= [] + credsets[resource['cloud']] << resource['credentials'] + @clouds[resource['cloud']] = 0 if !@clouds.has_key?(resource['cloud']) + @clouds[resource['cloud']] = @clouds[resource['cloud']] + 1 + + } + end + } + + MU.log "Creating deploy secret for #{MU.deploy_id}" + @deploy_secret = Password.random(256) + if !@original_config['scrub_mu_isms'] and !@no_artifacts + credsets.each_pair { |cloud, creds| + creds.uniq! + cloudclass = Object.const_get("MU").const_get("Cloud").const_get(cloud) + creds.each { |credentials| + cloudclass.writeDeploySecret(@deploy_id, @deploy_secret, credentials: credentials) + } + } + end + end + + def loadObjects(delay_descriptor_load) + MU::Cloud.resource_types.each_pair { |res_type, attrs| + type = attrs[:cfg_plural] + next if !@deployment.has_key?(type) + + @deployment[type].each_pair { |res_name, data| + orig_cfg = nil + if @original_config.has_key?(type) + @original_config[type].each { |resource| + if resource["name"] == res_name + orig_cfg = resource + break + end + } + end + + # Some Server objects originated from ServerPools, get their + # configs from there + if type == "servers" and orig_cfg.nil? and + @original_config.has_key?("server_pools") + @original_config["server_pools"].each { |resource| + if resource["name"] == res_name + orig_cfg = resource + break + end + } + end + + if orig_cfg.nil? 
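+            # (Some legacy resource types never recorded a config block in
+            # deploy metadata, so a miss is routine for them; only warn for
+            # types that should have one.)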
+ MU.log "Failed to locate original config for #{attrs[:cfg_name]} #{res_name} in #{@deploy_id}", MU::WARN if !["firewall_rules", "databases", "storage_pools", "cache_clusters", "alarms"].include?(type) # XXX shaddap + next + end + + if orig_cfg['vpc'] and orig_cfg['vpc'].is_a?(Hash) + ref = if orig_cfg['vpc']['id'] and orig_cfg['vpc']['id'].is_a?(Hash) + orig_cfg['vpc']['id']['mommacat'] = self + MU::Config::Ref.get(orig_cfg['vpc']['id']) + else + orig_cfg['vpc']['mommacat'] = self + MU::Config::Ref.get(orig_cfg['vpc']) + end + orig_cfg['vpc'].delete('mommacat') + orig_cfg['vpc'] = ref if ref.kitten(shallow: true) + end + + begin + # Load up MU::Cloud objects for all our kittens in this deploy + orig_cfg['environment'] = @environment # not always set in old deploys + if attrs[:has_multiples] + data.keys.each { |mu_name| + attrs[:interface].new(mommacat: self, kitten_cfg: orig_cfg, mu_name: mu_name, delay_descriptor_load: delay_descriptor_load) + } + else + # XXX hack for old deployments, this can go away some day + if data['mu_name'].nil? + raise MuError, "Unable to find or guess a Mu name for #{res_type}: #{res_name} in #{@deploy_id}" + end + attrs[:interface].new(mommacat: self, kitten_cfg: orig_cfg, mu_name: data['mu_name'], cloud_id: data['cloud_id']) + end + rescue StandardError => e + if e.class != MU::Cloud::MuCloudResourceNotImplemented + MU.log "Failed to load an existing resource of type '#{type}' in #{@deploy_id}: #{e.inspect}", MU::WARN, details: e.backtrace + end + end + } + } + end + + # Helper for +initialize+ + def initDeployDirectory + if !Dir.exist?(MU.dataDir+"/deployments") + MU.log "Creating #{MU.dataDir}/deployments", MU::DEBUG + Dir.mkdir(MU.dataDir+"/deployments", 0700) + end + path = File.expand_path(MU.dataDir+"/deployments")+"/"+@deploy_id + if !Dir.exist?(path) + MU.log "Creating #{path}", MU::DEBUG + Dir.mkdir(path, 0700) + end + + @ssh_key_name, @ssh_private_key, @ssh_public_key = self.SSHKey + if !File.exist?(deploy_dir+"/private_key") + @private_key, @public_key = createDeployKey + end + + end ########################################################################### ########################################################################### diff --git a/modules/tests/super_simple_bok.yml b/modules/tests/super_simple_bok.yml index c42317880..069678d31 100644 --- a/modules/tests/super_simple_bok.yml +++ b/modules/tests/super_simple_bok.yml @@ -4,7 +4,7 @@ appname: smoketest parameters: - name: complexity - default: complex + default: simple - name: vpc_name required: false - name: instance_type @@ -20,8 +20,6 @@ parameters: - name: server_pools_name default: superBoK_ServerPool -<% $complexity = 'complex' %> - vpcs: - <%= include("../mu/config/vpc.yml") %>
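One behavioral nuance worth flagging in the writeFile consolidation in storage.rb above: IO#puts appends a newline, while the old deploy_secret branch used print and wrote its contents byte-for-byte. If anything compares deploy_secret exactly, a content-preserving variant of the helper might look like this (a sketch under that assumption, not what this diff implements):

def writeFile(filename, contents, raw: false)
  file = File.new("#{deploy_dir}/#{filename}", File::CREAT|File::TRUNC|File::RDWR, 0600)
  # print writes contents verbatim; puts would append a trailing newline
  raw ? file.print(contents) : file.puts(contents)
  file.close
end

writeFile("deploy_secret", @deploy_secret, raw: true) if !@deploy_secret.nil?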