diff --git a/hack/run-tests.sh b/hack/run-tests.sh index 330c2d102..352f90614 100755 --- a/hack/run-tests.sh +++ b/hack/run-tests.sh @@ -7,8 +7,10 @@ source hack/common.sh prefix=${DOCKER_PREFIX:-kubevirt} tag=${DOCKER_TAG:-v0.13.3} kubeconfig=${KUBECONFIG:-~/.kube/config} +oc_in_framework="oc" +virtctl_in_framework="virtctl" [ -z "$OC_PATH" ] && OC_PATH=$(command -v oc) [ -z "$KUBECTL_PATH" ] && KUBECTL_PATH=$(which kubectl) [ -z "$VIRTCTL_PATH" ] && VIRTCTL_PATH=$(which virtctl) -${TESTS_OUT_DIR}/tests.test -kubeconfig=$kubeconfig -container-tag=$tag -container-prefix=$prefix -oc-path=${OC_PATH} -kubectl-path=${KUBECTL_PATH} -virtctl-path=${VIRTCTL_PATH} ${FUNC_TEST_ARGS} +OC_IN_FRAMEWORK=$oc_in_framework VIRTCTL_IN_FRAMEWORK=$virtctl_in_framework ${TESTS_OUT_DIR}/tests.test -kubeconfig=$kubeconfig -container-tag=$tag -container-prefix=$prefix -oc-path=${OC_PATH} -kubectl-path=${KUBECTL_PATH} -virtctl-path=${VIRTCTL_PATH} ${FUNC_TEST_ARGS} diff --git a/playbooks/provider/lago/LagoInitFile.yml.j2 b/playbooks/provider/lago/LagoInitFile.yml.j2 index d6fb07ac0..10280cbcb 100644 --- a/playbooks/provider/lago/LagoInitFile.yml.j2 +++ b/playbooks/provider/lago/LagoInitFile.yml.j2 @@ -19,6 +19,7 @@ host-settings: &nodes-settings groups: [nodes] {% endif %} memory: 4096 + vcpu: 8 disks: - template_name: {{ lago_vm_image }} type: template @@ -49,6 +50,7 @@ domains: groups: [masters, nodes, etcd, nfs] {% endif %} memory: 4096 + vcpu: 8 nics: - ip: 192.168.200.2 net: lago-management-network diff --git a/roles/kubevirt_web_ui/tasks/provision.web-ui.yml b/roles/kubevirt_web_ui/tasks/provision.web-ui.yml index 7af85fd22..cb9edcdad 100644 --- a/roles/kubevirt_web_ui/tasks/provision.web-ui.yml +++ b/roles/kubevirt_web_ui/tasks/provision.web-ui.yml @@ -58,6 +58,7 @@ retries: 30 delay: 10 when: kubevirt_web_ui_version_effective != "" + ignore_errors: yes # The Web UI is deprovisioned either when # - KWebUI CR is missing @@ -70,4 +71,5 @@ retries: 30 delay: 10 when: 
kubevirt_web_ui_version_effective == "" + ignore_errors: yes diff --git a/tests/framework/types.go b/tests/framework/types.go index 2223fec1a..7f893c364 100644 --- a/tests/framework/types.go +++ b/tests/framework/types.go @@ -3,6 +3,7 @@ package framework import ( "errors" "io/ioutil" + "os" "strings" "time" @@ -25,6 +26,11 @@ const ( LongTimeout = time.Duration(4) * time.Minute ) +var ( + ocName = os.Getenv("OC_IN_FRAMEWORK") + virtctlName = os.Getenv("VIRTCTL_IN_FRAMEWORK") +) + // VirtualMachine can be a vm, vmi, vmirs, vmiPreset. type VirtualMachine struct { Name string @@ -39,22 +45,22 @@ type VirtualMachine struct { func (vm VirtualMachine) Create() (string, string, error) { args := []string{"create", "-f", vm.Manifest} - return ktests.RunCommandWithNS(vm.Namespace, ktests.KubeVirtOcPath, args...) + return ktests.RunCommandWithNS(vm.Namespace, ocName, args...) } func (vm VirtualMachine) Start() (string, string, error) { args := []string{"start", vm.Name} - return ktests.RunCommandWithNS(vm.Namespace, ktests.KubeVirtVirtctlPath, args...) + return ktests.RunCommandWithNS(vm.Namespace, virtctlName, args...) } func (vm VirtualMachine) Stop() (string, string, error) { args := []string{"stop", vm.Name} - return ktests.RunCommandWithNS(vm.Namespace, ktests.KubeVirtVirtctlPath, args...) + return ktests.RunCommandWithNS(vm.Namespace, virtctlName, args...) } func (vm VirtualMachine) Delete() (string, string, error) { args := []string{"delete", vm.Type, vm.Name} - return ktests.RunCommandWithNS(vm.Namespace, ktests.KubeVirtVirtctlPath, args...) + return ktests.RunCommandWithNS(vm.Namespace, ocName, args...) } func (vm VirtualMachine) IsRunning() (bool, error) { @@ -73,7 +79,7 @@ func (vm VirtualMachine) IsRunning() (bool, error) { func (vm VirtualMachine) GetVMInfo(spec string) (string, string, error) { args := []string{"get", vm.Type, vm.Name, "--template", spec} - return ktests.RunCommandWithNS(vm.Namespace, ktests.KubeVirtOcPath, args...) 
+ return ktests.RunCommandWithNS(vm.Namespace, ocName, args...) } func (vm VirtualMachine) GetVMUID() (string, error) { @@ -100,7 +106,7 @@ func (vm VirtualMachine) ProcessTemplate() (string, error) { args = append(args, vm.TemplateParams...) - output, cmderr, err := ktests.RunCommandWithNS(NamespaceTestTemplate, ktests.KubeVirtOcPath, args...) + output, cmderr, err := ktests.RunCommandWithNS(NamespaceTestTemplate, ocName, args...) if err != nil { return "", err } diff --git a/tests/framework/util.go b/tests/framework/util.go index 5c648b3fa..7fc39c8c7 100644 --- a/tests/framework/util.go +++ b/tests/framework/util.go @@ -15,6 +15,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "kubevirt.io/kubevirt/pkg/kubecli" ktests "kubevirt.io/kubevirt/tests" + "math" ) func ProcessTemplateWithParameters(srcFilePath, dstFilePath string, params ...string) string { @@ -148,3 +149,38 @@ func RemoveDataVolume(dvName string, namespace string) { err = virtCli.CdiClient().CdiV1alpha1().DataVolumes(namespace).Delete(dvName, nil) Expect(err).ToNot(HaveOccurred()) } + +func GetAvailableResources(virtClient kubecli.KubevirtClient, cpuNeeded int64, memNeeded int64) (int, int) { + nodeList := ktests.GetAllSchedulableNodes(virtClient) + cpu_limit_total, mem_limit_total := 0, 0 + + for _, node := range nodeList.Items { + cpu := node.Status.Allocatable["cpu"] + mem := node.Status.Allocatable["memory"] + available_cpu, CpuOK := (&cpu).AsInt64() + available_mem, MemOK := (&mem).AsInt64() + if CpuOK && MemOK { + cpu_limit := int(available_cpu / cpuNeeded) + mem_limit := int(available_mem / memNeeded) + cpu_limit_total += cpu_limit + mem_limit_total += mem_limit + //availableVMs += int(math.Min(float64(cpu_limit), float64(mem_limit))) + } + } + + return cpu_limit_total, mem_limit_total +} + +// Checking if the cluster can run at least one VM +func IsEnoughResources(virtClient kubecli.KubevirtClient, cpuNeeded int, memNeeded int64) (bool, int) { + cpu_limit, mem_limit := 
GetAvailableResources(virtClient, int64(cpuNeeded), int64(memNeeded)) + availableVMs := int(math.Min(float64(cpu_limit), float64(mem_limit))) + if availableVMs == 0 { + return false, availableVMs + + } else { + return true, availableVMs + + } + +} diff --git a/tests/framework/vnc_console.go b/tests/framework/vnc_console.go index a9e95852f..d36e72495 100644 --- a/tests/framework/vnc_console.go +++ b/tests/framework/vnc_console.go @@ -54,7 +54,7 @@ func OpenConsole(virtCli kubecli.KubevirtClient, vmiName string, vmiNamespace st }, timeout, opts...) } -func LoggedInFedoraExpecter(vmiName string, vmiNamespace string, timeout int64) (expect.Expecter, error) { +func LoggedInFedoraExpecter(vmiName string, vmiNamespace string, timeout int64, vmNameInPrompt bool) (expect.Expecter, error) { virtClient, err := kubecli.GetKubevirtClient() ktests.PanicOnError(err) vmi, err := virtClient.VirtualMachineInstance(vmiNamespace).Get(vmiName, &metav1.GetOptions{}) @@ -63,10 +63,19 @@ func LoggedInFedoraExpecter(vmiName string, vmiNamespace string, timeout int64) if err != nil { return nil, err } + + loginPrompt := "" + + if vmNameInPrompt { + loginPrompt = vmiName + " " + "login:" + } else { + loginPrompt = "login:" + } + b := append([]expect.Batcher{ &expect.BSnd{S: "\n"}, &expect.BSnd{S: "\n"}, - &expect.BExp{R: "login:"}, + &expect.BExp{R: loginPrompt}, &expect.BSnd{S: "fedora\n"}, &expect.BExp{R: "Password:"}, &expect.BSnd{S: "fedora\n"}, diff --git a/tests/manifests/sockets_cores_threads/vm-template-fedora-no-sockets-cores-and-threads.yaml b/tests/manifests/sockets_cores_threads/vm-template-fedora-no-sockets-cores-and-threads.yaml new file mode 100644 index 000000000..11c1f0e7a --- /dev/null +++ b/tests/manifests/sockets_cores_threads/vm-template-fedora-no-sockets-cores-and-threads.yaml @@ -0,0 +1,60 @@ +--- +apiVersion: v1 +kind: Template +metadata: + annotations: + description: OCP KubeVirt Fedora 27 VM template + iconClass: icon-fedora + tags: 
kubevirt,ocp,template,linux,virtualmachine + labels: + kubevirt.io/os: fedora27 + miq.github.io/kubevirt-is-vm-template: "true" + name: vm-template-fedora +objects: +- apiVersion: kubevirt.io/v1alpha3 + kind: VirtualMachine + metadata: + creationTimestamp: null + labels: + kubevirt.io/os: fedora27 + name: ${NAME} + spec: + running: false + template: + metadata: + creationTimestamp: null + labels: + kubevirt.io/os: fedora27 + spec: + domain: + devices: + disks: + - disk: + bus: virtio + name: registryvolume + - disk: + bus: virtio + name: cloudinitvolume + machine: + type: "" + resources: + requests: + memory: ${MEMORY} + terminationGracePeriodSeconds: 0 + volumes: + - containerDisk: + image: kubevirt/fedora-cloud-registry-disk-demo + name: registryvolume + - cloudInitNoCloud: + userData: |- + #cloud-config + password: fedora + chpasswd: { expire: False } + name: cloudinitvolume + status: {} +parameters: +- description: Name for the new VM + name: NAME +- description: Amount of memory + name: MEMORY + value: 256Mi diff --git a/tests/manifests/sockets_cores_threads/vm-template-fedora-only-cores-and-threads.yaml b/tests/manifests/sockets_cores_threads/vm-template-fedora-only-cores-and-threads.yaml new file mode 100644 index 000000000..23d9e1db7 --- /dev/null +++ b/tests/manifests/sockets_cores_threads/vm-template-fedora-only-cores-and-threads.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +kind: Template +metadata: + annotations: + description: OCP KubeVirt Fedora 27 VM template + iconClass: icon-fedora + tags: kubevirt,ocp,template,linux,virtualmachine + labels: + kubevirt.io/os: fedora27 + miq.github.io/kubevirt-is-vm-template: "true" + name: vm-template-fedora +objects: +- apiVersion: kubevirt.io/v1alpha3 + kind: VirtualMachine + metadata: + creationTimestamp: null + labels: + kubevirt.io/os: fedora27 + name: ${NAME} + spec: + running: false + template: + metadata: + creationTimestamp: null + labels: + kubevirt.io/os: fedora27 + spec: + domain: + cpu: + cores: 
${{CPU_CORES}} + threads: ${{CPU_THREADS}} + devices: + disks: + - disk: + bus: virtio + name: registryvolume + - disk: + bus: virtio + name: cloudinitvolume + machine: + type: "" + resources: + requests: + memory: ${MEMORY} + terminationGracePeriodSeconds: 0 + volumes: + - containerDisk: + image: kubevirt/fedora-cloud-registry-disk-demo + name: registryvolume + - cloudInitNoCloud: + userData: |- + #cloud-config + password: fedora + chpasswd: { expire: False } + name: cloudinitvolume + status: {} +parameters: +- description: Name for the new VM + name: NAME +- description: Amount of memory + name: MEMORY + value: 256Mi +- description: Amount of cores + name: CPU_CORES + value: "2" +- description: Amount of threads + name: CPU_THREADS + value: "2" diff --git a/tests/manifests/sockets_cores_threads/vm-template-fedora-only-cores.yaml b/tests/manifests/sockets_cores_threads/vm-template-fedora-only-cores.yaml new file mode 100644 index 000000000..133745baf --- /dev/null +++ b/tests/manifests/sockets_cores_threads/vm-template-fedora-only-cores.yaml @@ -0,0 +1,65 @@ +--- +apiVersion: v1 +kind: Template +metadata: + annotations: + description: OCP KubeVirt Fedora 27 VM template + iconClass: icon-fedora + tags: kubevirt,ocp,template,linux,virtualmachine + labels: + kubevirt.io/os: fedora27 + miq.github.io/kubevirt-is-vm-template: "true" + name: vm-template-fedora +objects: +- apiVersion: kubevirt.io/v1alpha3 + kind: VirtualMachine + metadata: + creationTimestamp: null + labels: + kubevirt.io/os: fedora27 + name: ${NAME} + spec: + running: false + template: + metadata: + creationTimestamp: null + labels: + kubevirt.io/os: fedora27 + spec: + domain: + cpu: + cores: ${{CPU_CORES}} + devices: + disks: + - disk: + bus: virtio + name: registryvolume + - disk: + bus: virtio + name: cloudinitvolume + machine: + type: "" + resources: + requests: + memory: ${MEMORY} + terminationGracePeriodSeconds: 0 + volumes: + - containerDisk: + image: kubevirt/fedora-cloud-registry-disk-demo + 
name: registryvolume + - cloudInitNoCloud: + userData: |- + #cloud-config + password: fedora + chpasswd: { expire: False } + name: cloudinitvolume + status: {} +parameters: +- description: Name for the new VM + name: NAME +- description: Amount of memory + name: MEMORY + value: 256Mi +- description: Amount of cores + name: CPU_CORES + value: "2" diff --git a/tests/manifests/sockets_cores_threads/vm-template-fedora-only-sockets-and-cores.yaml b/tests/manifests/sockets_cores_threads/vm-template-fedora-only-sockets-and-cores.yaml new file mode 100644 index 000000000..0cc410df4 --- /dev/null +++ b/tests/manifests/sockets_cores_threads/vm-template-fedora-only-sockets-and-cores.yaml @@ -0,0 +1,71 @@ +--- +apiVersion: v1 +kind: Template +metadata: + annotations: + description: OCP KubeVirt Fedora 27 VM template + iconClass: icon-fedora + tags: kubevirt,ocp,template,linux,virtualmachine + labels: + kubevirt.io/os: fedora27 + miq.github.io/kubevirt-is-vm-template: "true" + name: vm-template-fedora +objects: +- apiVersion: kubevirt.io/v1alpha3 + kind: VirtualMachine + metadata: + creationTimestamp: null + labels: + kubevirt.io/os: fedora27 + name: ${NAME} + spec: + running: false + template: + metadata: + creationTimestamp: null + labels: + kubevirt.io/os: fedora27 + spec: + domain: + cpu: + cores: ${{CPU_CORES}} + sockets: ${{CPU_SOCKETS}} + devices: + disks: + - disk: + bus: virtio + name: registryvolume + - disk: + bus: virtio + name: cloudinitvolume + machine: + type: "" + resources: + requests: + memory: ${MEMORY} + terminationGracePeriodSeconds: 0 + volumes: + - containerDisk: + image: kubevirt/fedora-cloud-registry-disk-demo + name: registryvolume + - cloudInitNoCloud: + userData: |- + #cloud-config + password: fedora + chpasswd: { expire: False } + name: cloudinitvolume + status: {} +parameters: +- description: Name for the new VM + name: NAME +- description: Amount of memory + name: MEMORY + value: 256Mi +- description: Amount of cores + name: CPU_CORES + value: "2" 
+- description: Amount of sockets + name: CPU_SOCKETS + value: "2" + + diff --git a/tests/manifests/sockets_cores_threads/vm-template-fedora-only-sockets-and-threads.yaml b/tests/manifests/sockets_cores_threads/vm-template-fedora-only-sockets-and-threads.yaml new file mode 100644 index 000000000..1b536427b --- /dev/null +++ b/tests/manifests/sockets_cores_threads/vm-template-fedora-only-sockets-and-threads.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +kind: Template +metadata: + annotations: + description: OCP KubeVirt Fedora 27 VM template + iconClass: icon-fedora + tags: kubevirt,ocp,template,linux,virtualmachine + labels: + kubevirt.io/os: fedora27 + miq.github.io/kubevirt-is-vm-template: "true" + name: vm-template-fedora +objects: +- apiVersion: kubevirt.io/v1alpha3 + kind: VirtualMachine + metadata: + creationTimestamp: null + labels: + kubevirt.io/os: fedora27 + name: ${NAME} + spec: + running: false + template: + metadata: + creationTimestamp: null + labels: + kubevirt.io/os: fedora27 + spec: + domain: + cpu: + sockets: ${{CPU_SOCKETS}} + threads: ${{CPU_THREADS}} + devices: + disks: + - disk: + bus: virtio + name: registryvolume + - disk: + bus: virtio + name: cloudinitvolume + machine: + type: "" + resources: + requests: + memory: ${MEMORY} + terminationGracePeriodSeconds: 0 + volumes: + - containerDisk: + image: kubevirt/fedora-cloud-registry-disk-demo + name: registryvolume + - cloudInitNoCloud: + userData: |- + #cloud-config + password: fedora + chpasswd: { expire: False } + name: cloudinitvolume + status: {} +parameters: +- description: Name for the new VM + name: NAME +- description: Amount of memory + name: MEMORY + value: 256Mi +- description: Amount of sockets + name: CPU_SOCKETS + value: "2" +- description: Amount of threads + name: CPU_THREADS + value: "2" diff --git a/tests/manifests/sockets_cores_threads/vm-template-fedora-only-sockets.yaml b/tests/manifests/sockets_cores_threads/vm-template-fedora-only-sockets.yaml new file mode 100644 index 
000000000..28b7c8012 --- /dev/null +++ b/tests/manifests/sockets_cores_threads/vm-template-fedora-only-sockets.yaml @@ -0,0 +1,65 @@ +--- +apiVersion: v1 +kind: Template +metadata: + annotations: + description: OCP KubeVirt Fedora 27 VM template + iconClass: icon-fedora + tags: kubevirt,ocp,template,linux,virtualmachine + labels: + kubevirt.io/os: fedora27 + miq.github.io/kubevirt-is-vm-template: "true" + name: vm-template-fedora +objects: +- apiVersion: kubevirt.io/v1alpha3 + kind: VirtualMachine + metadata: + creationTimestamp: null + labels: + kubevirt.io/os: fedora27 + name: ${NAME} + spec: + running: false + template: + metadata: + creationTimestamp: null + labels: + kubevirt.io/os: fedora27 + spec: + domain: + cpu: + sockets: ${{CPU_SOCKETS}} + devices: + disks: + - disk: + bus: virtio + name: registryvolume + - disk: + bus: virtio + name: cloudinitvolume + machine: + type: "" + resources: + requests: + memory: ${MEMORY} + terminationGracePeriodSeconds: 0 + volumes: + - containerDisk: + image: kubevirt/fedora-cloud-registry-disk-demo + name: registryvolume + - cloudInitNoCloud: + userData: |- + #cloud-config + password: fedora + chpasswd: { expire: False } + name: cloudinitvolume + status: {} +parameters: +- description: Name for the new VM + name: NAME +- description: Amount of memory + name: MEMORY + value: 256Mi +- description: Amount of sockets + name: CPU_SOCKETS + value: "2" diff --git a/tests/manifests/sockets_cores_threads/vm-template-fedora-only-threads.yaml b/tests/manifests/sockets_cores_threads/vm-template-fedora-only-threads.yaml new file mode 100644 index 000000000..03bc8c485 --- /dev/null +++ b/tests/manifests/sockets_cores_threads/vm-template-fedora-only-threads.yaml @@ -0,0 +1,65 @@ +--- +apiVersion: v1 +kind: Template +metadata: + annotations: + description: OCP KubeVirt Fedora 27 VM template + iconClass: icon-fedora + tags: kubevirt,ocp,template,linux,virtualmachine + labels: + kubevirt.io/os: fedora27 + 
miq.github.io/kubevirt-is-vm-template: "true" + name: vm-template-fedora +objects: +- apiVersion: kubevirt.io/v1alpha3 + kind: VirtualMachine + metadata: + creationTimestamp: null + labels: + kubevirt.io/os: fedora27 + name: ${NAME} + spec: + running: false + template: + metadata: + creationTimestamp: null + labels: + kubevirt.io/os: fedora27 + spec: + domain: + cpu: + threads: ${{CPU_THREADS}} + devices: + disks: + - disk: + bus: virtio + name: registryvolume + - disk: + bus: virtio + name: cloudinitvolume + machine: + type: "" + resources: + requests: + memory: ${MEMORY} + terminationGracePeriodSeconds: 0 + volumes: + - containerDisk: + image: kubevirt/fedora-cloud-registry-disk-demo + name: registryvolume + - cloudInitNoCloud: + userData: |- + #cloud-config + password: fedora + chpasswd: { expire: False } + name: cloudinitvolume + status: {} +parameters: +- description: Name for the new VM + name: NAME +- description: Amount of memory + name: MEMORY + value: 256Mi +- description: Amount of threads + name: CPU_THREADS + value: "2" diff --git a/tests/manifests/sockets_cores_threads/vm-template-fedora-sockets-cores-and-threads.yaml b/tests/manifests/sockets_cores_threads/vm-template-fedora-sockets-cores-and-threads.yaml new file mode 100644 index 000000000..1d6dfbf05 --- /dev/null +++ b/tests/manifests/sockets_cores_threads/vm-template-fedora-sockets-cores-and-threads.yaml @@ -0,0 +1,73 @@ +--- +apiVersion: v1 +kind: Template +metadata: + annotations: + description: OCP KubeVirt Fedora 27 VM template + iconClass: icon-fedora + tags: kubevirt,ocp,template,linux,virtualmachine + labels: + kubevirt.io/os: fedora27 + miq.github.io/kubevirt-is-vm-template: "true" + name: vm-template-fedora +objects: +- apiVersion: kubevirt.io/v1alpha3 + kind: VirtualMachine + metadata: + creationTimestamp: null + labels: + kubevirt.io/os: fedora27 + name: ${NAME} + spec: + running: false + template: + metadata: + creationTimestamp: null + labels: + kubevirt.io/os: fedora27 + spec: + 
domain: + cpu: + sockets: ${{CPU_SOCKETS}} + cores: ${{CPU_CORES}} + threads: ${{CPU_THREADS}} + devices: + disks: + - disk: + bus: virtio + name: registryvolume + - disk: + bus: virtio + name: cloudinitvolume + machine: + type: "" + resources: + requests: + memory: ${MEMORY} + terminationGracePeriodSeconds: 0 + volumes: + - containerDisk: + image: kubevirt/fedora-cloud-registry-disk-demo + name: registryvolume + - cloudInitNoCloud: + userData: |- + #cloud-config + password: fedora + chpasswd: { expire: False } + name: cloudinitvolume + status: {} +parameters: +- description: Name for the new VM + name: NAME +- description: Amount of memory + name: MEMORY + value: 256Mi +- description: Amount of cores + name: CPU_CORES + value: "2" +- description: Amount of threads + name: CPU_THREADS + value: "2" +- description: Amount of sockets + name: CPU_SOCKETS + value: "2" diff --git a/tests/manifests/sockets_cores_threads/vmi-case1.1.yml b/tests/manifests/sockets_cores_threads/vmi-case1.1.yml new file mode 100644 index 000000000..de5c0f6c8 --- /dev/null +++ b/tests/manifests/sockets_cores_threads/vmi-case1.1.yml @@ -0,0 +1,23 @@ +--- +apiVersion: kubevirt.io/v1alpha3 +kind: VirtualMachineInstance +metadata: + labels: + special: vmi-case1.1 + name: vmi-case1.1 +spec: + domain: + devices: + disks: + - disk: + bus: virtio + name: emptyD + machine: + type: "" + resources: + requests: + memory: 64M + volumes: + - name: emptyD + emptyDisk: + capacity: 64M diff --git a/tests/manifests/sockets_cores_threads/vmi-case1.2.1.yml b/tests/manifests/sockets_cores_threads/vmi-case1.2.1.yml new file mode 100644 index 000000000..1c974a682 --- /dev/null +++ b/tests/manifests/sockets_cores_threads/vmi-case1.2.1.yml @@ -0,0 +1,24 @@ +--- +apiVersion: kubevirt.io/v1alpha3 +kind: VirtualMachineInstance +metadata: + labels: + special: vmi-case1.2.1 + name: vmi-case1.2.1 +spec: + domain: + devices: + disks: + - disk: + bus: virtio + name: emptyD + machine: + type: "" + resources: + requests: + 
memory: 64M + cpu: 2 + volumes: + - name: emptyD + emptyDisk: + capacity: 64M diff --git a/tests/manifests/sockets_cores_threads/vmi-case1.2.2.yml b/tests/manifests/sockets_cores_threads/vmi-case1.2.2.yml new file mode 100644 index 000000000..40ec8d0bd --- /dev/null +++ b/tests/manifests/sockets_cores_threads/vmi-case1.2.2.yml @@ -0,0 +1,26 @@ +--- +apiVersion: kubevirt.io/v1alpha3 +kind: VirtualMachineInstance +metadata: + labels: + special: vmi-case1.2.2 + name: vmi-case1.2.2 +spec: + domain: + devices: + disks: + - disk: + bus: virtio + name: emptyD + machine: + type: "" + resources: + requests: + memory: 64M + cpu: 2 + limits: + cpu: 2 + volumes: + - name: emptyD + emptyDisk: + capacity: 64M diff --git a/tests/manifests/sockets_cores_threads/vmi-case1.2.3.yml b/tests/manifests/sockets_cores_threads/vmi-case1.2.3.yml new file mode 100644 index 000000000..a30985448 --- /dev/null +++ b/tests/manifests/sockets_cores_threads/vmi-case1.2.3.yml @@ -0,0 +1,29 @@ +--- +apiVersion: kubevirt.io/v1alpha3 +kind: VirtualMachineInstance +metadata: + labels: + special: vmi-case1.2.3 + name: vmi-case1.2.3 +spec: + domain: + cpu: + dedicatedCpuPlacement: true + devices: + disks: + - disk: + bus: virtio + name: emptyD + machine: + type: "" + resources: + requests: + cpu: 4 + memory: 64M + limits: + cpu: 2 + memory: 64M + volumes: + - name: emptyD + emptyDisk: + capacity: 64M diff --git a/tests/secrets_and_cfgmap_test.go b/tests/secrets_and_cfgmap_test.go index 5357c434c..ab4fa4473 100644 --- a/tests/secrets_and_cfgmap_test.go +++ b/tests/secrets_and_cfgmap_test.go @@ -91,7 +91,7 @@ var _ = Describe("[rfe_id:384][crit:medium][vendor:cnv-qe@redhat.com][level:comp Expect(podOutput_cfgMap).To(Equal(expectedOutput_cfgMap)) By("Checking mounted ConfigMap image") - expecter, err := tests.LoggedInFedoraExpecter(vmi.Name, tests.NamespaceTestDefault, 360) + expecter, err := tests.LoggedInFedoraExpecter(vmi.Name, tests.NamespaceTestDefault, 360, false) Expect(err).ToNot(HaveOccurred()) 
defer expecter.Close() @@ -199,7 +199,7 @@ var _ = Describe("[rfe_id:384][crit:medium][vendor:cnv-qe@redhat.com][level:comp Expect(podOutput2).To(Equal(expectedPublicKey)) By("Checking mounted secrets sshkeys image") - expecter, err := tests.LoggedInFedoraExpecter(vmi.Name, tests.NamespaceTestDefault, 360) + expecter, err := tests.LoggedInFedoraExpecter(vmi.Name, tests.NamespaceTestDefault, 360, false) Expect(err).ToNot(HaveOccurred()) defer expecter.Close() diff --git a/tests/serviceaccount_test.go b/tests/serviceaccount_test.go index 4d1fa86fb..31ce7ff6d 100644 --- a/tests/serviceaccount_test.go +++ b/tests/serviceaccount_test.go @@ -66,7 +66,7 @@ var _ = Describe("[rfe_id:905][crit:medium][vendor:cnv-qe@redhat.com][level:comp Expect(podOutput).To(Equal(tests.NamespaceTestDefault)) By("Checking mounted serviceaccount image") - expecter, err := tests.LoggedInFedoraExpecter(vmi.Name, tests.NamespaceTestDefault, 360) + expecter, err := tests.LoggedInFedoraExpecter(vmi.Name, tests.NamespaceTestDefault, 360, false) Expect(err).ToNot(HaveOccurred()) defer expecter.Close() diff --git a/tests/sockets_cores_threads_test.go b/tests/sockets_cores_threads_test.go new file mode 100644 index 000000000..7887f27f9 --- /dev/null +++ b/tests/sockets_cores_threads_test.go @@ -0,0 +1,485 @@ +/* + * This file is part of the KubeVirt project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright 2019 Red Hat, Inc. 
+ * + */ + +package tests_test + +import ( + "bytes" + "encoding/xml" + "flag" + "fmt" + "os" + "strconv" + "sync" + "time" + + "github.com/google/goexpect" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8syaml "k8s.io/apimachinery/pkg/util/yaml" + tframework "kubevirt.io/kubevirt-ansible/tests/framework" + + "kubevirt.io/kubevirt/pkg/api/v1" + "kubevirt.io/kubevirt/pkg/kubecli" + "kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api" + ktests "kubevirt.io/kubevirt/tests" +) + +func parseYAMLConfig(podYAML string) (bool, int, int) { + isCPUPresent := false + var resourcesRequests int64 = 0 + var resourcesLimits int64 = 0 + + var config corev1.Pod + buf := bytes.NewBuffer([]byte(podYAML)) + decoder := k8syaml.NewYAMLOrJSONDecoder(buf, 1024) + decoder.Decode(&config) + + // Checking that fields Requests and Limits exist + if len(config.Spec.Containers[0].Resources.Requests) != 0 && len(config.Spec.Containers[0].Resources.Limits) != 0 { + + element, doesElementExist := config.Spec.Containers[0].Resources.Requests["cpu"] + if doesElementExist { + isCPUPresent = true + resourcesRequests, _ = (&element).AsInt64() + } + + element, doesElementExist = config.Spec.Containers[0].Resources.Limits["cpu"] + if doesElementExist { + isCPUPresent = true + resourcesLimits, _ = (&element).AsInt64() + } + } + + return isCPUPresent, int(resourcesRequests), int(resourcesLimits) +} + +func getYAMLFilename(sockets, cores, threads int, address_common string) string { + // 0 means parameter set to 0, 1 means parameter set to non-zero + var file_name [2][2][2]string + + file_name[0][0][0] = "vm-template-fedora-no-sockets-cores-and-threads.yaml" + file_name[1][0][0] = "vm-template-fedora-only-sockets.yaml" + file_name[0][1][0] = "vm-template-fedora-only-cores.yaml" + file_name[0][0][1] = "vm-template-fedora-only-threads.yaml" + file_name[0][1][1] = "vm-template-fedora-only-cores-and-threads.yaml" + 
file_name[1][0][1] = "vm-template-fedora-only-sockets-and-threads.yaml" + file_name[1][1][0] = "vm-template-fedora-only-sockets-and-cores.yaml" + file_name[1][1][1] = "vm-template-fedora-sockets-cores-and-threads.yaml" + + // Because go doesn't have ternary operators + s := 0 + if sockets > 0 { + s = 1 + } + + c := 0 + if cores > 0 { + c = 1 + } + + t := 0 + if threads > 0 { + t = 1 + } + + return address_common + file_name[s][c][t] +} + +func getArguments(vm_name string, sockets int, cores int, threads int) []string { + arguments := []string{"NAME=" + vm_name} + + if sockets != 0 { + arguments = append(arguments, "CPU_SOCKETS="+strconv.Itoa(sockets)) + } + if cores != 0 { + arguments = append(arguments, "CPU_CORES="+strconv.Itoa(cores)) + } + if threads != 0 { + arguments = append(arguments, "CPU_THREADS="+strconv.Itoa(threads)) + } + + return arguments +} + +func clean_pods(virtClient kubecli.KubevirtClient, requiredPods []*corev1.Pod) { + listOptions := metav1.ListOptions{} + podList, err := virtClient.CoreV1().Pods(ktests.NamespaceTestDefault).List(listOptions) + + Expect(err).ToNot(HaveOccurred()) + for _, item := range podList.Items { + deletePod := true + for _, internalItem := range requiredPods { + //fmt.Println("internalItem=", internalItem.Name) + if item.Name == internalItem.Name { + deletePod = false + //fmt.Println("do not delete pod=", internalItem.Name) + } + + } + if deletePod { + fmt.Println("delete pod=", item.Name) + _, _, _ = ktests.RunCommandWithNS(ktests.NamespaceTestDefault, "oc", "delete", "pod", item.Name) + } + } +} + +var _ = Describe("[rfe_id:1443][crit:medium][vendor:cnv-qe@redhat.com][level:component]Check CPU topology inside VM", func() { + flag.Parse() + virtClient, err := kubecli.GetKubevirtClient() + ktests.PanicOnError(err) + address_common := "tests/manifests/sockets_cores_threads/" + var vmi11, vmi121, vmi122, vmi123 tframework.VirtualMachine + BeforeEach(func() { + ktests.BeforeTestCleanup() + }) + + Context("test case 1.1 
Check the validity of the XML file if user didn’t set the CPU topology at all", func() {
	It("[test_id:1485] testcase 1.1 Check the validity of the XML file if user didn’t set the CPU topology at all", func() {

		// BUGFIX: the path separator was missing here ("vmi-case1.1.yml");
		// every other case prefixes the file name with "/".
		vmi11.Manifest = address_common + "/vmi-case1.1.yml"
		vmi11.Namespace = ktests.NamespaceTestDefault

		By("Creating VMI using manifest")
		_, _, err := vmi11.Create()
		Expect(err).ToNot(HaveOccurred())
		vmi11.Name = "vmi-case1.1"

		By("Getting VMI object")
		getVMOptions := metav1.GetOptions{}
		vmi, err := virtClient.VirtualMachineInstance(ktests.NamespaceTestDefault).Get(vmi11.Name, &getVMOptions)
		Expect(err).ToNot(HaveOccurred())

		By("Waiting until VMI start")
		ktests.WaitForSuccessfulVMIStart(vmi)

		By("getting pod object")
		vmiPod := ktests.GetRunningPodByVirtualMachineInstance(vmi, ktests.NamespaceTestDefault)

		By("clean old pods")
		var requiredPods []*corev1.Pod
		requiredPods = append(requiredPods, vmiPod)
		clean_pods(virtClient, requiredPods)

		By("Checking resources in the pod")
		podName := vmiPod.Name
		outPodYAML, _, err := ktests.RunCommandWithNS(ktests.NamespaceTestDefault, "oc", "get", "pod", podName, "-o", "yaml")
		Expect(err).ToNot(HaveOccurred())
		cpuExist, _, _ := parseYAMLConfig(outPodYAML)
		// No CPU topology was requested, so the pod spec must not carry
		// explicit CPU resources.
		Expect(cpuExist).To(BeFalse())

		By("Checking XML")
		vmiXml, err := ktests.GetRunningVirtualMachineInstanceDomainXML(
			virtClient,
			vmi,
		)
		// BUGFIX: this error was previously never checked.
		Expect(err).ToNot(HaveOccurred())

		domStat := &api.DomainSpec{}
		err = xml.Unmarshal([]byte(vmiXml), domStat)
		Expect(err).ToNot(HaveOccurred())
		// Defaults: 1 socket x 1 core x 1 thread => 1 vCPU.
		Expect(uint(domStat.VCPU.CPUs) == 1).To(BeTrue())

		Expect(uint(domStat.CPU.Topology.Sockets) == 1).To(BeTrue())
		Expect(uint(domStat.CPU.Topology.Cores) == 1).To(BeTrue())
		Expect(uint(domStat.CPU.Topology.Threads) == 1).To(BeTrue())

	})
})

Context("test case 1.2 Check the validity of the XML file if user didn’t set the CPU topology (only standard cpus - previous releases like) ", func() {
	It("[test_id:1487]testcase 1.2 Check the validity of the XML file if user didn’t set the CPU topology (only standard cpus - previous releases like) ", func() {

		By("declare variables")
		vmi121.Manifest = address_common + "/vmi-case1.2.1.yml"
		vmi122.Manifest = address_common + "/vmi-case1.2.2.yml"
		vmi123.Manifest = address_common + "/vmi-case1.2.3.yml"
		vmi121.Name = "vmi-case1.2.1"
		vmi122.Name = "vmi-case1.2.2"
		vmi123.Name = "vmi-case1.2.3"
		vmi121.Namespace, vmi122.Namespace, vmi123.Namespace = ktests.NamespaceTestDefault, ktests.NamespaceTestDefault, ktests.NamespaceTestDefault

		By("Create VMI using manifest - only request CPU resources - VMI-2.1.1")
		_, _, err := vmi121.Create()
		Expect(err).ToNot(HaveOccurred())

		By("Create VMI using manifest - request and limit cpu resources, same amount - VMI-2.2.2")
		_, _, err = vmi122.Create()
		Expect(err).ToNot(HaveOccurred())

		By("create VMI using wrong manifest, request and limit has different cpu resources")
		_, _, err = vmi123.Create()
		Expect(err).Should(HaveOccurred())

		By("Getting VMI object")
		getVMOptions := metav1.GetOptions{}
		vmi121, err := virtClient.VirtualMachineInstance(ktests.NamespaceTestDefault).Get(vmi121.Name, &getVMOptions)
		Expect(err).ToNot(HaveOccurred())
		vmi122, err := virtClient.VirtualMachineInstance(ktests.NamespaceTestDefault).Get(vmi122.Name, &getVMOptions)
		Expect(err).ToNot(HaveOccurred())

		By("Waiting until VMI121 start")
		ktests.WaitForSuccessfulVMIStart(vmi121)

		By("Waiting until VMI122 start")
		ktests.WaitForSuccessfulVMIStart(vmi122)

		By("getting pods objects")
		vmi121Pod := ktests.GetRunningPodByVirtualMachineInstance(vmi121, ktests.NamespaceTestDefault)
		vmi122Pod := ktests.GetRunningPodByVirtualMachineInstance(vmi122, ktests.NamespaceTestDefault)

		By("workaround for CI - clean old pods")
		var requiredPods []*corev1.Pod
		requiredPods = append(requiredPods, vmi121Pod)
		requiredPods = append(requiredPods, vmi122Pod)
		fmt.Println("requiredPods[0].Name=", requiredPods[0].Name, " requiredPods[1].Name=", requiredPods[1].Name)
		clean_pods(virtClient, requiredPods)

		By("Checking that pod was created and has the right name")
		Expect(vmi121Pod.Name).To(HavePrefix("virt-launcher-"+vmi121.Name), "Pod's name should contain name of the VM associated with it")
		Expect(vmi122Pod.Name).To(HavePrefix("virt-launcher-"+vmi122.Name), "Pod's name should contain name of the VM associated with it")

		By("get pod-s YAML files")
		podName121 := vmi121Pod.Name
		podName122 := vmi122Pod.Name
		outPodYAML121, _, err := ktests.RunCommandWithNS(ktests.NamespaceTestDefault, "oc", "get", "pod", podName121, "-o", "yaml")
		Expect(err).ToNot(HaveOccurred())
		outPodYAML122, _, err := ktests.RunCommandWithNS(ktests.NamespaceTestDefault, "oc", "get", "pod", podName122, "-o", "yaml")
		Expect(err).ToNot(HaveOccurred())

		By("Checking resources in the pod")
		// vmi121 only requests CPUs (no limit); vmi122 requests and limits
		// the same amount, so both must show up in the pod spec.
		cpuExist, CPUresourcesRequests, CPUresourcesLimits := parseYAMLConfig(outPodYAML121)
		Expect(cpuExist).To(BeTrue())
		Expect(CPUresourcesRequests == 2).To(BeTrue())
		Expect(CPUresourcesLimits == 0).To(BeTrue())
		cpuExist, CPUresourcesRequests, CPUresourcesLimits = parseYAMLConfig(outPodYAML122)
		Expect(cpuExist).To(BeTrue())
		Expect(CPUresourcesRequests == 2).To(BeTrue())
		Expect(CPUresourcesLimits == 2).To(BeTrue())

		By("Get vmi121 XML")
		vmiXml121, err := ktests.GetRunningVirtualMachineInstanceDomainXML(
			virtClient,
			vmi121,
		)
		// BUGFIX: this error was previously overwritten before being checked.
		Expect(err).ToNot(HaveOccurred())

		By("Get vmi122 XML")
		vmiXml122, err := ktests.GetRunningVirtualMachineInstanceDomainXML(
			virtClient,
			vmi122,
		)
		Expect(err).ToNot(HaveOccurred())

		By("vmi unmarshal")
		domStat121 := &api.DomainSpec{}
		domStat122 := &api.DomainSpec{}
		err = xml.Unmarshal([]byte(vmiXml121), domStat121)
		Expect(err).ToNot(HaveOccurred())

		err = xml.Unmarshal([]byte(vmiXml122), domStat122)
		Expect(err).ToNot(HaveOccurred())

		By("check XML")

		By("check XML - vmi121")
		Expect(uint(domStat121.VCPU.CPUs) == 2).To(BeTrue())
		Expect(uint(domStat121.CPU.Topology.Sockets) == 2).To(BeTrue())
		Expect(uint(domStat121.CPU.Topology.Cores) == 1).To(BeTrue())
		Expect(uint(domStat121.CPU.Topology.Threads) == 1).To(BeTrue())

		By("check XML - vmi122")
		Expect(uint(domStat122.VCPU.CPUs) == 2).To(BeTrue())
		Expect(uint(domStat122.CPU.Topology.Sockets) == 2).To(BeTrue())
		Expect(uint(domStat122.CPU.Topology.Cores) == 1).To(BeTrue())
		Expect(uint(domStat122.CPU.Topology.Threads) == 1).To(BeTrue())

		By("Clean old pods")
		var cleanPods []*corev1.Pod
		clean_pods(virtClient, cleanPods)

	})
})

Context("test cases 1.3,2.1,2.2 Check the validity of the XML file if user didn’t set the CPU topology at all", func() {
	It("[test_id:1488] [test_id:1490] [test_id:1489] [test_id:1488] testcases 1.3,2.1,2.2 Check the validity of the XML file if user didn’t set the CPU topology at all", func() {

		var wg sync.WaitGroup
		vm_index := 0

		By("Declare goroutine function")
		// runVM verifies one already-started VM (pod naming, pod resources,
		// domain XML topology, and in-guest lscpu output), then deletes the
		// VM and its generated manifest. The caller is responsible for
		// wg.Add(1) BEFORE launching the goroutine — see BUGFIX note below.
		runVM := func(socketsN int, coresN int, threadsN int, vmi *v1.VirtualMachineInstance, wg *sync.WaitGroup, virtRawVMFilePath string, vm_name string) {
			By("1.3 Starting gouroutine to create, start and test VM")
			defer wg.Done()

			By("1.3 Checking that pod was created and has the right name")
			vmiPod_vmNumName := ktests.GetRunningPodByVirtualMachineInstance(vmi, ktests.NamespaceTestDefault)
			podName := vmiPod_vmNumName.Name
			Expect(podName).To(HavePrefix("virt-launcher-"+vm_name), "Pod's name should contain name of the VM associated with it")

			By("1.3 Checking resources in the pod")
			outPodYAML, _, err := ktests.RunCommandWithNS(ktests.NamespaceTestDefault, "oc", "get", "pod", podName, "-o", "yaml")
			Expect(err).ToNot(HaveOccurred())
			cpuExist, _, _ := parseYAMLConfig(outPodYAML)
			// BUGFIX: failure message said the opposite of the assertion.
			Expect(cpuExist).To(BeFalse(), "pod YAML should not have explicit CPU resources")

			By("1.3 Get VMI XML")
			vmiXml, err := ktests.GetRunningVirtualMachineInstanceDomainXML(virtClient, vmi)
			Expect(err).ToNot(HaveOccurred())

			By("1.3 VMI Unmarshal")
			domStat := &api.DomainSpec{}
			err = xml.Unmarshal([]byte(vmiXml), domStat)
			Expect(err).ToNot(HaveOccurred())

			XMLSockets := socketsN
			XMLCores := coresN
			XMLThreads := threadsN
			// If CPU cores, sockets or threads set to 0, XML should have 1 for this parameter
			if socketsN == 0 {
				XMLSockets = 1
			}

			if coresN == 0 {
				XMLCores = 1
			}

			if threadsN == 0 {
				XMLThreads = 1
			}

			By("1.3 Checking XML topology")
			Expect(int(domStat.CPU.Topology.Sockets) == XMLSockets).To(BeTrue(), "XML should have right number of sockets")
			Expect(int(domStat.CPU.Topology.Cores) == XMLCores).To(BeTrue(), "XML should have right number of cores")
			Expect(int(domStat.CPU.Topology.Threads) == XMLThreads).To(BeTrue(), "XML should have right number of threads")

			By("1.3 Checking the amount of vCPU")
			vCPUAmount := XMLSockets * XMLCores * XMLThreads
			Expect(int(domStat.VCPU.CPUs) == vCPUAmount).To(BeTrue(), "XML should have right number of vCPUs")

			By("2.1 Expecting the VirtualMachineInstance console")
			expecter, err := tframework.LoggedInFedoraExpecter(vm_name, ktests.NamespaceTestDefault, 380, true)
			Expect(err).ToNot(HaveOccurred(), "Console should be started")
			defer expecter.Close()

			By("2.2 Checking the number of sockets in guest OS")
			_, err = expecter.ExpectBatch([]expect.Batcher{
				&expect.BSnd{S: "lscpu | grep Socket | awk '{print $2}'\n"},
				&expect.BExp{R: strconv.Itoa(XMLSockets)},
			}, 60*time.Second)
			Expect(err).ToNot(HaveOccurred(), "Should report number of sockets")

			By("2.2 Checking the number of cores in guest OS")
			_, err = expecter.ExpectBatch([]expect.Batcher{
				&expect.BSnd{S: "lscpu | grep Core | awk '{print $4}'\n"},
				&expect.BExp{R: strconv.Itoa(XMLCores)},
			}, 60*time.Second)
			Expect(err).ToNot(HaveOccurred(), "Should report number of cores")

			By("2.2 Checking the number of threads in guest OS")
			_, err = expecter.ExpectBatch([]expect.Batcher{
				&expect.BSnd{S: "lscpu | grep Thread | awk '{print $4}'\n"},
				&expect.BExp{R: strconv.Itoa(XMLThreads)},
			}, 60*time.Second)
			Expect(err).ToNot(HaveOccurred(), "Should report number of threads")

			By("Deleting VM")
			// BUGFIX: the command's error was discarded (`_, _, _ =`) and a
			// stale err from the previous step was asserted instead.
			_, _, err = ktests.RunCommandWithNS(ktests.NamespaceTestDefault, "oc", "delete", "vm", vm_name)
			Expect(err).ToNot(HaveOccurred())
			By("Deleting VM manifest")
			err = os.Remove(virtRawVMFilePath)
			Expect(err).ToNot(HaveOccurred())
		}

		By("main cycle")
		for sockets := 0; sockets < 3; sockets++ {
			for cores := 0; cores < 3; cores++ {
				for threads := 0; threads < 3; threads++ {

					By("1.3 check resources in the cluster")
					cpuNeeded := 1
					if sockets > 0 {
						cpuNeeded *= sockets
					}
					if cores > 0 {
						cpuNeeded *= cores
					}
					if threads > 0 {
						cpuNeeded *= threads
					}
					const memNeeded int64 = 256 * 1024 * 1024 // 256mb is default in the template

					const maxWaitIterations = 15 // half of minute
					// BUGFIX: start pessimistic. The original initialized this
					// to true, which made the !isAvailable fallback below
					// unreachable even when the cluster never freed resources.
					isAvailable := false

					for i := 0; i < maxWaitIterations; i++ {
						IsResourcesInCluster, amountVMs := tframework.IsEnoughResources(virtClient, cpuNeeded, memNeeded)
						fmt.Println("================================")
						fmt.Println("debug in a case if goroutine fails. Amount of possible VMs=", amountVMs)
						fmt.Println("================================")
						if IsResourcesInCluster {
							isAvailable = true
							break
						}
						time.Sleep(2 * time.Second)
					}

					if !isAvailable {
						fmt.Println("cluster doesn't have resources to launch this VM! Try to launch next option")
						break
					}

					vm_name := "vm13-" + strconv.Itoa(vm_index)
					vm_index++

					filename := getYAMLFilename(sockets, cores, threads, address_common)
					arguments := getArguments(vm_name, sockets, cores, threads)

					virtRawVMFilePath := address_common + "/sockets_cores_threads_raw_manifest_" + vm_name + ".yaml"
					tframework.ProcessTemplateWithParameters(filename, virtRawVMFilePath, arguments...)

					By("1.3 Create VM from template and launch it")
					tframework.CreateResourceWithFilePathTestNamespace(virtRawVMFilePath)
					_, _, err := ktests.RunCommandWithNS(ktests.NamespaceTestDefault, "virtctl", "start", vm_name)
					Expect(err).ToNot(HaveOccurred())
					_, _, err = ktests.RunCommandWithNS(ktests.NamespaceTestDefault, "oc", "project", ktests.NamespaceTestDefault)
					Expect(err).ToNot(HaveOccurred())

					By("1.3 Getting VMI object")
					getVMOptions := metav1.GetOptions{}
					vmi, err := virtClient.VirtualMachineInstance(ktests.NamespaceTestDefault).Get(vm_name, &getVMOptions)
					Expect(err).ToNot(HaveOccurred())
					ktests.WaitForSuccessfulVMIStart(vmi)

					By("launch goroutine")
					fmt.Println("launch goroutine for vm ", vm_name)
					// BUGFIX: Add must happen-before the goroutine launch so
					// wg.Wait() cannot return early; the original called
					// wg.Add(1) inside runVM, racing with Wait (sync.WaitGroup
					// contract).
					wg.Add(1)
					go runVM(sockets, cores, threads, vmi, &wg, virtRawVMFilePath, vm_name)

				}
			}
		}
		wg.Wait()
	})
})

})
diff --git a/tests/vm_with_sidecar_hook_test.go b/tests/vm_with_sidecar_hook_test.go
index ad07b4f8a..2fb66b310 100644
--- a/tests/vm_with_sidecar_hook_test.go
+++ b/tests/vm_with_sidecar_hook_test.go
@@ -33,7 +33,7 @@ var _ = Describe("[rfe_id:839][crit:medium][vendor:cnv-qe@redhat.com][level:comp
 tests.WaitUntilResourceReadyByNameTestNamespace("vmi", vmiName, "-o=jsonpath='{.status.phase}'", "Running")

 By("Expecting console")
-expecter, err := tests.LoggedInFedoraExpecter(vmiName, tests.NamespaceTestDefault, 720)
+expecter, err := tests.LoggedInFedoraExpecter(vmiName, tests.NamespaceTestDefault, 720, false)
 Expect(err).ToNot(HaveOccurred())
 defer expecter.Close()
diff --git a/vars/all.yml b/vars/all.yml
index e24266aa1..cbe1436cb 100644
--- a/vars/all.yml
+++ b/vars/all.yml
@@ -3,12 +3,12 @@ platform: openshift
 apb_action: "provision"

 # OpenShift #
-kubevirt_openshift_version: "3.10"
+kubevirt_openshift_version: "3.11"
 openshift_ansible_dir: "openshift-ansible/"
 openshift_playbook_path: "{{ openshift_ansible_dir }}/{{ 'playbooks/byo/config.yml' if kubevirt_openshift_version == '3.7' else 'playbooks/deploy_cluster.yml' }}"

 ###
KubeVirt ### -version: 0.13.3 +version: 0.16.2 image_pull_policy: IfNotPresent deploy_demo: true @@ -26,7 +26,7 @@ deploy_skydive: false ### Web UI ## kubevirt_web_ui_operator_image_tag: "latest" kubevirt_web_ui_branding: "okdvirt" -kubevirt_web_ui_version: "v1.4.0-13" +kubevirt_web_ui_version: "v2.0.0-14.4" #kubevirt_web_ui_version: "" #