KVM: SEV: Refactor out sev_es_state struct
Move SEV-ES vCPU metadata from vcpu_svm into a new sev_es_state struct.

Signed-off-by: Peter Gonda <[email protected]>
Suggested-by: Tom Lendacky <[email protected]>
Acked-by: Tom Lendacky <[email protected]>
Reviewed-by: Sean Christopherson <[email protected]>
Cc: Marc Orr <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Dr. David Alan Gilbert <[email protected]>
Cc: Brijesh Singh <[email protected]>
Cc: Tom Lendacky <[email protected]>
Cc: Vitaly Kuznetsov <[email protected]>
Cc: Wanpeng Li <[email protected]>
Cc: Jim Mattson <[email protected]>
Cc: Joerg Roedel <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: [email protected]
Cc: [email protected]
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
pgonda authored and bonzini committed Nov 11, 2021
1 parent 52cf891 commit b67a4cc
Showing 3 changed files with 61 additions and 56 deletions.
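For orientation before the per-file diffs: the shape of the change is that the SEV-ES fields move out of struct vcpu_svm into a nested struct, so every access gains a sev_es. step (svm->ghcb becomes svm->sev_es.ghcb, and so on). Below is a minimal, self-contained C sketch of that pattern only; the placeholder types stand in for the kernel's vmcb_save_area, ghcb, and kvm_host_map, and the main() harness is purely illustrative, not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder types standing in for the kernel definitions; the field
 * names below mirror the real vcpu_sev_es_state from svm.h. */
struct vmcb_save_area { int dummy; };
struct ghcb { int dummy; };
struct kvm_host_map { void *hva; };

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	uint64_t ghcb_sa_len;	/* u64 in the kernel */
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

struct vcpu_svm {
	/* ...other vcpu_svm fields elided... */
	struct vcpu_sev_es_state sev_es;	/* previously: the fields above, inline */
};

int main(void)
{
	struct vcpu_svm svm = {0};

	/* Before the refactor: svm.ghcb_sa_free = true;
	 * After the refactor: the same state is reached through sev_es. */
	svm.sev_es.ghcb_sa_free = true;
	printf("ghcb_sa_free = %d\n", svm.sev_es.ghcb_sa_free);
	return 0;
}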
83 changes: 42 additions & 41 deletions arch/x86/kvm/svm/sev.c
@@ -590,7 +590,7 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
 	 * traditional VMSA as it has been built so far (in prep
 	 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
 	 */
-	memcpy(svm->vmsa, save, sizeof(*save));
+	memcpy(svm->sev_es.vmsa, save, sizeof(*save));
 
 	return 0;
 }
@@ -612,11 +612,11 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
 	 * the VMSA memory content (i.e it will write the same memory region
 	 * with the guest's key), so invalidate it first.
 	 */
-	clflush_cache_range(svm->vmsa, PAGE_SIZE);
+	clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);
 
 	vmsa.reserved = 0;
 	vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
-	vmsa.address = __sme_pa(svm->vmsa);
+	vmsa.address = __sme_pa(svm->sev_es.vmsa);
 	vmsa.len = PAGE_SIZE;
 	return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
 }
@@ -2026,16 +2026,16 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
 	svm = to_svm(vcpu);
 
 	if (vcpu->arch.guest_state_protected)
-		sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
-	__free_page(virt_to_page(svm->vmsa));
+		sev_flush_guest_memory(svm, svm->sev_es.vmsa, PAGE_SIZE);
+	__free_page(virt_to_page(svm->sev_es.vmsa));
 
-	if (svm->ghcb_sa_free)
-		kfree(svm->ghcb_sa);
+	if (svm->sev_es.ghcb_sa_free)
+		kfree(svm->sev_es.ghcb_sa);
 }
 
 static void dump_ghcb(struct vcpu_svm *svm)
 {
-	struct ghcb *ghcb = svm->ghcb;
+	struct ghcb *ghcb = svm->sev_es.ghcb;
 	unsigned int nbits;
 
 	/* Re-use the dump_invalid_vmcb module parameter */
@@ -2061,7 +2061,7 @@ static void dump_ghcb(struct vcpu_svm *svm)
 static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
-	struct ghcb *ghcb = svm->ghcb;
+	struct ghcb *ghcb = svm->sev_es.ghcb;
 
 	/*
 	 * The GHCB protocol so far allows for the following data
@@ -2081,7 +2081,7 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
 	struct kvm_vcpu *vcpu = &svm->vcpu;
-	struct ghcb *ghcb = svm->ghcb;
+	struct ghcb *ghcb = svm->sev_es.ghcb;
 	u64 exit_code;
 
 	/*
@@ -2128,7 +2128,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 	struct ghcb *ghcb;
 	u64 exit_code = 0;
 
-	ghcb = svm->ghcb;
+	ghcb = svm->sev_es.ghcb;
 
 	/* Only GHCB Usage code 0 is supported */
 	if (ghcb->ghcb_usage)
@@ -2246,33 +2246,34 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 
 void sev_es_unmap_ghcb(struct vcpu_svm *svm)
 {
-	if (!svm->ghcb)
+	if (!svm->sev_es.ghcb)
 		return;
 
-	if (svm->ghcb_sa_free) {
+	if (svm->sev_es.ghcb_sa_free) {
 		/*
 		 * The scratch area lives outside the GHCB, so there is a
 		 * buffer that, depending on the operation performed, may
 		 * need to be synced, then freed.
 		 */
-		if (svm->ghcb_sa_sync) {
+		if (svm->sev_es.ghcb_sa_sync) {
 			kvm_write_guest(svm->vcpu.kvm,
-					ghcb_get_sw_scratch(svm->ghcb),
-					svm->ghcb_sa, svm->ghcb_sa_len);
-			svm->ghcb_sa_sync = false;
+					ghcb_get_sw_scratch(svm->sev_es.ghcb),
+					svm->sev_es.ghcb_sa,
+					svm->sev_es.ghcb_sa_len);
+			svm->sev_es.ghcb_sa_sync = false;
 		}
 
-		kfree(svm->ghcb_sa);
-		svm->ghcb_sa = NULL;
-		svm->ghcb_sa_free = false;
+		kfree(svm->sev_es.ghcb_sa);
+		svm->sev_es.ghcb_sa = NULL;
+		svm->sev_es.ghcb_sa_free = false;
 	}
 
-	trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);
+	trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);
 
 	sev_es_sync_to_ghcb(svm);
 
-	kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
-	svm->ghcb = NULL;
+	kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
+	svm->sev_es.ghcb = NULL;
 }
 
 void pre_sev_run(struct vcpu_svm *svm, int cpu)
@@ -2302,7 +2303,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
 static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
-	struct ghcb *ghcb = svm->ghcb;
+	struct ghcb *ghcb = svm->sev_es.ghcb;
 	u64 ghcb_scratch_beg, ghcb_scratch_end;
 	u64 scratch_gpa_beg, scratch_gpa_end;
 	void *scratch_va;
@@ -2338,7 +2339,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 			return false;
 		}
 
-		scratch_va = (void *)svm->ghcb;
+		scratch_va = (void *)svm->sev_es.ghcb;
 		scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
 	} else {
 		/*
@@ -2368,12 +2369,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 		 * the vCPU next time (i.e. a read was requested so the data
 		 * must be written back to the guest memory).
 		 */
-		svm->ghcb_sa_sync = sync;
-		svm->ghcb_sa_free = true;
+		svm->sev_es.ghcb_sa_sync = sync;
+		svm->sev_es.ghcb_sa_free = true;
 	}
 
-	svm->ghcb_sa = scratch_va;
-	svm->ghcb_sa_len = len;
+	svm->sev_es.ghcb_sa = scratch_va;
+	svm->sev_es.ghcb_sa_len = len;
 
 	return true;
 }
@@ -2492,15 +2493,15 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
-	if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
+	if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
 		/* Unable to map GHCB from guest */
 		vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
 			    ghcb_gpa);
 		return -EINVAL;
 	}
 
-	svm->ghcb = svm->ghcb_map.hva;
-	ghcb = svm->ghcb_map.hva;
+	svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
+	ghcb = svm->sev_es.ghcb_map.hva;
 
 	trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
 
@@ -2523,7 +2524,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 		ret = kvm_sev_es_mmio_read(vcpu,
 					   control->exit_info_1,
 					   control->exit_info_2,
-					   svm->ghcb_sa);
+					   svm->sev_es.ghcb_sa);
 		break;
 	case SVM_VMGEXIT_MMIO_WRITE:
 		if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
@@ -2532,7 +2533,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 		ret = kvm_sev_es_mmio_write(vcpu,
 					    control->exit_info_1,
 					    control->exit_info_2,
-					    svm->ghcb_sa);
+					    svm->sev_es.ghcb_sa);
 		break;
 	case SVM_VMGEXIT_NMI_COMPLETE:
 		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
@@ -2582,8 +2583,8 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
 	if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
 		return -EINVAL;
 
-	return kvm_sev_es_string_io(&svm->vcpu, size, port,
-				    svm->ghcb_sa, svm->ghcb_sa_len / size, in);
+	return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
+				    svm->sev_es.ghcb_sa_len / size, in);
 }
 
 void sev_es_init_vmcb(struct vcpu_svm *svm)
@@ -2598,7 +2599,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm)
 	 * VMCB page. Do not include the encryption mask on the VMSA physical
 	 * address since hardware will access it using the guest key.
 	 */
-	svm->vmcb->control.vmsa_pa = __pa(svm->vmsa);
+	svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
 
 	/* Can't intercept CR register access, HV can't modify CR registers */
 	svm_clr_intercept(svm, INTERCEPT_CR0_READ);
@@ -2670,8 +2671,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	/* First SIPI: Use the values as initially set by the VMM */
-	if (!svm->received_first_sipi) {
-		svm->received_first_sipi = true;
+	if (!svm->sev_es.received_first_sipi) {
+		svm->sev_es.received_first_sipi = true;
 		return;
 	}
 
@@ -2680,8 +2681,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 	 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
 	 * non-zero value.
 	 */
-	if (!svm->ghcb)
+	if (!svm->sev_es.ghcb)
 		return;
 
-	ghcb_set_sw_exit_info_2(svm->ghcb, 1);
+	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
 }
8 changes: 4 additions & 4 deletions arch/x86/kvm/svm/svm.c
@@ -1450,7 +1450,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 	svm_switch_vmcb(svm, &svm->vmcb01);
 
 	if (vmsa_page)
-		svm->vmsa = page_address(vmsa_page);
+		svm->sev_es.vmsa = page_address(vmsa_page);
 
 	svm->guest_state_loaded = false;
 
@@ -2833,11 +2833,11 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->ghcb))
+	if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
 		return kvm_complete_insn_gp(vcpu, err);
 
-	ghcb_set_sw_exit_info_1(svm->ghcb, 1);
-	ghcb_set_sw_exit_info_2(svm->ghcb,
+	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1);
+	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb,
 				X86_TRAP_GP |
 				SVM_EVTINJ_TYPE_EXEPT |
 				SVM_EVTINJ_VALID);
26 changes: 15 additions & 11 deletions arch/x86/kvm/svm/svm.h
@@ -123,6 +123,20 @@ struct svm_nested_state {
 	bool initialized;
 };
 
+struct vcpu_sev_es_state {
+	/* SEV-ES support */
+	struct vmcb_save_area *vmsa;
+	struct ghcb *ghcb;
+	struct kvm_host_map ghcb_map;
+	bool received_first_sipi;
+
+	/* SEV-ES scratch area support */
+	void *ghcb_sa;
+	u64 ghcb_sa_len;
+	bool ghcb_sa_sync;
+	bool ghcb_sa_free;
+};
+
 struct vcpu_svm {
 	struct kvm_vcpu vcpu;
 	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
@@ -186,17 +200,7 @@ struct vcpu_svm {
 		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
 	} shadow_msr_intercept;
 
-	/* SEV-ES support */
-	struct vmcb_save_area *vmsa;
-	struct ghcb *ghcb;
-	struct kvm_host_map ghcb_map;
-	bool received_first_sipi;
-
-	/* SEV-ES scratch area support */
-	void *ghcb_sa;
-	u64 ghcb_sa_len;
-	bool ghcb_sa_sync;
-	bool ghcb_sa_free;
+	struct vcpu_sev_es_state sev_es;
 
 	bool guest_state_loaded;
 };
