Skip to content

Commit

Permalink
KVM: x86: Move guts of kvm_arch_init() to standalone helper
Browse files · Browse the repository at this point in the history
Move the guts of kvm_arch_init() to a new helper, kvm_x86_vendor_init(),
so that VMX can do _all_ arch and vendor initialization before calling
kvm_init().  Calling kvm_init() must be the _very_ last step during init,
as kvm_init() exposes /dev/kvm to userspace, i.e. allows creating VMs.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20221130230934.1014142-14-seanjc@google.com
  • Loading branch information
sean-jc authored and jialeif committed Mar 21, 2023
1 parent 6ab460a commit 31620bb
Show file tree
Hide file tree
Showing 4 changed files with 52 additions and 10 deletions.
3 changes: 3 additions & 0 deletions arch/x86/include/asm/kvm_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -1760,6 +1760,9 @@ extern struct kvm_x86_ops kvm_x86_ops;
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
#include <asm/kvm-x86-ops.h>

int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops);
void kvm_x86_vendor_exit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
Expand Down
23 changes: 21 additions & 2 deletions arch/x86/kvm/svm/svm.c
Original file line number Diff line number Diff line change
Expand Up @@ -5097,15 +5097,34 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {

/*
 * Module entry point for KVM's SVM (AMD) backend.
 *
 * Performs x86 arch/vendor initialization via kvm_x86_vendor_init() first,
 * then calls kvm_init().  kvm_init() must be the very last step: it exposes
 * /dev/kvm to userspace, i.e. allows VMs to be created, so all other setup
 * must already be complete.
 *
 * Returns 0 on success, a negative errno on failure.
 *
 * NOTE(review): the rendered diff had left the stale pre-commit
 * "return kvm_init(...)" lines fused in front of the new code, which made
 * the vendor-init path unreachable and called kvm_init() twice; they are
 * removed here to match the post-commit function.
 */
static int __init svm_init(void)
{
	int r;

	__unused_size_checks();

	r = kvm_x86_vendor_init(&svm_init_ops);
	if (r)
		return r;

	/*
	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
	 * exposed to userspace!
	 */
	r = kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
		     __alignof__(struct vcpu_svm), THIS_MODULE);
	if (r)
		goto err_kvm_init;

	return 0;

err_kvm_init:
	/* Unwind vendor init if exposing /dev/kvm failed. */
	kvm_x86_vendor_exit();
	return r;
}

/*
 * Module exit point for KVM's SVM backend.  Tears down in the reverse
 * order of svm_init(): common KVM teardown (which removes /dev/kvm)
 * first, then the x86 vendor/arch state.
 */
static void __exit svm_exit(void)
{
kvm_exit();
kvm_x86_vendor_exit();
}

module_init(svm_init)
Expand Down
21 changes: 15 additions & 6 deletions arch/x86/kvm/vmx/vmx.c
Original file line number Diff line number Diff line change
Expand Up @@ -8564,6 +8564,7 @@ static void vmx_exit(void)
#endif

kvm_exit();
kvm_x86_vendor_exit();

vmx_cleanup_l1d_flush();

Expand All @@ -8581,23 +8582,25 @@ static int __init vmx_init(void)
*/
hv_init_evmcs();

r = kvm_x86_vendor_init(&vmx_init_ops);
if (r)
return r;

r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
__alignof__(struct vcpu_vmx), THIS_MODULE);
if (r)
return r;
goto err_kvm_init;

/*
* Must be called after kvm_init() so enable_ept is properly set
* Must be called after common x86 init so enable_ept is properly set
* up. Hand the parameter mitigation value in which was stored in
* the pre module init parser. If no parameter was given, it will
* contain 'auto' which will be turned into the default 'cond'
* mitigation mode.
*/
r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
if (r) {
vmx_exit();
return r;
}
if (r)
goto err_l1d_flush;

vmx_setup_fb_clear_ctrl();

Expand All @@ -8622,5 +8625,11 @@ static int __init vmx_init(void)
allow_smaller_maxphyaddr = true;

return 0;

err_l1d_flush:
vmx_exit();
err_kvm_init:
kvm_x86_vendor_exit();
return r;
}
module_init(vmx_init);
15 changes: 13 additions & 2 deletions arch/x86/kvm/x86.c
Original file line number Diff line number Diff line change
Expand Up @@ -9310,7 +9310,16 @@ static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)

/*
 * Arch hook invoked from common kvm_init().  All real initialization has
 * moved to kvm_x86_vendor_init(), which vendor modules call before
 * kvm_init(), so this is now a deliberate no-op that always succeeds.
 *
 * NOTE(review): the rendered diff left the stale removed line
 * "struct kvm_x86_init_ops *ops = opaque;" fused into the new stub; it is
 * dropped here (it was an unused local in the post-commit version).
 */
int kvm_arch_init(void *opaque)
{
	return 0;
}

/*
 * Arch teardown hook invoked from common kvm_exit().  Intentionally empty:
 * the former body now lives in kvm_x86_vendor_exit(), which vendor modules
 * call after kvm_exit().
 */
void kvm_arch_exit(void)
{

}

int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
{
u64 host_pat;
int r;

Expand Down Expand Up @@ -9442,8 +9451,9 @@ int kvm_arch_init(void *opaque)
kmem_cache_destroy(x86_emulator_cache);
return r;
}
EXPORT_SYMBOL_GPL(kvm_x86_vendor_init);

void kvm_arch_exit(void)
void kvm_x86_vendor_exit(void)
{
kvm_unregister_perf_callbacks();

Expand Down Expand Up @@ -9473,6 +9483,7 @@ void kvm_arch_exit(void)
WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
#endif
}
EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);

static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
{
Expand Down

0 comments on commit 31620bb

Please sign in to comment.