-rw-r--r--  Documentation/virt/kvm/api.rst       1
-rw-r--r--  Documentation/virt/kvm/locking.rst   2
-rw-r--r--  arch/mips/include/asm/kvm_host.h     2
-rw-r--r--  arch/mips/kvm/vz.c                   2
-rw-r--r--  include/linux/kvm_host.h             4
-rw-r--r--  include/linux/kvm_types.h            2
-rw-r--r--  virt/kvm/kvm_main.c                 26
7 files changed, 19 insertions, 20 deletions
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index e54fdfaa75e2..e0c63de8e837 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -7330,6 +7330,7 @@ and injected exceptions.
will clear DR6.RTM.
7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
+--------------------------------------
:Architectures: x86, arm64, mips
:Parameters: args[0] whether feature should be enabled or not
diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
index 14c4e9fa501d..8c77554e4896 100644
--- a/Documentation/virt/kvm/locking.rst
+++ b/Documentation/virt/kvm/locking.rst
@@ -21,7 +21,7 @@ The acquisition orders for mutexes are as follows:
- kvm->mn_active_invalidate_count ensures that pairs of
invalidate_range_start() and invalidate_range_end() callbacks
use the same memslots array. kvm->slots_lock and kvm->slots_arch_lock
- are taken on the waiting side in install_new_memslots, so MMU notifiers
+ are taken on the waiting side when modifying memslots, so MMU notifiers
must not take either kvm->slots_lock or kvm->slots_arch_lock.
For SRCU:
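The paragraph above states a lock-ordering rule: the side that modifies memslots waits for in-flight invalidations while holding kvm->slots_lock, so the MMU-notifier side must never take that lock. A minimal conceptual sketch of that constraint (not the KVM implementation; the busy-wait and the names are purely illustrative):

static DEFINE_MUTEX(slots_lock);
static atomic_t active_invalidate_count = ATOMIC_INIT(0);

/* Waiting side: modifies memslots, so it drains in-flight invalidations
 * while holding slots_lock. */
static void modify_memslots(void)
{
	mutex_lock(&slots_lock);
	while (atomic_read(&active_invalidate_count))
		cpu_relax();	/* illustrative busy-wait only */
	/* ... swap in the new memslots array ... */
	mutex_unlock(&slots_lock);
}

/* Notifier side: brackets the invalidation with the counter.  Taking
 * slots_lock in here could deadlock, because the waiting side holds it
 * while waiting for this counter to reach zero. */
static void invalidate_range(void)
{
	atomic_inc(&active_invalidate_count);	/* invalidate_range_start() */
	/* ... unmap the affected guest range ... */
	atomic_dec(&active_invalidate_count);	/* invalidate_range_end() */
}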
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 2803c9c21ef9..957121a495f0 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -757,7 +757,7 @@ struct kvm_mips_callbacks {
int (*vcpu_run)(struct kvm_vcpu *vcpu);
void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
};
-extern struct kvm_mips_callbacks *kvm_mips_callbacks;
+extern const struct kvm_mips_callbacks * const kvm_mips_callbacks;
int kvm_mips_emulation_init(void);
/* Debug: dump vcpu state */
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index dafab003ea0d..3d21cbfa7443 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -3305,7 +3305,7 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = {
};
/* FIXME: Get rid of the callbacks now that trap-and-emulate is gone. */
-struct kvm_mips_callbacks *kvm_mips_callbacks = &kvm_vz_callbacks;
+const struct kvm_mips_callbacks * const kvm_mips_callbacks = &kvm_vz_callbacks;
int kvm_mips_emulation_init(void)
{
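The two MIPS hunks above constify both the callbacks structure and the pointer to it. A standalone sketch of what each level of const buys, with made-up names rather than the MIPS KVM code:

#include <stdio.h>

struct ops {
	void (*do_thing)(void);
};

static void thing(void) { puts("thing"); }

static const struct ops real_ops = { .do_thing = thing };

/* Pointer can be retargeted, pointee cannot be written: */
static const struct ops *p1 = &real_ops;

/* Neither pointer nor pointee can be written; both may live in
 * read-only data and accidental stores become compile errors: */
static const struct ops *const p2 = &real_ops;

int main(void)
{
	p1 = &real_ops;		/* ok: p1 itself is writable            */
	/* p1->do_thing = 0;	   error: pointee is const            */
	/* p2 = &real_ops;	   error: the pointer itself is const */
	p2->do_thing();
	return 0;
}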
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 90edc16d37e5..9696c2fb30e9 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -58,7 +58,7 @@
/*
* Bit 63 of the memslot generation number is an "update in-progress flag",
- * e.g. is temporarily set for the duration of install_new_memslots().
+ * e.g. is temporarily set for the duration of kvm_swap_active_memslots().
* This flag effectively creates a unique generation number that is used to
* mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
* i.e. may (or may not) have come from the previous memslots generation.
@@ -713,7 +713,7 @@ struct kvm {
* use by the VM. To be used under the slots_lock (above) or in a
* kvm->srcu critical section where acquiring the slots_lock would
* lead to deadlock with the synchronize_srcu in
- * install_new_memslots.
+ * kvm_swap_active_memslots().
*/
struct mutex slots_arch_lock;
struct mm_struct *mm; /* userspace tied to this vm */
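The reworded comments above concern the memslot generation number, whose top bit flags an update in progress so that anything cached during that window is later seen as stale. A conceptual sketch of the idea, with invented names and no claim to match the real KVM bookkeeping:

#include <stdbool.h>
#include <stdint.h>

#define GEN_UPDATE_IN_PROGRESS	(1ULL << 63)

struct cached_entry {
	uint64_t gen;		/* generation the entry was created under */
	/* ... cached translation data ... */
};

static uint64_t memslots_gen;

static void begin_update(void)
{
	/* Lookups racing with the update record a generation that can
	 * never match a settled one, so they are retried later. */
	memslots_gen |= GEN_UPDATE_IN_PROGRESS;
}

static void end_update(void)
{
	/* Clear the flag and bump the counter: everything cached before
	 * or during the update now mismatches and is treated as stale. */
	memslots_gen = (memslots_gen & ~GEN_UPDATE_IN_PROGRESS) + 1;
}

static bool entry_is_stale(const struct cached_entry *e)
{
	return e->gen != memslots_gen;
}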
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 2728d49bbdf6..6f4737d5046a 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -91,11 +91,11 @@ struct gfn_to_pfn_cache {
* is topped up (__kvm_mmu_topup_memory_cache()).
*/
struct kvm_mmu_memory_cache {
- int nobjs;
gfp_t gfp_zero;
gfp_t gfp_custom;
struct kmem_cache *kmem_cache;
int capacity;
+ int nobjs;
void **objects;
};
#endif
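Moving nobjs next to capacity groups the two ints ahead of the trailing pointer; on a typical LP64 target that removes eight bytes of alignment padding. A quick standalone check of that effect (field names mirror the struct, but gfp_t is approximated by unsigned int and kmem_cache by void *; that shrinking the struct was the motivation for the reorder is an assumption here):

#include <stdio.h>

struct before {
	int nobjs;		/* 0..4                                */
	unsigned int gfp_zero;	/* 4..8                                */
	unsigned int gfp_custom;/* 8..12, then 4 bytes padding to 16   */
	void *kmem_cache;	/* 16..24                              */
	int capacity;		/* 24..28, then 4 bytes padding to 32  */
	void **objects;		/* 32..40                              */
};

struct after {
	unsigned int gfp_zero;	/* 0..4                                */
	unsigned int gfp_custom;/* 4..8                                */
	void *kmem_cache;	/* 8..16                               */
	int capacity;		/* 16..20                              */
	int nobjs;		/* 20..24                              */
	void **objects;		/* 24..32                              */
};

int main(void)
{
	/* Typically prints "40 32" on LP64 targets. */
	printf("%zu %zu\n", sizeof(struct before), sizeof(struct after));
	return 0;
}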
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f40b72eb0e7b..d1abb331ea68 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1298,7 +1298,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
* At this point, pending calls to invalidate_range_start()
* have completed but no more MMU notifiers will run, so
* mn_active_invalidate_count may remain unbalanced.
- * No threads can be waiting in install_new_memslots as the
+ * No threads can be waiting in kvm_swap_active_memslots() as the
* last reference on KVM has been dropped, but freeing
* memslots would deadlock without this manual intervention.
*/
@@ -1742,13 +1742,13 @@ static void kvm_invalidate_memslot(struct kvm *kvm,
kvm_arch_flush_shadow_memslot(kvm, old);
kvm_arch_guest_memory_reclaimed(kvm);
- /* Was released by kvm_swap_active_memslots, reacquire. */
+ /* Was released by kvm_swap_active_memslots(), reacquire. */
mutex_lock(&kvm->slots_arch_lock);
/*
* Copy the arch-specific field of the newly-installed slot back to the
* old slot as the arch data could have changed between releasing
- * slots_arch_lock in install_new_memslots() and re-acquiring the lock
+ * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
* above. Writers are required to retrieve memslots *after* acquiring
* slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
*/
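The comments in this and the next hunk describe kvm_swap_active_memslots() publishing the new array with rcu_assign_pointer() and then waiting in synchronize_srcu() for old readers to drain. A conceptual sketch of that publish-then-wait idiom, with illustrative types and names rather than the real KVM code:

struct slots {
	/* ... memslot array ... */
};

static struct slots __rcu *active_slots;
static struct srcu_struct slots_srcu;

static void swap_slots(struct slots *new, struct slots *old)
{
	/* Publish the new array; readers already inside an SRCU section
	 * keep using the old one safely. */
	rcu_assign_pointer(active_slots, new);

	/* Wait for every pre-existing SRCU read-side critical section to
	 * finish before the old array may be freed or reused. */
	synchronize_srcu(&slots_srcu);

	kfree(old);
}

/* Reader side: the dereferenced pointer is only valid inside the
 * srcu_read_lock()/srcu_read_unlock() pair. */
static void read_slots(void)
{
	int idx = srcu_read_lock(&slots_srcu);
	struct slots *s = srcu_dereference(active_slots, &slots_srcu);

	/* ... look up a memslot in s ... */
	(void)s;
	srcu_read_unlock(&slots_srcu, idx);
}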
@@ -1810,11 +1810,11 @@ static int kvm_set_memslot(struct kvm *kvm,
int r;
/*
- * Released in kvm_swap_active_memslots.
+ * Released in kvm_swap_active_memslots().
*
- * Must be held from before the current memslots are copied until
- * after the new memslots are installed with rcu_assign_pointer,
- * then released before the synchronize srcu in kvm_swap_active_memslots.
+ * Must be held from before the current memslots are copied until after
+ * the new memslots are installed with rcu_assign_pointer, then
+ * released before the synchronize srcu in kvm_swap_active_memslots().
*
* When modifying memslots outside of the slots_lock, must be held
* before reading the pointer to the current memslots until after all
@@ -3866,7 +3866,7 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
static int vcpu_get_pid(void *data, u64 *val)
{
- struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
+ struct kvm_vcpu *vcpu = data;
*val = pid_nr(rcu_access_pointer(vcpu->pid));
return 0;
}
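This hunk and the later debugfs ones drop explicit casts from void *; in C that conversion is implicit, so the cast only adds noise and can even mask a wrong target type. A minimal standalone illustration with made-up names:

#include <stdio.h>

struct payload {
	int value;
};

static int get_value(void *data)
{
	struct payload *p = data;	/* no cast needed: void * converts implicitly */
	/* struct payload *q = (struct payload *)data;   equivalent, just noisier */
	return p->value;
}

int main(void)
{
	struct payload pl = { .value = 42 };

	printf("%d\n", get_value(&pl));	/* prints 42 */
	return 0;
}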
@@ -5572,8 +5572,7 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
const char *fmt)
{
int ret;
- struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
- inode->i_private;
+ struct kvm_stat_data *stat_data = inode->i_private;
/*
* The debugfs files are a reference to the kvm struct which
@@ -5594,8 +5593,7 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
static int kvm_debugfs_release(struct inode *inode, struct file *file)
{
- struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
- inode->i_private;
+ struct kvm_stat_data *stat_data = inode->i_private;
simple_attr_release(inode, file);
kvm_put_kvm(stat_data->kvm);
@@ -5644,7 +5642,7 @@ static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
static int kvm_stat_data_get(void *data, u64 *val)
{
int r = -EFAULT;
- struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+ struct kvm_stat_data *stat_data = data;
switch (stat_data->kind) {
case KVM_STAT_VM:
@@ -5663,7 +5661,7 @@ static int kvm_stat_data_get(void *data, u64 *val)
static int kvm_stat_data_clear(void *data, u64 val)
{
int r = -EFAULT;
- struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+ struct kvm_stat_data *stat_data = data;
if (val)
return -EINVAL;