vfio: fix deadlock between group lock and kvm lock
After commit 51cdc8bc12, we have another deadlock scenario between the kvm->lock and the vfio group_lock, with two different codepaths acquiring the locks in different order. Specifically, in vfio_open_device, vfio holds the vfio group_lock when issuing device->ops->open_device, but some drivers (like vfio-ap) need to acquire kvm->lock during their open_device routine. Meanwhile, kvm_vfio_release acquires the kvm->lock first before calling vfio_file_set_kvm, which acquires the vfio group_lock.

To resolve this, let's remove the need for the vfio group_lock from the kvm_vfio_release codepath. This is done by introducing a new spinlock to protect modifications to the vfio group kvm pointer, and by acquiring a kvm reference from within vfio while holding this spinlock, with the reference held until the last close for the device in question.

Fixes: 51cdc8bc12 ("kvm/vfio: Fix potential deadlock on vfio group_lock")
Reported-by: Anthony Krowiak <akrowiak@linux.ibm.com>
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Matthew Rosato <mjrosato@linux.ibm.com>
Tested-by: Tony Krowiak <akrowiak@linux.ibm.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Yi Liu <yi.l.liu@intel.com>
Link: https://lore.kernel.org/r/20230203215027.151988-2-mjrosato@linux.ibm.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
commit 2b48f52f2b, parent e592296cd6, committed by Alex Williamson
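To make the locking change easier to follow, here is a minimal userspace sketch of the pattern the fix adopts, written with pthreads rather than kernel primitives. It is illustrative only: fake_kvm, fake_group, group_get_kvm() and group_set_kvm() are invented stand-ins for kvm, vfio_group, vfio_device_group_get_kvm_safe() and vfio_file_set_kvm(), not kernel APIs. The point it demonstrates is that the KVM pointer is published and read only under a small dedicated lock, so the release side (kvm->lock, then the narrow lock) never needs the big group lock, while the open side takes its reference before any driver callback that might itself want kvm->lock.

/* Illustrative userspace sketch only -- every name here is invented. */
#include <pthread.h>
#include <stdio.h>

struct fake_kvm {
	pthread_mutex_t lock;		/* stands in for kvm->lock */
	int refcount;			/* stands in for KVM's user refcount */
};

struct fake_group {
	pthread_mutex_t group_lock;	 /* the "big" lock, held across open_device */
	pthread_spinlock_t kvm_ref_lock; /* the new, narrow lock for the pointer only */
	struct fake_kvm *kvm;		 /* pointer published by the KVM side */
};

/*
 * Open path: grab a reference to whatever kvm is currently associated with
 * the group.  Only kvm_ref_lock is held while the pointer is read, so this
 * path never conflicts with the kvm->lock -> narrow-lock order used below.
 */
static struct fake_kvm *group_get_kvm(struct fake_group *g)
{
	struct fake_kvm *kvm = NULL;

	pthread_spin_lock(&g->kvm_ref_lock);
	if (g->kvm) {
		kvm = g->kvm;
		kvm->refcount++;	/* reference held until the last close */
	}
	pthread_spin_unlock(&g->kvm_ref_lock);
	return kvm;
}

/*
 * Release path: KVM clears the association while holding kvm->lock.  Because
 * only the narrow kvm_ref_lock is taken here (not group_lock), the old
 * group_lock -> kvm->lock vs. kvm->lock -> group_lock inversion is gone.
 */
static void group_set_kvm(struct fake_group *g, struct fake_kvm *kvm)
{
	pthread_spin_lock(&g->kvm_ref_lock);
	g->kvm = kvm;
	pthread_spin_unlock(&g->kvm_ref_lock);
}

int main(void)
{
	struct fake_kvm kvm = { .refcount = 1 };
	struct fake_group grp = { .kvm = &kvm };

	pthread_mutex_init(&kvm.lock, NULL);
	pthread_mutex_init(&grp.group_lock, NULL);
	pthread_spin_init(&grp.kvm_ref_lock, 0);

	/* first open: take the reference up front, before any driver callback
	 * that might need kvm->lock */
	struct fake_kvm *ref = group_get_kvm(&grp);
	printf("open: kvm ref taken, refcount=%d\n", ref ? ref->refcount : 0);

	/* KVM file release: kvm->lock, then only the narrow lock */
	pthread_mutex_lock(&kvm.lock);
	group_set_kvm(&grp, NULL);
	pthread_mutex_unlock(&kvm.lock);

	/* the last close would drop the reference here */
	return 0;
}

The same shape appears in the diff below: vfio_file_set_kvm() switches from group_lock to the new kvm_ref_lock, and vfio_device_group_open() takes its reference under dev_set->lock before calling vfio_device_open().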
drivers/vfio/group.c

@@ -154,6 +154,18 @@ out_unlock:
 	return ret;
 }
 
+static void vfio_device_group_get_kvm_safe(struct vfio_device *device)
+{
+	spin_lock(&device->group->kvm_ref_lock);
+	if (!device->group->kvm)
+		goto unlock;
+
+	_vfio_device_get_kvm_safe(device, device->group->kvm);
+
+unlock:
+	spin_unlock(&device->group->kvm_ref_lock);
+}
+
 static int vfio_device_group_open(struct vfio_device *device)
 {
 	int ret;
@@ -164,13 +176,23 @@ static int vfio_device_group_open(struct vfio_device *device)
 		goto out_unlock;
 	}
 
+	mutex_lock(&device->dev_set->lock);
+
 	/*
-	 * Here we pass the KVM pointer with the group under the lock.  If the
-	 * device driver will use it, it must obtain a reference and release it
-	 * during close_device.
+	 * Before the first device open, get the KVM pointer currently
+	 * associated with the group (if there is one) and obtain a reference
+	 * now that will be held until the open_count reaches 0 again.  Save
+	 * the pointer in the device for use by drivers.
 	 */
-	ret = vfio_device_open(device, device->group->iommufd,
-			       device->group->kvm);
+	if (device->open_count == 0)
+		vfio_device_group_get_kvm_safe(device);
+
+	ret = vfio_device_open(device, device->group->iommufd, device->kvm);
+
+	if (device->open_count == 0)
+		vfio_device_put_kvm(device);
+
+	mutex_unlock(&device->dev_set->lock);
 
 out_unlock:
 	mutex_unlock(&device->group->group_lock);
@@ -180,7 +202,14 @@ out_unlock:
 void vfio_device_group_close(struct vfio_device *device)
 {
 	mutex_lock(&device->group->group_lock);
+	mutex_lock(&device->dev_set->lock);
+
 	vfio_device_close(device, device->group->iommufd);
+
+	if (device->open_count == 0)
+		vfio_device_put_kvm(device);
+
+	mutex_unlock(&device->dev_set->lock);
 	mutex_unlock(&device->group->group_lock);
 }
 
@@ -450,6 +479,7 @@ static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group,
 
 	refcount_set(&group->drivers, 1);
 	mutex_init(&group->group_lock);
+	spin_lock_init(&group->kvm_ref_lock);
 	INIT_LIST_HEAD(&group->device_list);
 	mutex_init(&group->device_lock);
 	group->iommu_group = iommu_group;
@@ -803,9 +833,9 @@ void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
 	if (!vfio_file_is_group(file))
 		return;
 
-	mutex_lock(&group->group_lock);
+	spin_lock(&group->kvm_ref_lock);
 	group->kvm = kvm;
-	mutex_unlock(&group->group_lock);
+	spin_unlock(&group->kvm_ref_lock);
 }
 EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
 
drivers/vfio/vfio.h

@@ -74,6 +74,7 @@ struct vfio_group {
 	struct file *opened_file;
 	struct blocking_notifier_head notifier;
 	struct iommufd_ctx *iommufd;
+	spinlock_t kvm_ref_lock;
 };
 
 int vfio_device_set_group(struct vfio_device *device,
@@ -244,4 +245,18 @@ extern bool vfio_noiommu __read_mostly;
 enum { vfio_noiommu = false };
 #endif
 
+#ifdef CONFIG_HAVE_KVM
+void _vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm);
+void vfio_device_put_kvm(struct vfio_device *device);
+#else
+static inline void _vfio_device_get_kvm_safe(struct vfio_device *device,
+					     struct kvm *kvm)
+{
+}
+
+static inline void vfio_device_put_kvm(struct vfio_device *device)
+{
+}
+#endif
+
 #endif
drivers/vfio/vfio_main.c

@@ -16,6 +16,9 @@
 #include <linux/fs.h>
 #include <linux/idr.h>
 #include <linux/iommu.h>
+#ifdef CONFIG_HAVE_KVM
+#include <linux/kvm_host.h>
+#endif
 #include <linux/list.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
@@ -338,6 +341,55 @@ void vfio_unregister_group_dev(struct vfio_device *device)
 }
 EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
 
+#ifdef CONFIG_HAVE_KVM
+void _vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
+{
+	void (*pfn)(struct kvm *kvm);
+	bool (*fn)(struct kvm *kvm);
+	bool ret;
+
+	lockdep_assert_held(&device->dev_set->lock);
+
+	pfn = symbol_get(kvm_put_kvm);
+	if (WARN_ON(!pfn))
+		return;
+
+	fn = symbol_get(kvm_get_kvm_safe);
+	if (WARN_ON(!fn)) {
+		symbol_put(kvm_put_kvm);
+		return;
+	}
+
+	ret = fn(kvm);
+	symbol_put(kvm_get_kvm_safe);
+	if (!ret) {
+		symbol_put(kvm_put_kvm);
+		return;
+	}
+
+	device->put_kvm = pfn;
+	device->kvm = kvm;
+}
+
+void vfio_device_put_kvm(struct vfio_device *device)
+{
+	lockdep_assert_held(&device->dev_set->lock);
+
+	if (!device->kvm)
+		return;
+
+	if (WARN_ON(!device->put_kvm))
+		goto clear;
+
+	device->put_kvm(device->kvm);
+	device->put_kvm = NULL;
+	symbol_put(kvm_put_kvm);
+
+clear:
+	device->kvm = NULL;
+}
+#endif
+
 /* true if the vfio_device has open_device() called but not close_device() */
 static bool vfio_assert_device_open(struct vfio_device *device)
 {
@@ -361,7 +413,6 @@ static int vfio_device_first_open(struct vfio_device *device,
 	if (ret)
 		goto err_module_put;
 
-	device->kvm = kvm;
 	if (device->ops->open_device) {
 		ret = device->ops->open_device(device);
 		if (ret)
@@ -370,7 +421,6 @@ static int vfio_device_first_open(struct vfio_device *device,
 	return 0;
 
 err_unuse_iommu:
-	device->kvm = NULL;
 	if (iommufd)
 		vfio_iommufd_unbind(device);
 	else
@@ -387,7 +437,6 @@ static void vfio_device_last_close(struct vfio_device *device,
 
 	if (device->ops->close_device)
 		device->ops->close_device(device);
-	device->kvm = NULL;
 	if (iommufd)
 		vfio_iommufd_unbind(device);
 	else
@@ -400,14 +449,14 @@ int vfio_device_open(struct vfio_device *device,
 {
 	int ret = 0;
 
-	mutex_lock(&device->dev_set->lock);
+	lockdep_assert_held(&device->dev_set->lock);
+
 	device->open_count++;
 	if (device->open_count == 1) {
 		ret = vfio_device_first_open(device, iommufd, kvm);
 		if (ret)
 			device->open_count--;
 	}
-	mutex_unlock(&device->dev_set->lock);
 
 	return ret;
 }
@@ -415,12 +464,12 @@ int vfio_device_open(struct vfio_device *device,
 void vfio_device_close(struct vfio_device *device,
 		       struct iommufd_ctx *iommufd)
 {
-	mutex_lock(&device->dev_set->lock);
+	lockdep_assert_held(&device->dev_set->lock);
+
 	vfio_assert_device_open(device);
 	if (device->open_count == 1)
 		vfio_device_last_close(device, iommufd);
 	device->open_count--;
-	mutex_unlock(&device->dev_set->lock);
 }
 
 /*
include/linux/vfio.h

@@ -46,7 +46,6 @@ struct vfio_device {
 	struct vfio_device_set *dev_set;
 	struct list_head dev_set_list;
 	unsigned int migration_flags;
-	/* Driver must reference the kvm during open_device or never touch it */
 	struct kvm *kvm;
 
 	/* Members below here are private, not for driver use */
@@ -58,6 +57,7 @@ struct vfio_device {
 	struct list_head group_next;
 	struct list_head iommu_entry;
 	struct iommufd_access *iommufd_access;
+	void (*put_kvm)(struct kvm *kvm);
 #if IS_ENABLED(CONFIG_IOMMUFD)
 	struct iommufd_device *iommufd_device;
 	struct iommufd_ctx *iommufd_ictx;
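A side note on the symbol_get()/symbol_put() calls added in vfio_main.c above: symbol_get() resolves an exported symbol at run time and takes a reference on the module that provides it, so vfio can reach kvm_get_kvm_safe() and kvm_put_kvm() without a hard link-time dependency on KVM, which may be built as a module or not at all; kvm_put_kvm is resolved first so that once the get succeeds the function needed to drop the reference is guaranteed to remain available until vfio_device_put_kvm() runs. The userspace sketch below uses dlopen()/dlsym() as a rough analogue of that idea; the library name libprovider.so and the provider_get_ref()/provider_put_ref() functions are invented purely for illustration.

/* Userspace analogue of symbol_get()/symbol_put(): resolve a provider's
 * functions at run time instead of linking against them, so the consumer
 * still works when the provider is absent.  All names are invented. */
#include <dlfcn.h>
#include <stdio.h>

int main(void)
{
	/* like symbol_get(): pins the provider while we use it */
	void *handle = dlopen("libprovider.so", RTLD_NOW);
	if (!handle) {
		fprintf(stderr, "provider not available: %s\n", dlerror());
		return 0;	/* degrade gracefully, as vfio does without KVM */
	}

	/* resolve both entry points up front; bail out if either is missing */
	int (*get_ref)(void) = (int (*)(void))dlsym(handle, "provider_get_ref");
	void (*put_ref)(void) = (void (*)(void))dlsym(handle, "provider_put_ref");
	if (!get_ref || !put_ref) {
		dlclose(handle);
		return 0;
	}

	if (get_ref())
		put_ref();

	dlclose(handle);	/* like symbol_put(): unpin the provider */
	return 0;
}

Under these assumptions it builds with something like cc sketch.c -ldl; the real kernel code keeps the reference on kvm_put_kvm until the last close so the KVM module cannot be unloaded while vfio still holds a kvm pointer.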