vfio/pci: Use the struct file as the handle not the vfio_group

VFIO PCI does a security check as part of hot reset to prove that the user
has permission to manipulate all the devices that will be impacted by the
reset.

Use a new API vfio_file_has_dev() to perform this security check against
the struct file directly and remove the vfio_group from VFIO PCI.
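
As a rough sketch of what the permission check reduces to after this change
(the helper name below is illustrative; the real code is the
vfio_dev_in_groups() hunk further down), the driver only needs the group
files the user passed in:

	/* Illustrative only: return true if any of the user-supplied VFIO
	 * group files grants access to the given device.
	 */
	static bool dev_covered_by_files(struct file **files, unsigned int count,
					 struct vfio_device *device)
	{
		unsigned int i;

		for (i = 0; i < count; i++)
			if (vfio_file_has_dev(files[i], device))
				return true;
		return false;
	}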

Since VFIO PCI was the last user of vfio_group_get_external_user() and
vfio_group_put_external_user(), remove them as well.

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/8-v3-f7729924a7ea+25e33-vfio_kvm_no_group_jgg@nvidia.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Author:    Jason Gunthorpe <jgg@nvidia.com>
Date:      2022-05-04 16:14:46 -03:00
Committer: Alex Williamson <alex.williamson@redhat.com>
Parent:    3e5449d5f9
Commit:    6a985ae80b

3 changed files with 40 additions and 75 deletions

--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c

@@ -556,7 +556,7 @@ static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
 
 struct vfio_pci_group_info {
 	int count;
-	struct vfio_group **groups;
+	struct file **files;
 };
 
 static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
@@ -1018,10 +1018,10 @@ reset_info_exit:
 	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
 		struct vfio_pci_hot_reset hdr;
 		int32_t *group_fds;
-		struct vfio_group **groups;
+		struct file **files;
 		struct vfio_pci_group_info info;
 		bool slot = false;
-		int group_idx, count = 0, ret = 0;
+		int file_idx, count = 0, ret = 0;
 
 		minsz = offsetofend(struct vfio_pci_hot_reset, count);
 
@@ -1054,17 +1054,17 @@ reset_info_exit:
 			return -EINVAL;
 
 		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
-		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
-		if (!group_fds || !groups) {
+		files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL);
+		if (!group_fds || !files) {
 			kfree(group_fds);
-			kfree(groups);
+			kfree(files);
 			return -ENOMEM;
 		}
 
 		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
 				   hdr.count * sizeof(*group_fds))) {
 			kfree(group_fds);
-			kfree(groups);
+			kfree(files);
 			return -EFAULT;
 		}
 
@@ -1073,22 +1073,22 @@ reset_info_exit:
 		 * user interface and store the group and iommu ID. This
 		 * ensures the group is held across the reset.
 		 */
-		for (group_idx = 0; group_idx < hdr.count; group_idx++) {
-			struct vfio_group *group;
-			struct fd f = fdget(group_fds[group_idx]);
-			if (!f.file) {
+		for (file_idx = 0; file_idx < hdr.count; file_idx++) {
+			struct file *file = fget(group_fds[file_idx]);
+
+			if (!file) {
 				ret = -EBADF;
 				break;
 			}
 
-			group = vfio_group_get_external_user(f.file);
-			fdput(f);
-			if (IS_ERR(group)) {
-				ret = PTR_ERR(group);
+			/* Ensure the FD is a vfio group FD.*/
+			if (!vfio_file_iommu_group(file)) {
+				fput(file);
+				ret = -EINVAL;
 				break;
 			}
 
-			groups[group_idx] = group;
+			files[file_idx] = file;
 		}
 
 		kfree(group_fds);
@@ -1098,15 +1098,15 @@ reset_info_exit:
 			goto hot_reset_release;
 
 		info.count = hdr.count;
-		info.groups = groups;
+		info.files = files;
 
 		ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info);
 
 hot_reset_release:
-		for (group_idx--; group_idx >= 0; group_idx--)
-			vfio_group_put_external_user(groups[group_idx]);
+		for (file_idx--; file_idx >= 0; file_idx--)
+			fput(files[file_idx]);
 
-		kfree(groups);
+		kfree(files);
 		return ret;
 	} else if (cmd == VFIO_DEVICE_IOEVENTFD) {
 		struct vfio_device_ioeventfd ioeventfd;
@@ -1972,7 +1972,7 @@ static bool vfio_dev_in_groups(struct vfio_pci_core_device *vdev,
 	unsigned int i;
 
 	for (i = 0; i < groups->count; i++)
-		if (groups->groups[i] == vdev->vdev.group)
+		if (vfio_file_has_dev(groups->files[i], &vdev->vdev))
 			return true;
 	return false;
 }

--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c

@@ -1633,58 +1633,6 @@ static const struct file_operations vfio_device_fops = {
 	.mmap		= vfio_device_fops_mmap,
 };
 
-/*
- * External user API, exported by symbols to be linked dynamically.
- *
- * The protocol includes:
- *  1. do normal VFIO init operation:
- *	- opening a new container;
- *	- attaching group(s) to it;
- *	- setting an IOMMU driver for a container.
- * When IOMMU is set for a container, all groups in it are
- * considered ready to use by an external user.
- *
- * 2. User space passes a group fd to an external user.
- * The external user calls vfio_group_get_external_user()
- * to verify that:
- *	- the group is initialized;
- *	- IOMMU is set for it.
- * If both checks passed, vfio_group_get_external_user()
- * increments the container user counter to prevent
- * the VFIO group from disposal before KVM exits.
- *
- * 3. When the external KVM finishes, it calls
- * vfio_group_put_external_user() to release the VFIO group.
- * This call decrements the container user counter.
- */
-struct vfio_group *vfio_group_get_external_user(struct file *filep)
-{
-	struct vfio_group *group = filep->private_data;
-	int ret;
-
-	if (filep->f_op != &vfio_group_fops)
-		return ERR_PTR(-EINVAL);
-
-	ret = vfio_group_add_container_user(group);
-	if (ret)
-		return ERR_PTR(ret);
-
-	/*
-	 * Since the caller holds the fget on the file group->users must be >= 1
-	 */
-	vfio_group_get(group);
-
-	return group;
-}
-EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
-
-void vfio_group_put_external_user(struct vfio_group *group)
-{
-	vfio_group_try_dissolve_container(group);
-	vfio_group_put(group);
-}
-EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
-
 /**
  * vfio_file_iommu_group - Return the struct iommu_group for the vfio group file
  * @file: VFIO group file
@@ -1752,6 +1700,24 @@ void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
 
+/**
+ * vfio_file_has_dev - True if the VFIO file is a handle for device
+ * @file: VFIO file to check
+ * @device: Device that must be part of the file
+ *
+ * Returns true if given file has permission to manipulate the given device.
+ */
+bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
+{
+	struct vfio_group *group = file->private_data;
+
+	if (file->f_op != &vfio_group_fops)
+		return false;
+
+	return group == device->group;
+}
+EXPORT_SYMBOL_GPL(vfio_file_has_dev);
+
 /*
  * Sub-module support
  */

--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h

@@ -138,11 +138,10 @@ int vfio_mig_get_next_state(struct vfio_device *device,
 /*
  * External user API
  */
-extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
-extern void vfio_group_put_external_user(struct vfio_group *group);
 extern struct iommu_group *vfio_file_iommu_group(struct file *file);
 extern bool vfio_file_enforced_coherent(struct file *file);
 extern void vfio_file_set_kvm(struct file *file, struct kvm *kvm);
+extern bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
 
 #define VFIO_PIN_PAGES_MAX_ENTRIES	(PAGE_SIZE/sizeof(unsigned long))