In order to support dirty page tracking, the driver has to implement the VFIO subsystem's vfio_log_ops. This includes log_start, log_stop, and log_read_and_clear. All of the tracker resources are allocated and dirty tracking on the device is started during log_start. The resources are cleaned up and dirty tracking on the device is stopped during log_stop. The dirty pages are determined and reported during log_read_and_clear.

In order to support these callbacks, admin queue commands are used. All of the admin queue command structures and implementations are included as part of this patch.

PDS_LM_CMD_DIRTY_STATUS is added to query the current status of dirty tracking on the device. This includes whether it's enabled (i.e. the number of regions being tracked from the device's perspective) and the maximum number of regions supported from the device's perspective.

PDS_LM_CMD_DIRTY_ENABLE is added to enable dirty tracking on the specified number of regions and their iova ranges.

PDS_LM_CMD_DIRTY_DISABLE is added to disable dirty tracking for all regions on the device.

PDS_LM_CMD_DIRTY_READ_SEQ and PDS_LM_CMD_DIRTY_WRITE_ACK are added to support reading and acknowledging the currently dirtied pages.

Signed-off-by: Brett Creeley <brett.creeley@amd.com>
Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Link: https://lore.kernel.org/r/20230807205755.29579-7-brett.creeley@amd.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
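For reference, the vfio_log_ops interface those callbacks plug into is declared in include/linux/vfio.h; paraphrased from the upstream header (not part of this patch), it looks roughly like this:

struct vfio_log_ops {
	int (*log_start)(struct vfio_device *device,
			 struct rb_root_cached *ranges, u32 nnodes,
			 u64 *page_size);
	int (*log_stop)(struct vfio_device *device);
	int (*log_read_and_clear)(struct vfio_device *device,
				  unsigned long iova, unsigned long length,
				  struct iova_bitmap *dirty);
};

log_start receives the IOVA ranges to track and reports the tracker's page size back, log_read_and_clear fills an iova_bitmap with pages dirtied since the last read, and log_stop tears the tracker down.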
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Advanced Micro Devices, Inc. */

#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>

#include "lm.h"
#include "dirty.h"
#include "vfio_dev.h"

struct pci_dev *pds_vfio_to_pci_dev(struct pds_vfio_pci_device *pds_vfio)
{
	return pds_vfio->vfio_coredev.pdev;
}

struct device *pds_vfio_to_dev(struct pds_vfio_pci_device *pds_vfio)
{
	return &pds_vfio_to_pci_dev(pds_vfio)->dev;
}

struct pds_vfio_pci_device *pds_vfio_pci_drvdata(struct pci_dev *pdev)
{
	struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);

	return container_of(core_device, struct pds_vfio_pci_device,
			    vfio_coredev);
}

void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio)
{
again:
	spin_lock(&pds_vfio->reset_lock);
	if (pds_vfio->deferred_reset) {
		pds_vfio->deferred_reset = false;
		if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR) {
			pds_vfio->state = VFIO_DEVICE_STATE_RUNNING;
			pds_vfio_put_restore_file(pds_vfio);
			pds_vfio_put_save_file(pds_vfio);
			pds_vfio_dirty_disable(pds_vfio, false);
		}
		spin_unlock(&pds_vfio->reset_lock);
		goto again;
	}
	mutex_unlock(&pds_vfio->state_mutex);
	spin_unlock(&pds_vfio->reset_lock);
}

void pds_vfio_reset(struct pds_vfio_pci_device *pds_vfio)
{
	spin_lock(&pds_vfio->reset_lock);
	pds_vfio->deferred_reset = true;
	if (!mutex_trylock(&pds_vfio->state_mutex)) {
		spin_unlock(&pds_vfio->reset_lock);
		return;
	}
	spin_unlock(&pds_vfio->reset_lock);
	pds_vfio_state_mutex_unlock(pds_vfio);
}

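#if 0
/*
 * Illustrative sketch, not part of this file: pds_vfio_reset() is meant to
 * be called from a context that can race with the migration state machine,
 * such as the PCI AER .reset_done handler in the driver's pci.c. The handler
 * and table names below are assumed for illustration. When the state mutex
 * is held elsewhere, the deferred_reset flag makes the reset get folded into
 * the next pds_vfio_state_mutex_unlock() instead of blocking here.
 */
static void pds_vfio_pci_aer_reset_done(struct pci_dev *pdev)
{
	struct pds_vfio_pci_device *pds_vfio = pds_vfio_pci_drvdata(pdev);

	pds_vfio_reset(pds_vfio);
}

static const struct pci_error_handlers pds_vfio_pci_err_handlers = {
	.reset_done = pds_vfio_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};
#endif
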
static struct file *
pds_vfio_set_device_state(struct vfio_device *vdev,
			  enum vfio_device_mig_state new_state)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);
	struct file *res = NULL;

	mutex_lock(&pds_vfio->state_mutex);
	while (new_state != pds_vfio->state) {
		enum vfio_device_mig_state next_state;

		int err = vfio_mig_get_next_state(vdev, pds_vfio->state,
						  new_state, &next_state);
		if (err) {
			res = ERR_PTR(err);
			break;
		}

		res = pds_vfio_step_device_state_locked(pds_vfio, next_state);
		if (IS_ERR(res))
			break;

		pds_vfio->state = next_state;

		if (WARN_ON(res && new_state != pds_vfio->state)) {
			res = ERR_PTR(-EINVAL);
			break;
		}
	}
	pds_vfio_state_mutex_unlock(pds_vfio);

	return res;
}

static int pds_vfio_get_device_state(struct vfio_device *vdev,
				     enum vfio_device_mig_state *current_state)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);

	mutex_lock(&pds_vfio->state_mutex);
	*current_state = pds_vfio->state;
	pds_vfio_state_mutex_unlock(pds_vfio);
	return 0;
}

static int pds_vfio_get_device_state_size(struct vfio_device *vdev,
					  unsigned long *stop_copy_length)
{
	*stop_copy_length = PDS_LM_DEVICE_STATE_LENGTH;
	return 0;
}

static const struct vfio_migration_ops pds_vfio_lm_ops = {
	.migration_set_state = pds_vfio_set_device_state,
	.migration_get_state = pds_vfio_get_device_state,
	.migration_get_data_size = pds_vfio_get_device_state_size
};

static const struct vfio_log_ops pds_vfio_log_ops = {
	.log_start = pds_vfio_dma_logging_start,
	.log_stop = pds_vfio_dma_logging_stop,
	.log_read_and_clear = pds_vfio_dma_logging_report,
};

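#if 0
/*
 * Illustrative sketch, not part of this file: how these log_ops are reached.
 * Userspace drives them through the VFIO_DEVICE_FEATURE ioctl using the DMA
 * logging features from include/uapi/linux/vfio.h. A minimal user-side
 * example, with error handling and includes omitted, might look like this:
 */
static int start_dirty_tracking_sketch(int device_fd, __u64 iova, __u64 length)
{
	struct vfio_device_feature_dma_logging_range range = {
		.iova = iova,
		.length = length,
	};
	struct vfio_device_feature_dma_logging_control ctrl = {
		.page_size = 4096,
		.num_ranges = 1,
		.ranges = (__u64)(uintptr_t)&range,
	};
	__u8 buf[sizeof(struct vfio_device_feature) + sizeof(ctrl)];
	struct vfio_device_feature *feature = (void *)buf;

	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
	memcpy(feature->data, &ctrl, sizeof(ctrl));

	/* Ends up in pds_vfio_dma_logging_start() via pds_vfio_log_ops */
	return ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
}
#endif
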
static int pds_vfio_init_device(struct vfio_device *vdev)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);
	struct pci_dev *pdev = to_pci_dev(vdev->dev);
	int err, vf_id, pci_id;

	vf_id = pci_iov_vf_id(pdev);
	if (vf_id < 0)
		return vf_id;

	err = vfio_pci_core_init_dev(vdev);
	if (err)
		return err;

	pds_vfio->vf_id = vf_id;

	vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P;
	vdev->mig_ops = &pds_vfio_lm_ops;
	vdev->log_ops = &pds_vfio_log_ops;

	pci_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
	dev_dbg(&pdev->dev,
		"%s: PF %#04x VF %#04x vf_id %d domain %d pds_vfio %p\n",
		__func__, pci_dev_id(pdev->physfn), pci_id, vf_id,
		pci_domain_nr(pdev->bus), pds_vfio);

	return 0;
}

static int pds_vfio_open_device(struct vfio_device *vdev)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);
	int err;

	err = vfio_pci_core_enable(&pds_vfio->vfio_coredev);
	if (err)
		return err;

	mutex_init(&pds_vfio->state_mutex);
	pds_vfio->state = VFIO_DEVICE_STATE_RUNNING;

	vfio_pci_core_finish_enable(&pds_vfio->vfio_coredev);

	return 0;
}

static void pds_vfio_close_device(struct vfio_device *vdev)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);

	mutex_lock(&pds_vfio->state_mutex);
	pds_vfio_put_restore_file(pds_vfio);
	pds_vfio_put_save_file(pds_vfio);
	pds_vfio_dirty_disable(pds_vfio, true);
	mutex_unlock(&pds_vfio->state_mutex);
	mutex_destroy(&pds_vfio->state_mutex);
	vfio_pci_core_close_device(vdev);
}

static const struct vfio_device_ops pds_vfio_ops = {
	.name = "pds-vfio",
	.init = pds_vfio_init_device,
	.release = vfio_pci_core_release_dev,
	.open_device = pds_vfio_open_device,
	.close_device = pds_vfio_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
};

const struct vfio_device_ops *pds_vfio_ops_info(void)
{
	return &pds_vfio_ops;
}
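pds_vfio_ops_info() is how the rest of the driver gets at this ops table. A minimal sketch of the expected caller in the driver's pci.c, with the probe function body assumed here for illustration rather than taken from this file:

static int pds_vfio_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	struct pds_vfio_pci_device *pds_vfio;
	int err;

	/* Allocate the device around vfio_coredev.vdev and bind these ops */
	pds_vfio = vfio_alloc_device(pds_vfio_pci_device, vfio_coredev.vdev,
				     &pdev->dev, pds_vfio_ops_info());
	if (IS_ERR(pds_vfio))
		return PTR_ERR(pds_vfio);

	dev_set_drvdata(&pdev->dev, &pds_vfio->vfio_coredev);

	err = vfio_pci_core_register_device(&pds_vfio->vfio_coredev);
	if (err) {
		vfio_put_device(&pds_vfio->vfio_coredev.vdev);
		return err;
	}

	return 0;
}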