diff --git a/KAEKernelDriver/KAEKernelDriver-OLK-5.10/Makefile b/KAEKernelDriver/KAEKernelDriver-OLK-5.10/Makefile
index dcab573d6d3336a618b196327daa561185838ded..16da4c2fb3f4328aa08e32235ace4fa5d4096a99 100644
--- a/KAEKernelDriver/KAEKernelDriver-OLK-5.10/Makefile
+++ b/KAEKernelDriver/KAEKernelDriver-OLK-5.10/Makefile
@@ -23,7 +23,8 @@ default:
 	CONFIG_CRYPTO_DEV_HISI_ZIP=m \
 	CONFIG_CRYPTO_DEV_HISI_HPRE=m \
 	CONFIG_CRYPTO_DEV_HISI_SEC2=m \
-	CONFIG_CRYPTO_DEV_HISI_TRNG=m
+	CONFIG_CRYPTO_DEV_HISI_TRNG=m \
+	CONFIG_CRYPTO_DEV_HISI_MIGRATION=m
 #copy:
 #	cp -f $(shell pwd)/include_linux/uacce.h $(KSP)/include/linux
 #	cp -f $(shell pwd)/include_uapi_linux/uacce.h $(KSP)/include/uapi/linux
@@ -40,6 +41,7 @@ install:
 	-modprobe hisi_sec2 uacce_mode=1 pf_q_num=256
 	-modprobe hisi_hpre uacce_mode=1 pf_q_num=256
 	-modprobe hisi_zip uacce_mode=1 pf_q_num=256
+	-modprobe hisi_migration
 	-echo "options hisi_sec2 uacce_mode=1 pf_q_num=256" > /etc/modprobe.d/hisi_sec2.conf
 	-echo "options hisi_hpre uacce_mode=1 pf_q_num=256" > /etc/modprobe.d/hisi_hpre.conf
 	-echo "options hisi_zip uacce_mode=1 pf_q_num=256" > /etc/modprobe.d/hisi_zip.conf
@@ -61,6 +63,7 @@ check:
 	done

 uninstall:
+	modprobe -r hisi_migration
 	modprobe -r hisi_zip
 	modprobe -r hisi_hpre
 	modprobe -r hisi_sec2
@@ -75,6 +78,7 @@ uninstall:
 	rm -rf /lib/modules/$(KERNEL_VERSION_BY_BUILDENV)/extra/hisi_sec2.ko
 	rm -rf /lib/modules/$(KERNEL_VERSION_BY_BUILDENV)/extra/hisi_hpre.ko
 	rm -rf /lib/modules/$(KERNEL_VERSION_BY_BUILDENV)/extra/hisi_zip.ko
+	rm -rf /lib/modules/$(KERNEL_VERSION_BY_BUILDENV)/extra/hisi_migration.ko

 nosva:
 	$(shell mkdir -p /lib/modules/$(KERNEL_VERSION_BY_BUILDENV)/extra)
@@ -88,6 +92,7 @@ nosva:
 	-modprobe hisi_sec2 uacce_mode=2 pf_q_num=256
 	-modprobe hisi_hpre uacce_mode=2 pf_q_num=256
 	-modprobe hisi_zip uacce_mode=2 pf_q_num=256
+	-modprobe hisi_migration
 	-echo "options hisi_sec2 uacce_mode=2 pf_q_num=256" > /etc/modprobe.d/hisi_sec2.conf
 	-echo "options hisi_hpre uacce_mode=2 pf_q_num=256" > /etc/modprobe.d/hisi_hpre.conf
 	-echo "options hisi_zip uacce_mode=2 pf_q_num=256" > /etc/modprobe.d/hisi_zip.conf
diff --git a/KAEKernelDriver/KAEKernelDriver-OLK-5.10/hisilicon/Makefile b/KAEKernelDriver/KAEKernelDriver-OLK-5.10/hisilicon/Makefile
index a62965be8633147cdfd1a666c7f6a6e565efd04e..12356196650ca4206845332b1b194f7130784bdd 100644
--- a/KAEKernelDriver/KAEKernelDriver-OLK-5.10/hisilicon/Makefile
+++ b/KAEKernelDriver/KAEKernelDriver-OLK-5.10/hisilicon/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/
 obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o
 hisi_qm-objs = qm.o sgl.o debugfs.o
 obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/
+obj-$(CONFIG_CRYPTO_DEV_HISI_MIGRATION) += migration/
diff --git a/KAEKernelDriver/KAEKernelDriver-OLK-5.10/hisilicon/migration/Makefile b/KAEKernelDriver/KAEKernelDriver-OLK-5.10/hisilicon/migration/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..6493f527dd9813051c9cd7173ec6a63911ac1425
--- /dev/null
+++ b/KAEKernelDriver/KAEKernelDriver-OLK-5.10/hisilicon/migration/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_HISI_MIGRATION) += hisi_migration.o
+hisi_migration-objs = acc_vf_migration.o
diff --git a/KAEKernelDriver/KAEKernelDriver-OLK-5.10/hisilicon/migration/acc_vf_migration.c b/KAEKernelDriver/KAEKernelDriver-OLK-5.10/hisilicon/migration/acc_vf_migration.c
new file mode 100644
index 0000000000000000000000000000000000000000..7547e6e2138073cb2c8016d0c2c9c5868ae26902
--- /dev/null
+++ 
b/KAEKernelDriver/KAEKernelDriver-OLK-5.10/hisilicon/migration/acc_vf_migration.c @@ -0,0 +1,1788 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 HiSilicon Limited. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../vfio.h" + +#include "acc_vf_migration.h" + +#define VDM_OFFSET(x) offsetof(struct vfio_device_migration_info, x) +static struct dentry *mig_debugfs_root; +static int mig_root_ref; + +/* return 0 VM acc device ready, -ETIMEDOUT hardware timeout */ +static int qm_wait_dev_ready(struct hisi_qm *qm) +{ + u32 val; + + return readl_relaxed_poll_timeout(qm->io_base + QM_VF_STATE, + val, !(val & 0x1), POLL_PERIOD, POLL_TIMEOUT); +} + +/* 128 bit should be written to hardware at one time to trigger a mailbox */ +static void qm_mb_write(struct hisi_qm *qm, const void *src) +{ + void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; + unsigned long tmp0 = 0; + unsigned long tmp1 = 0; + + if (!IS_ENABLED(CONFIG_ARM64)) { + memcpy_toio(fun_base, src, 16); + wmb(); + return; + } + + asm volatile("ldp %0, %1, %3\n" + "stp %0, %1, %2\n" + "dsb sy\n" + : "=&r" (tmp0), + "=&r" (tmp1), + "+Q" (*((char __iomem *)fun_base)) + : "Q" (*((char *)src)) + : "memory"); +} + +/* 128 bit should be read from hardware at one time */ +static void qm_mb_read(struct hisi_qm *qm, void *dst) +{ + const void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; + unsigned long tmp0 = 0, tmp1 = 0; + + if (!IS_ENABLED(CONFIG_ARM64)) { + memcpy_fromio(dst, fun_base, 16); + dma_wmb(); + return; + } + + asm volatile("ldp %0, %1, %3\n" + "stp %0, %1, %2\n" + "dmb oshst\n" + : "=&r" (tmp0), + "=&r" (tmp1), + "+Q" (*((char *)dst)) + : "Q" (*((char __iomem *)fun_base)) + : "memory"); +} + +static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd, + u64 base, u16 queue, bool op) +{ + mailbox->w0 = cpu_to_le16((cmd) | + ((op) ? 
0x1 << QM_MB_OP_SHIFT : 0) | + (0x1 << QM_MB_BUSY_SHIFT)); + mailbox->queue_num = cpu_to_le16(queue); + mailbox->base_l = cpu_to_le32(lower_32_bits(base)); + mailbox->base_h = cpu_to_le32(upper_32_bits(base)); + mailbox->rsvd = 0; +} + +static int qm_wait_mb_ready(struct hisi_qm *qm) +{ + struct qm_mailbox mailbox; + int i = 0; + + while (i++ < QM_MB_WAIT_READY_CNT) { + qm_mb_read(qm, &mailbox); + if (!((le16_to_cpu(mailbox.w0) >> QM_MB_BUSY_SHIFT) & 0x1)) + return 0; + + usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); + } + + return -EBUSY; +} + +static int qm_wait_mb_finish(struct hisi_qm *qm, struct qm_mailbox *mailbox) +{ + int i = 0; + + while (++i) { + qm_mb_read(qm, mailbox); + if (!((le16_to_cpu(mailbox->w0) >> QM_MB_BUSY_SHIFT) & 0x1)) + break; + + if (i == QM_MB_MAX_WAIT_CNT) { + dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); + return -ETIMEDOUT; + } + + usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); + } + + if (le16_to_cpu(mailbox->w0) & QM_MB_STATUS_MASK) { + dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n"); + return -EIO; + } + + return 0; +} + +static int qm_mb(struct hisi_qm *qm, struct qm_mailbox *mailbox) +{ + int ret; + + mutex_lock(&qm->mailbox_lock); + ret = qm_wait_mb_ready(qm); + if (ret) + goto unlock; + + qm_mb_write(qm, mailbox); + ret = qm_wait_mb_finish(qm, mailbox); + +unlock: + mutex_unlock(&qm->mailbox_lock); + + return ret; +} + +static int qm_config_set(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, + u16 queue, bool op) +{ + struct qm_mailbox mailbox; + + qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op); + + return qm_mb(qm, &mailbox); +} + +static int qm_config_get(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue) +{ + struct qm_mailbox mailbox; + int ret; + + qm_mb_pre_init(&mailbox, cmd, 0, queue, 1); + + ret = qm_mb(qm, &mailbox); + if (ret) + return ret; + + *base = le32_to_cpu(mailbox.base_l) | + ((u64)le32_to_cpu(mailbox.base_h) << 32); + + return 0; +} + +static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, + u16 index, u8 priority) +{ + void __iomem *io_base = qm->io_base; + u16 randata = 0; + u64 doorbell; + + if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ) + io_base = qm->db_io_base + (u64)qn * qm->db_interval + + QM_DOORBELL_SQ_CQ_BASE_V2; + else + io_base += QM_DOORBELL_EQ_AEQ_BASE_V2; + + doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) | + ((u64)randata << QM_DB_RAND_SHIFT_V2) | + ((u64)index << QM_DB_INDEX_SHIFT_V2) | + ((u64)priority << QM_DB_PRIORITY_SHIFT_V2); + + writeq(doorbell, io_base); +} + +/* + * Each state Reg is checked 100 times, + * with a delay of 100 microseconds after each check + */ +static u32 acc_check_reg_state(struct hisi_qm *qm, u32 regs) +{ + int check_times = 0; + u32 state; + + state = readl(qm->io_base + regs); + while (state && check_times < ERROR_CHECK_TIMEOUT) { + udelay(CHECK_DELAY_TIME); + state = readl(qm->io_base + regs); + check_times++; + } + + return state; +} + +/* Check the PF's RAS state and Function INT state */ +static int qm_check_int_state(struct acc_vf_migration *acc_vf_dev) +{ + struct hisi_qm *vfqm = acc_vf_dev->vf_qm; + struct hisi_qm *qm = acc_vf_dev->pf_qm; + struct device *dev = &qm->pdev->dev; + u32 state; + + /* Check RAS state */ + state = acc_check_reg_state(qm, QM_ABNORMAL_INT_STATUS); + if (state) { + dev_err(dev, "failed to check QM RAS state!\n"); + return -EBUSY; + } + + /* Check Function Communication state between PF and VF */ + state = acc_check_reg_state(vfqm, QM_IFC_INT_STATUS); + if (state) { + dev_err(dev, "failed to check 
QM IFC INT state!\n"); + return -EBUSY; + } + state = acc_check_reg_state(vfqm, QM_IFC_INT_SET_V); + if (state) { + dev_err(dev, "failed to check QM IFC INT SET state!\n"); + return -EBUSY; + } + + /* Check submodule task state */ + switch (acc_vf_dev->acc_type) { + case HISI_SEC: + state = acc_check_reg_state(qm, SEC_CORE_INT_STATUS); + if (state) { + dev_err(dev, "failed to check QM SEC Core INT state!\n"); + return -EBUSY; + } + break; + case HISI_HPRE: + state = acc_check_reg_state(qm, HPRE_HAC_INT_STATUS); + if (state) { + dev_err(dev, "failed to check QM HPRE HAC INT state!\n"); + return -EBUSY; + } + break; + case HISI_ZIP: + state = acc_check_reg_state(qm, HZIP_CORE_INT_STATUS); + if (state) { + dev_err(dev, "failed to check QM ZIP Core INT state!\n"); + return -EBUSY; + } + break; + default: + dev_err(dev, "failed to detect acc module type!\n"); + return -EINVAL; + } + + return 0; +} + +static int qm_read_reg(struct hisi_qm *qm, u32 reg_addr, + u32 *data, u8 nums) +{ + int i; + + if (nums < 1 || nums > QM_REGS_MAX_LEN) { + dev_err(&qm->pdev->dev, "QM read input parameter is error!\n"); + return -EINVAL; + } + + for (i = 0; i < nums; i++) { + data[i] = readl(qm->io_base + reg_addr); + reg_addr += QM_REG_ADDR_OFFSET; + } + + return 0; +} + +static int qm_write_reg(struct hisi_qm *qm, u32 reg_addr, + u32 *data, u8 nums) +{ + int i; + + if (nums < 1 || nums > QM_REGS_MAX_LEN) { + dev_err(&qm->pdev->dev, "QM write input parameter is error!\n"); + return -EINVAL; + } + + for (i = 0; i < nums; i++) { + writel(data[i], qm->io_base + reg_addr); + reg_addr += QM_REG_ADDR_OFFSET; + } + + return 0; +} + +static int qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) +{ + u64 sqc_vft; + int ret; + + ret = qm_config_get(qm, &sqc_vft, QM_MB_CMD_SQC_VFT_V2, 0); + if (ret) + return ret; + + *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); + *number = (QM_SQC_VFT_NUM_MASK_V2 & + (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; + + return 0; +} + +static int qm_rw_regs_read(struct hisi_qm *qm, struct acc_vf_data *vf_data) +{ + struct device *dev = &qm->pdev->dev; + int ret; + + ret = qm_read_reg(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1); + if (ret) { + dev_err(dev, "failed to read QM_VF_AEQ_INT_MASK!\n"); + return ret; + } + + ret = qm_read_reg(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1); + if (ret) { + dev_err(dev, "failed to read QM_VF_EQ_INT_MASK!\n"); + return ret; + } + + ret = qm_read_reg(qm, QM_IFC_INT_SOURCE_V, + &vf_data->ifc_int_source, 1); + if (ret) { + dev_err(dev, "failed to read QM_IFC_INT_SOURCE_V!\n"); + return ret; + } + + ret = qm_read_reg(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1); + if (ret) { + dev_err(dev, "failed to read QM_IFC_INT_MASK!\n"); + return ret; + } + + ret = qm_read_reg(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1); + if (ret) { + dev_err(dev, "failed to read QM_IFC_INT_SET_V!\n"); + return ret; + } + + ret = qm_read_reg(qm, QM_PAGE_SIZE, &vf_data->page_size, 1); + if (ret) { + dev_err(dev, "failed to read QM_PAGE_SIZE!\n"); + return ret; + } + + ret = qm_read_reg(qm, QM_VF_STATE, &vf_data->vf_state, 1); + if (ret) { + dev_err(dev, "failed to read QM_VF_STATE!\n"); + return ret; + } + + /* QM_EQC_DW has 7 regs */ + ret = qm_read_reg(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7); + if (ret) { + dev_err(dev, "failed to read QM_EQC_DW!\n"); + return ret; + } + + /* QM_AEQC_DW has 7 regs */ + ret = qm_read_reg(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7); + if (ret) { + dev_err(dev, "failed to read QM_AEQC_DW!\n"); + return ret; + } + + 
return 0; +} + +static int qm_rw_regs_write(struct hisi_qm *qm, struct acc_vf_data *vf_data) +{ + struct device *dev = &qm->pdev->dev; + int ret; + + /* check VF state */ + if (unlikely(qm_wait_mb_ready(qm))) { + dev_err(&qm->pdev->dev, "QM device is not ready to write!\n"); + return -EBUSY; + } + + ret = qm_write_reg(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1); + if (ret) { + dev_err(dev, "failed to write QM_VF_AEQ_INT_MASK!\n"); + return ret; + } + + ret = qm_write_reg(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1); + if (ret) { + dev_err(dev, "failed to write QM_VF_EQ_INT_MASK!\n"); + return ret; + } + + ret = qm_write_reg(qm, QM_IFC_INT_SOURCE_V, + &vf_data->ifc_int_source, 1); + if (ret) { + dev_err(dev, "failed to write QM_IFC_INT_SOURCE_V!\n"); + return ret; + } + + ret = qm_write_reg(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1); + if (ret) { + dev_err(dev, "failed to write QM_IFC_INT_MASK!\n"); + return ret; + } + + ret = qm_write_reg(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1); + if (ret) { + dev_err(dev, "failed to write QM_IFC_INT_SET_V!\n"); + return ret; + } + + ret = qm_write_reg(qm, QM_PAGE_SIZE, &vf_data->page_size, 1); + if (ret) { + dev_err(dev, "failed to write QM_PAGE_SIZE!\n"); + return ret; + } + + ret = qm_write_reg(qm, QM_VF_STATE, &vf_data->vf_state, 1); + if (ret) { + dev_err(dev, "failed to write QM_VF_STATE!\n"); + return ret; + } + + /* QM_EQC_DW has 7 regs */ + ret = qm_write_reg(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7); + if (ret) { + dev_err(dev, "failed to write QM_EQC_DW!\n"); + return ret; + } + + /* QM_AEQC_DW has 7 regs */ + ret = qm_write_reg(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7); + if (ret) { + dev_err(dev, "failed to write QM_AEQC_DW!\n"); + return ret; + } + + return 0; +} + +static void vf_qm_xeqc_save(struct hisi_qm *qm, + struct acc_vf_migration *acc_vf_dev) +{ + struct acc_vf_data *vf_data = acc_vf_dev->vf_data; + u16 eq_head, aeq_head; + + eq_head = vf_data->qm_eqc_dw[0] & 0xFFFF; + qm_db(qm, 0, QM_DOORBELL_CMD_EQ, eq_head, 0); + + aeq_head = vf_data->qm_aeqc_dw[0] & 0xFFFF; + qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, aeq_head, 0); +} + +/* + * the vf QM have unbind from host, insmod in the VM + * so, qm just have the addr from pci dev + * others is null. + * so we need read from the SEC hardware REGs. 
+ */ +static int vf_migration_data_store(struct hisi_qm *qm, + struct acc_vf_migration *acc_vf_dev) +{ + struct acc_vf_data *vf_data = acc_vf_dev->vf_data; + struct device *dev = &qm->pdev->dev; + int ret; + + ret = qm_rw_regs_read(qm, vf_data); + if (ret) { + dev_err(dev, "failed to read QM regs!\n"); + return -EINVAL; + } + + /* + * every Reg is 32 bit, the dma address is 64 bit + * so, the dma address is store in the Reg2 and Reg1 + */ + vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH]; + vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET; + vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW]; + vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH]; + vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET; + vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW]; + + /* Through SQC_BT/CQC_BT to get sqc and cqc address */ + ret = qm_config_get(qm, &vf_data->sqc_dma, QM_MB_CMD_SQC_BT, 0); + if (ret) { + dev_err(dev, "failed to read SQC addr!\n"); + return -EINVAL; + } + + ret = qm_config_get(qm, &vf_data->cqc_dma, QM_MB_CMD_CQC_BT, 0); + if (ret) { + dev_err(dev, "failed to read CQC addr!\n"); + return -EINVAL; + } + + /* Save eqc and aeqc interrupt information */ + vf_qm_xeqc_save(qm, acc_vf_dev); + + return 0; +} + +static void qm_dev_cmd_init(struct hisi_qm *qm) +{ + /* clear VF communication status registers. */ + writel(0x1, qm->io_base + QM_IFC_INT_SOURCE_V); + + /* enable pf and vf communication. */ + writel(0x0, qm->io_base + QM_IFC_INT_MASK); +} + +static void vf_qm_fun_restart(struct hisi_qm *qm, + struct acc_vf_migration *acc_vf_dev) +{ + struct acc_vf_data *vf_data = acc_vf_dev->vf_data; + struct device *dev = &qm->pdev->dev; + int i; + + /* + * When the Guest is rebooted or reseted, the SMMU page table + * will be destroyed, and the QP queue cannot be returned + * normally at this time. so if Guest acc driver have removed, + * don't need to restart QP. 
+ */ + if (vf_data->vf_state != QM_READY) { + dev_err(dev, "failed to restart VF!\n"); + return; + } + + for (i = 0; i < qm->qp_num; i++) + qm_db(qm, i, QM_DOORBELL_CMD_SQ, 0, 1); +} + +static int vf_match_info_check(struct hisi_qm *qm, + struct acc_vf_migration *acc_vf_dev) +{ + struct acc_vf_data *vf_data = acc_vf_dev->vf_data; + struct hisi_qm *pf_qm = acc_vf_dev->pf_qm; + struct device *dev = &qm->pdev->dev; + u32 que_iso_state; + int ret; + + /* vf acc type check */ + if (vf_data->acc_type != acc_vf_dev->acc_type) { + dev_err(dev, "failed to match VF acc type!\n"); + return -EINVAL; + } + + /* vf qp num check */ + ret = qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + if (ret || qm->qp_num <= 1) { + dev_err(dev, "failed to get vft qp nums!\n"); + return ret; + } + + if (vf_data->qp_num != qm->qp_num) { + dev_err(dev, "failed to match VF qp num!\n"); + return -EINVAL; + } + + /* vf isolation state check */ + ret = qm_read_reg(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1); + if (ret) { + dev_err(dev, "failed to read QM_QUE_ISO_CFG_V!\n"); + return ret; + } + if (vf_data->que_iso_cfg != que_iso_state) { + dev_err(dev, "failed to match isolation state!\n"); + return -EINVAL; + } + + return 0; +} + +static int vf_migration_data_recover(struct hisi_qm *qm, + struct acc_vf_data *vf_data) +{ + struct device *dev = &qm->pdev->dev; + int ret; + + qm->eqe_dma = vf_data->eqe_dma; + qm->aeqe_dma = vf_data->aeqe_dma; + qm->sqc_dma = vf_data->sqc_dma; + qm->cqc_dma = vf_data->cqc_dma; + + qm->qp_base = vf_data->qp_base; + qm->qp_num = vf_data->qp_num; + + ret = qm_rw_regs_write(qm, vf_data); + if (ret) { + dev_err(dev, "Set VF regs failed!\n"); + return ret; + } + + ret = qm_config_set(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); + if (ret) { + dev_err(dev, "Set sqc failed!\n"); + return ret; + } + + ret = qm_config_set(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); + if (ret) { + dev_err(dev, "Set cqc failed!\n"); + return ret; + } + + /* which ACC module need to reinit? 
*/ + qm_dev_cmd_init(qm); + + return 0; +} + +static int vf_qm_cache_wb(struct hisi_qm *qm) +{ + unsigned int val; + + writel(0x1, qm->io_base + QM_CACHE_WB_START); + if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, + val, val & BIT(0), POLL_PERIOD, + POLL_TIMEOUT)) { + dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail!\n"); + return -EINVAL; + } + + return 0; +} + +static int vf_qm_func_stop(struct hisi_qm *qm) +{ + return qm_config_set(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0); +} + +static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id, + u32 *rbase, u32 *rnumber) +{ + unsigned int val; + u64 sqc_vft; + int ret; + + ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, + val & BIT(0), POLL_PERIOD, + POLL_TIMEOUT); + if (ret) + return ret; + + writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); + /* 0 mean SQC VFT */ + writel(0x0, qm->io_base + QM_VFT_CFG_TYPE); + writel(vf_id, qm->io_base + QM_VFT_CFG); + + writel(0x0, qm->io_base + QM_VFT_CFG_RDY); + writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); + + ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, + val & BIT(0), POLL_PERIOD, + POLL_TIMEOUT); + if (ret) + return ret; + + sqc_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | + ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << + QM_XQC_ADDR_OFFSET); + *rbase = QM_SQC_VFT_BASE_MASK_V2 & + (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); + *rnumber = (QM_SQC_VFT_NUM_MASK_V2 & + (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; + + return 0; +} + +static int pf_qm_state_pre_save(struct hisi_qm *qm, + struct acc_vf_migration *acc_vf_dev) +{ + struct acc_vf_data *vf_data = acc_vf_dev->vf_data; + struct device *dev = &qm->pdev->dev; + int vf_id = acc_vf_dev->vf_id; + int ret; + + /* Vf acc type save */ + vf_data->acc_type = acc_vf_dev->acc_type; + + /* Vf qp num save from PF */ + ret = pf_qm_get_qp_num(qm, vf_id, &vf_data->qp_base, &vf_data->qp_num); + if (ret) { + dev_err(dev, "failed to get vft qp nums!\n"); + return -EINVAL; + } + + /* Vf isolation state save from PF */ + ret = qm_read_reg(qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1); + if (ret) { + dev_err(dev, "failed to read QM_QUE_ISO_CFG_V!\n"); + return ret; + } + + return 0; +} + +static int vf_qm_state_save(struct hisi_qm *qm, + struct acc_vf_migration *acc_vf_dev) +{ + struct device *dev = &acc_vf_dev->vf_dev->dev; + int ret; + + /* + * check VM task driver state + * if vf_ready == 0x1, skip migrate. 
+ */ + if (unlikely(qm_wait_dev_ready(qm))) { + acc_vf_dev->mig_ignore = true; + dev_err(&qm->pdev->dev, "QM device is not ready to read!\n"); + return 0; + } + + /* First stop the ACC vf function */ + ret = vf_qm_func_stop(qm); + if (ret) { + dev_err(dev, "failed to stop QM VF function!\n"); + return ret; + } + + /* Check the VF's RAS and Interrution state */ + ret = qm_check_int_state(acc_vf_dev); + if (ret) { + dev_err(dev, "failed to check QM INT state!\n"); + goto state_error; + } + + /* hisi_qm_cache_wb store cache data to DDR */ + ret = vf_qm_cache_wb(qm); + if (ret) { + dev_err(dev, "failed to writeback QM Cache!\n"); + goto state_error; + } + + ret = vf_migration_data_store(qm, acc_vf_dev); + if (ret) { + dev_err(dev, "failed to get and store migration data!\n"); + goto state_error; + } + + return 0; + +state_error: + vf_qm_fun_restart(qm, acc_vf_dev); + return ret; +} + +static int vf_qm_state_resume(struct hisi_qm *qm, + struct acc_vf_migration *acc_vf_dev) +{ + struct device *dev = &acc_vf_dev->vf_dev->dev; + int ret; + + /* recover data to VF */ + ret = vf_migration_data_recover(qm, acc_vf_dev->vf_data); + if (ret) { + dev_err(dev, "failed to recover the VF!\n"); + return ret; + } + + /* restart all destination VF's QP */ + vf_qm_fun_restart(qm, acc_vf_dev); + + return 0; +} + +static int acc_vf_set_device_state(struct acc_vf_migration *acc_vf_dev, + u32 state) +{ + struct vfio_device_migration_info *mig_ctl = acc_vf_dev->mig_ctl; + struct device *dev = &acc_vf_dev->vf_dev->dev; + struct hisi_qm *pfqm = acc_vf_dev->pf_qm; + struct hisi_qm *qm = acc_vf_dev->vf_qm; + int ret = 0; + + if (state == mig_ctl->device_state) + return 0; + + switch (state) { + case VFIO_DEVICE_STATE_RUNNING: + if (!mig_ctl->data_size) + break; + + if (mig_ctl->device_state == VFIO_DEVICE_STATE_RESUMING) { + ret = vf_qm_state_resume(qm, acc_vf_dev); + if (ret) { + dev_err(dev, "failed to resume device!\n"); + return -EFAULT; + } + } + + break; + case VFIO_DEVICE_STATE_SAVING | VFIO_DEVICE_STATE_RUNNING: + /* ACC should in the pre cycle to read match information data */ + ret = pf_qm_state_pre_save(pfqm, acc_vf_dev); + if (ret) { + dev_err(dev, "failed to pre save device state!\n"); + return -EFAULT; + } + + /* set the pending_byte and match data size */ + mig_ctl->data_size = QM_MATCH_SIZE; + mig_ctl->pending_bytes = mig_ctl->data_size; + + break; + case VFIO_DEVICE_STATE_SAVING: + /* stop the vf function */ + ret = vf_qm_state_save(qm, acc_vf_dev); + if (ret) { + dev_err(dev, "failed to save device state!\n"); + return -EFAULT; + } + + if (acc_vf_dev->mig_ignore) { + mig_ctl->data_size = 0; + mig_ctl->pending_bytes = 0; + break; + } + + /* set the pending_byte and data_size */ + mig_ctl->data_size = sizeof(struct acc_vf_data); + mig_ctl->pending_bytes = mig_ctl->data_size; + + break; + case VFIO_DEVICE_STATE_STOP: + case VFIO_DEVICE_STATE_RESUMING: + break; + default: + ret = -EFAULT; + } + + if (!ret) { + dev_info(dev, "migration state: %s ----------> %s!\n", + vf_dev_state[mig_ctl->device_state], + vf_dev_state[state]); + mig_ctl->device_state = state; + } + + return ret; +} + +static int acc_vf_data_transfer(struct acc_vf_migration *acc_vf_dev, + char __user *buf, size_t count, u64 pos, bool iswrite) +{ + struct vfio_device_migration_info *mig_ctl = acc_vf_dev->mig_ctl; + void *data_addr = acc_vf_dev->vf_data; + int ret = 0; + + if (!count) { + dev_err(&acc_vf_dev->vf_dev->dev, + "Qemu operation data size error!\n"); + return -EINVAL; + } + + data_addr += pos - mig_ctl->data_offset; + if (iswrite) { 
+ ret = copy_from_user(data_addr, buf, count) ? + -EFAULT : count; + if (ret == count) + mig_ctl->pending_bytes += count; + } else { + ret = copy_to_user(buf, data_addr, count) ? + -EFAULT : count; + if (ret == count) + mig_ctl->pending_bytes -= count; + } + + return ret; +} + +static int acc_vf_region_migration_rw(struct acc_vf_migration *acc_vf_dev, + char __user *buf, size_t count, loff_t *ppos, bool iswrite) +{ + struct vfio_device_migration_info *mig_ctl = acc_vf_dev->mig_ctl; + struct device *dev = &acc_vf_dev->vf_dev->dev; + struct hisi_qm *qm = acc_vf_dev->vf_qm; + u64 pos = *ppos & VFIO_PCI_OFFSET_MASK; + u32 device_state; + int ret = 0; + + switch (pos) { + case VDM_OFFSET(device_state): + if (count != sizeof(mig_ctl->device_state)) { + ret = -EINVAL; + break; + } + + if (iswrite) { + if (copy_from_user(&device_state, buf, count)) { + ret = -EFAULT; + break; + } + + ret = acc_vf_set_device_state(acc_vf_dev, + device_state) ? ret : count; + } else { + ret = copy_to_user(buf, &mig_ctl->device_state, + count) ? -EFAULT : count; + } + break; + case VDM_OFFSET(reserved): + ret = -EFAULT; + break; + case VDM_OFFSET(pending_bytes): + if (count != sizeof(mig_ctl->pending_bytes)) { + ret = -EINVAL; + break; + } + + if (iswrite) + ret = -EFAULT; + else + ret = copy_to_user(buf, &mig_ctl->pending_bytes, + count) ? -EFAULT : count; + break; + case VDM_OFFSET(data_offset): + if (count != sizeof(mig_ctl->data_offset)) { + ret = -EINVAL; + break; + } + if (iswrite) + ret = copy_from_user(&mig_ctl->data_offset, buf, count) ? + -EFAULT : count; + else + ret = copy_to_user(buf, &mig_ctl->data_offset, count) ? + -EFAULT : count; + break; + case VDM_OFFSET(data_size): + if (count != sizeof(mig_ctl->data_size)) { + ret = -EINVAL; + break; + } + + if (iswrite) + ret = copy_from_user(&mig_ctl->data_size, buf, count) ? + -EFAULT : count; + else + ret = copy_to_user(buf, &mig_ctl->data_size, count) ? 
+ -EFAULT : count; + break; + default: + ret = -EFAULT; + break; + } + + /* Transfer data section */ + if (pos >= mig_ctl->data_offset && + pos < MIGRATION_REGION_SZ) { + ret = acc_vf_data_transfer(acc_vf_dev, buf, + count, pos, iswrite); + if (ret != count) + return ret; + } + + if (mig_ctl->device_state == VFIO_DEVICE_STATE_RESUMING && + mig_ctl->pending_bytes == QM_MATCH_SIZE && + mig_ctl->data_size == QM_MATCH_SIZE) { + /* check the VF match information */ + ret = vf_match_info_check(qm, acc_vf_dev); + if (ret) { + dev_err(dev, "failed to check match information!\n"); + return -EFAULT; + } + ret = count; + + /* clear the VF match data size */ + mig_ctl->pending_bytes = 0; + mig_ctl->data_size = 0; + } + return ret; +} + +static int acc_vf_region_migration_mmap(struct acc_vf_migration *acc_vf_dev, + struct acc_vf_region *region, + struct vm_area_struct *vma) +{ + return -EFAULT; +} + +static void acc_vf_region_migration_release(struct acc_vf_migration *acc_vf_dev, + struct acc_vf_region *region) +{ + kfree(acc_vf_dev->mig_ctl); + acc_vf_dev->mig_ctl = NULL; +} + +static const struct acc_vf_region_ops acc_vf_region_ops_migration = { + .rw = acc_vf_region_migration_rw, + .release = acc_vf_region_migration_release, + .mmap = acc_vf_region_migration_mmap, +}; + +static int acc_vf_register_region(struct acc_vf_migration *acc_vf_dev, + const struct acc_vf_region_ops *ops, + void *data) +{ + struct acc_vf_region *regions; + + regions = krealloc(acc_vf_dev->regions, + (acc_vf_dev->num_regions + 1) * sizeof(*regions), + GFP_KERNEL); + if (!regions) + return -ENOMEM; + + acc_vf_dev->regions = regions; + regions[acc_vf_dev->num_regions].type = + VFIO_REGION_TYPE_MIGRATION; + regions[acc_vf_dev->num_regions].subtype = + VFIO_REGION_SUBTYPE_MIGRATION; + regions[acc_vf_dev->num_regions].ops = ops; + regions[acc_vf_dev->num_regions].size = + MIGRATION_REGION_SZ; + regions[acc_vf_dev->num_regions].flags = + VFIO_REGION_INFO_FLAG_READ | VFIO_REGION_INFO_FLAG_WRITE; + regions[acc_vf_dev->num_regions].data = data; + acc_vf_dev->num_regions++; + + return 0; +} + +static long acc_vf_get_region_info(void *device_data, + unsigned int cmd, unsigned long arg) +{ + int num_vdev_regions = vfio_pci_num_regions(device_data); + struct acc_vf_migration *acc_vf_dev = + vfio_pci_vendor_data(device_data); + struct vfio_region_info_cap_type cap_type; + struct acc_vf_region *regions; + struct vfio_region_info info; + struct vfio_info_cap caps; + unsigned long minsz; + int index, ret; + + minsz = offsetofend(struct vfio_region_info, offset); + + if (cmd != VFIO_DEVICE_GET_REGION_INFO) + return -EINVAL; + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + if (info.index < VFIO_PCI_NUM_REGIONS + num_vdev_regions) + goto default_handle; + + index = info.index - VFIO_PCI_NUM_REGIONS - num_vdev_regions; + if (index > acc_vf_dev->num_regions) { + dev_err(&acc_vf_dev->vf_dev->dev, + "failed to check region numbers!\n"); + return -EINVAL; + } + + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + regions = acc_vf_dev->regions; + info.size = regions[index].size; + info.flags = regions[index].flags; + caps.buf = NULL; + caps.size = 0; + cap_type.header.id = VFIO_REGION_INFO_CAP_TYPE; + cap_type.header.version = 1; + cap_type.type = regions[index].type; + cap_type.subtype = regions[index].subtype; + + ret = vfio_info_add_capability(&caps, &cap_type.header, + sizeof(cap_type)); + if (ret) + return ret; + + if (regions[index].ops->add_cap) { + ret = 
regions[index].ops->add_cap(acc_vf_dev, + ®ions[index], &caps); + if (ret) { + kfree(caps.buf); + return ret; + } + } + + if (caps.size) { + info.flags |= VFIO_REGION_INFO_FLAG_CAPS; + if (info.argsz < sizeof(info) + caps.size) { + info.argsz = sizeof(info) + caps.size; + info.cap_offset = 0; + } else { + vfio_info_cap_shift(&caps, sizeof(info)); + if (copy_to_user((void __user *)arg + sizeof(info), + caps.buf, caps.size)) { + kfree(caps.buf); + return -EFAULT; + } + info.cap_offset = sizeof(info); + } + kfree(caps.buf); + } + + return copy_to_user((void __user *)arg, &info, minsz) ? + -EFAULT : 0; + +default_handle: + ret = vfio_pci_ioctl(device_data, cmd, arg); + if (ret) + return ret; + + if (info.index == VFIO_PCI_BAR0_REGION_INDEX) { + if (!acc_vf_dev->in_dirty_track) + return ret; + + /* read default handler's data back */ + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + /* update customized region info */ + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + } + + if (info.index == VFIO_PCI_BAR2_REGION_INDEX) { + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + /* + * ACC VF dev BAR2 region(64K) consists of both functional + * register space and migration control register space. + * Report only the first 32K(functional region) to Guest. + */ + info.size = pci_resource_len(acc_vf_dev->vf_dev, info.index) >> 1; + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE | + VFIO_REGION_INFO_FLAG_MMAP; + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + } + + return ret; +} + +static int acc_vf_open(void *device_data) +{ + struct acc_vf_migration *acc_vf_dev = + vfio_pci_vendor_data(device_data); + struct vfio_device_migration_info *mig_ctl; + __u64 mig_offset; + void *vf_data; + int ret; + + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + mutex_lock(&acc_vf_dev->reflock); + if (!acc_vf_dev->refcnt) { + ret = acc_vf_register_region(acc_vf_dev, + &acc_vf_region_ops_migration, + NULL); + if (ret) + goto region_error; + vfio_pci_set_vendor_regions(device_data, + acc_vf_dev->num_regions); + + /* the data region must follow migration info */ + mig_offset = sizeof(struct vfio_device_migration_info); + mig_ctl = kzalloc(MIGRATION_REGION_SZ, GFP_KERNEL); + if (!mig_ctl) { + ret = -ENOMEM; + goto mig_error; + } + acc_vf_dev->mig_ctl = mig_ctl; + + vf_data = (void *)mig_ctl + mig_offset; + acc_vf_dev->vf_data = vf_data; + + mig_ctl->device_state = VFIO_DEVICE_STATE_RUNNING; + mig_ctl->data_offset = mig_offset; + mig_ctl->data_size = 0; + } + + ret = vfio_pci_open(device_data); + if (ret) + goto open_error; + + acc_vf_dev->refcnt++; + mutex_unlock(&acc_vf_dev->reflock); + + return 0; + +open_error: + if (!acc_vf_dev->refcnt) { + kfree(acc_vf_dev->mig_ctl); + acc_vf_dev->mig_ctl = NULL; + } +mig_error: + vfio_pci_set_vendor_regions(device_data, 0); +region_error: + mutex_unlock(&acc_vf_dev->reflock); + module_put(THIS_MODULE); + return ret; +} + +static void acc_vf_release(void *device_data) +{ + struct acc_vf_migration *acc_vf_dev = + vfio_pci_vendor_data(device_data); + int i; + + mutex_lock(&acc_vf_dev->reflock); + if (!--acc_vf_dev->refcnt) { + for (i = 0; i < acc_vf_dev->num_regions; i++) { + if (!acc_vf_dev->regions[i].ops) + continue; + acc_vf_dev->regions[i].ops->release(acc_vf_dev, + &acc_vf_dev->regions[i]); + } + kfree(acc_vf_dev->regions); + acc_vf_dev->regions = NULL; + acc_vf_dev->num_regions = 0; + 
vfio_pci_set_vendor_regions(device_data, 0); + + kfree(acc_vf_dev->mig_ctl); + acc_vf_dev->mig_ctl = NULL; + } + vfio_pci_release(device_data); + mutex_unlock(&acc_vf_dev->reflock); + module_put(THIS_MODULE); +} + +static void acc_vf_reset(void *device_data) +{ + struct acc_vf_migration *acc_vf_dev = + vfio_pci_vendor_data(device_data); + struct hisi_qm *qm = acc_vf_dev->vf_qm; + struct device *dev = &qm->pdev->dev; + u32 vf_state = QM_NOT_READY; + int ret; + + dev_info(dev, "QEMU prepare to Reset Guest!\n"); + ret = qm_write_reg(qm, QM_VF_STATE, &vf_state, 1); + if (ret) + dev_err(dev, "failed to write QM_VF_STATE\n"); +} + +static long acc_vf_ioctl(void *device_data, + unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case VFIO_DEVICE_GET_REGION_INFO: + return acc_vf_get_region_info(device_data, cmd, arg); + case VFIO_DEVICE_RESET: + acc_vf_reset(device_data); + return vfio_pci_ioctl(device_data, cmd, arg); + default: + return vfio_pci_ioctl(device_data, cmd, arg); + } +} + +static ssize_t acc_vf_read(void *device_data, char __user *buf, + size_t count, loff_t *ppos) +{ + struct acc_vf_migration *acc_vf_dev = + vfio_pci_vendor_data(device_data); + int num_vdev_regions = vfio_pci_num_regions(device_data); + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + int num_vendor_region = acc_vf_dev->num_regions; + struct acc_vf_region *region; + + if (index >= VFIO_PCI_NUM_REGIONS + num_vdev_regions + + num_vendor_region) { + dev_err(&acc_vf_dev->vf_dev->dev, + "failed to check read regions index!\n"); + return -EINVAL; + } + + if (index < VFIO_PCI_NUM_REGIONS + num_vdev_regions) + return vfio_pci_read(device_data, buf, count, ppos); + + index -= VFIO_PCI_NUM_REGIONS + num_vdev_regions; + + region = &acc_vf_dev->regions[index]; + if (!region->ops->rw) { + dev_err(&acc_vf_dev->vf_dev->dev, + "failed to check regions read ops!\n"); + return -EINVAL; + } + + return region->ops->rw(acc_vf_dev, buf, count, ppos, false); +} + +static ssize_t acc_vf_write(void *device_data, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct acc_vf_migration *acc_vf_dev = + vfio_pci_vendor_data(device_data); + int num_vdev_regions = vfio_pci_num_regions(device_data); + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + int num_vendor_region = acc_vf_dev->num_regions; + struct acc_vf_region *region; + + if (index == VFIO_PCI_BAR0_REGION_INDEX) + pr_debug("vfio bar 0 write\n"); + + if (index >= VFIO_PCI_NUM_REGIONS + num_vdev_regions + + num_vendor_region) { + dev_err(&acc_vf_dev->vf_dev->dev, + "failed to check write regions index!\n"); + return -EINVAL; + } + + if (index < VFIO_PCI_NUM_REGIONS + num_vdev_regions) + return vfio_pci_write(device_data, buf, count, ppos); + + index -= VFIO_PCI_NUM_REGIONS + num_vdev_regions; + + region = &acc_vf_dev->regions[index]; + + if (!region->ops->rw) { + dev_err(&acc_vf_dev->vf_dev->dev, + "failed to check regions write ops!\n"); + return -EINVAL; + } + + return region->ops->rw(acc_vf_dev, (char __user *)buf, + count, ppos, true); +} + +static int acc_vf_mmap(void *device_data, struct vm_area_struct *vma) +{ + return vfio_pci_mmap(device_data, vma); +} + +static void acc_vf_request(void *device_data, unsigned int count) +{ + vfio_pci_request(device_data, count); +} + +static struct vfio_device_ops acc_vf_device_ops_node = { + .name = "acc_vf", + .open = acc_vf_open, + .release = acc_vf_release, + .ioctl = acc_vf_ioctl, + .read = acc_vf_read, + .write = acc_vf_write, + .mmap = acc_vf_mmap, + .request = acc_vf_request, +}; + +static ssize_t 
acc_vf_debug_read(struct file *filp, char __user *buffer, + size_t count, loff_t *pos) +{ + char buf[VFIO_DEV_DBG_LEN]; + int len; + + len = scnprintf(buf, VFIO_DEV_DBG_LEN, "%s\n", + "echo 0: test vf data store\n" + "echo 1: test vf data writeback\n" + "echo 2: test vf send mailbox\n" + "echo 3: dump vf dev data\n" + "echo 4: dump migration state\n"); + + return simple_read_from_buffer(buffer, count, pos, buf, len); +} + +static ssize_t acc_vf_debug_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *pos) +{ + struct acc_vf_migration *acc_vf_dev = filp->private_data; + struct device *dev = &acc_vf_dev->vf_dev->dev; + struct hisi_qm *qm = acc_vf_dev->vf_qm; + char tbuf[VFIO_DEV_DBG_LEN]; + unsigned long val; + u64 data; + int len, ret; + + if (*pos) + return 0; + + if (count >= VFIO_DEV_DBG_LEN) + return -ENOSPC; + + len = simple_write_to_buffer(tbuf, VFIO_DEV_DBG_LEN - 1, + pos, buffer, count); + if (len < 0) + return len; + tbuf[len] = '\0'; + if (kstrtoul(tbuf, 0, &val)) + return -EFAULT; + + switch (val) { + case STATE_SAVE: + ret = vf_qm_state_save(qm, acc_vf_dev); + if (ret) + return -EINVAL; + break; + case STATE_RESUME: + ret = vf_qm_state_resume(qm, acc_vf_dev); + if (ret) + return -EINVAL; + break; + case MB_TEST: + data = readl(qm->io_base + QM_MB_CMD_SEND_BASE); + dev_info(dev, "debug mailbox addr: 0x%lx, mailbox val: 0x%llx\n", + (uintptr_t)qm->phys_base, data); + break; + case MIG_DATA_DUMP: + dev_info(dev, "dumped vf migration data:\n"); + print_hex_dump(KERN_INFO, "Mig Data:", DUMP_PREFIX_OFFSET, + VFIO_DBG_LOG_LEN, 1, + (unsigned char *)acc_vf_dev->vf_data, + sizeof(struct acc_vf_data), false); + break; + case MIG_DEV_SHOW: + if (!acc_vf_dev->mig_ctl) + dev_info(dev, "migration region have release!\n"); + else + dev_info(dev, + "device state: %u\n" + "data offset: %llu\n" + "data size: %llu\n" + "pending bytes: %llu\n" + "data addr: 0x%lx\n", + acc_vf_dev->mig_ctl->device_state, + acc_vf_dev->mig_ctl->data_offset, + acc_vf_dev->mig_ctl->data_size, + acc_vf_dev->mig_ctl->pending_bytes, + (uintptr_t)acc_vf_dev->vf_data); + break; + default: + return -EINVAL; + } + + return count; +} + +static const struct file_operations acc_vf_debug_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = acc_vf_debug_read, + .write = acc_vf_debug_write, +}; + +static ssize_t acc_vf_state_read(struct file *filp, char __user *buffer, + size_t count, loff_t *pos) +{ + struct acc_vf_migration *acc_vf_dev = filp->private_data; + char buf[VFIO_DEV_DBG_LEN]; + u32 state; + int len; + + if (!acc_vf_dev->mig_ctl) { + len = scnprintf(buf, VFIO_DEV_DBG_LEN, "%s\n", "Invalid\n"); + } else { + state = acc_vf_dev->mig_ctl->device_state; + switch (state) { + case VFIO_DEVICE_STATE_RUNNING: + len = scnprintf(buf, VFIO_DEV_DBG_LEN, "%s\n", + "RUNNING\n"); + break; + case VFIO_DEVICE_STATE_SAVING | VFIO_DEVICE_STATE_RUNNING: + len = scnprintf(buf, VFIO_DEV_DBG_LEN, "%s\n", + "SAVING and RUNNING\n"); + break; + case VFIO_DEVICE_STATE_SAVING: + len = scnprintf(buf, VFIO_DEV_DBG_LEN, "%s\n", + "SAVING\n"); + break; + case VFIO_DEVICE_STATE_STOP: + len = scnprintf(buf, VFIO_DEV_DBG_LEN, "%s\n", + "STOP\n"); + break; + case VFIO_DEVICE_STATE_RESUMING: + len = scnprintf(buf, VFIO_DEV_DBG_LEN, "%s\n", + "RESUMING\n"); + break; + default: + len = scnprintf(buf, VFIO_DEV_DBG_LEN, "%s\n", + "Error\n"); + } + } + + return simple_read_from_buffer(buffer, count, pos, buf, len); +} + +static const struct file_operations acc_vf_state_fops = { + .owner = THIS_MODULE, + .open = simple_open, 
+ .read = acc_vf_state_read, +}; + +static void vf_debugfs_init(struct acc_vf_migration *acc_vf_dev) +{ + char name[VFIO_DEV_DBG_LEN]; + int node_id; + + if (!mig_root_ref) + mig_debugfs_root = debugfs_create_dir("vfio_acc", NULL); + mutex_lock(&acc_vf_dev->reflock); + mig_root_ref++; + mutex_unlock(&acc_vf_dev->reflock); + + node_id = dev_to_node(&acc_vf_dev->vf_dev->dev); + if (node_id < 0) + node_id = 0; + + if (acc_vf_dev->acc_type == HISI_SEC) + scnprintf(name, VFIO_DEV_DBG_LEN, "sec_vf%d-%d", + node_id, acc_vf_dev->vf_id); + else if (acc_vf_dev->acc_type == HISI_HPRE) + scnprintf(name, VFIO_DEV_DBG_LEN, "hpre_vf%d-%d", + node_id, acc_vf_dev->vf_id); + else + scnprintf(name, VFIO_DEV_DBG_LEN, "zip_vf%d-%d", + node_id, acc_vf_dev->vf_id); + + acc_vf_dev->debug_root = debugfs_create_dir(name, mig_debugfs_root); + + debugfs_create_file("debug", 0644, acc_vf_dev->debug_root, + acc_vf_dev, &acc_vf_debug_fops); + debugfs_create_file("state", 0444, acc_vf_dev->debug_root, + acc_vf_dev, &acc_vf_state_fops); +} + +static void vf_debugfs_exit(struct acc_vf_migration *acc_vf_dev) +{ + debugfs_remove_recursive(acc_vf_dev->debug_root); + + mutex_lock(&acc_vf_dev->reflock); + mig_root_ref--; + mutex_unlock(&acc_vf_dev->reflock); + + if (!mig_root_ref) + debugfs_remove_recursive(mig_debugfs_root); +} + +static int qm_acc_type_init(struct acc_vf_migration *acc_vf_dev) +{ + struct hisi_qm *qm = acc_vf_dev->vf_qm; + int i; + + acc_vf_dev->acc_type = 0; + for (i = 0; i < ARRAY_SIZE(vf_acc_types); i++) { + if (!strncmp(qm->dev_name, vf_acc_types[i].name, + strlen(vf_acc_types[i].name))) + acc_vf_dev->acc_type = vf_acc_types[i].type; + } + if (!acc_vf_dev->acc_type) { + dev_err(&acc_vf_dev->vf_dev->dev, "failed to check acc type!\n"); + return -EINVAL; + } + + return 0; +} + +static int vf_qm_pci_init(struct pci_dev *pdev, struct hisi_qm *vfqm) +{ + struct device *dev = &pdev->dev; + u32 val; + int ret; + + ret = pci_request_mem_regions(pdev, vfqm->dev_name); + if (ret < 0) { + dev_err(dev, "failed to request mem regions!\n"); + return ret; + } + + vfqm->phys_base = pci_resource_start(pdev, PCI_BAR_2); + vfqm->io_base = devm_ioremap(dev, pci_resource_start(pdev, PCI_BAR_2), + pci_resource_len(pdev, PCI_BAR_2)); + if (!vfqm->io_base) { + ret = -EIO; + goto err_ioremap; + } + + val = readl(vfqm->io_base + QM_QUE_ISO_CFG_V); + val = val & BIT(0); + if (val) { + vfqm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); + vfqm->db_io_base = devm_ioremap(dev, pci_resource_start(pdev, + PCI_BAR_4), pci_resource_len(pdev, PCI_BAR_4)); + if (!vfqm->db_io_base) { + ret = -EIO; + goto err_db_ioremap; + } + } else { + vfqm->db_phys_base = vfqm->phys_base; + vfqm->db_io_base = vfqm->io_base; + } + + vfqm->pdev = pdev; + mutex_init(&vfqm->mailbox_lock); + + /* + * Allow VF devices to be loaded in VM when + * it loaded in migration driver + */ + pci_release_mem_regions(pdev); + + return 0; + +err_db_ioremap: + devm_iounmap(dev, vfqm->io_base); +err_ioremap: + pci_release_mem_regions(pdev); + return ret; +} + +static int acc_vf_dev_init(struct pci_dev *pdev, struct hisi_qm *pf_qm, + struct acc_vf_migration *acc_vf_dev) +{ + struct hisi_qm *vf_qm; + int ret; + + vf_qm = kzalloc(sizeof(struct hisi_qm), GFP_KERNEL); + if (!vf_qm) + return -ENOMEM; + + /* get vf qm dev name from pf */ + vf_qm->dev_name = pf_qm->dev_name; + vf_qm->fun_type = QM_HW_VF; + acc_vf_dev->vf_qm = vf_qm; + acc_vf_dev->pf_qm = pf_qm; + + ret = vf_qm_pci_init(pdev, vf_qm); + if (ret) + goto init_qm_error; + + ret = qm_acc_type_init(acc_vf_dev); + if 
(ret) + goto init_qm_error; + + return 0; + +init_qm_error: + kfree(vf_qm); + return -ENOMEM; +} + +static int hisi_acc_get_vf_id(struct pci_dev *dev) +{ + struct pci_dev *pf; + + if (!dev->is_virtfn) + return -EINVAL; + + pf = pci_physfn(dev); + return (((dev->bus->number << 8) + dev->devfn) - + ((pf->bus->number << 8) + pf->devfn + pf->sriov->offset)) / + pf->sriov->stride; +} + +static void *acc_vf_probe(struct pci_dev *pdev) +{ + struct acc_vf_migration *acc_vf_dev; + struct pci_dev *pf_dev, *vf_dev; + struct hisi_qm *pf_qm; + int vf_id, ret; + + pf_dev = pdev->physfn; + vf_dev = pdev; + /* + * the VF driver have been remove after unbind + * the PF driver have probe + */ + pf_qm = pci_get_drvdata(pf_dev); + if (!pf_qm) { + dev_err(&pdev->dev, "host qm driver not insmod!\n"); + return ERR_PTR(-EINVAL); + } + if (pf_qm->ver < QM_HW_V3) { + dev_err(&pdev->dev, + "device can't support migration! version: 0x%x\n", + pf_qm->ver); + return ERR_PTR(-EINVAL); + } + + vf_id = hisi_acc_get_vf_id(vf_dev); + if (vf_id < 0) { + dev_info(&pdev->dev, "vf device: %s, vf id: %d\n", + pf_qm->dev_name, vf_id); + return ERR_PTR(-EINVAL); + } + + acc_vf_dev = kzalloc(sizeof(*acc_vf_dev), GFP_KERNEL); + if (!acc_vf_dev) + return ERR_PTR(-ENOMEM); + + ret = acc_vf_dev_init(pdev, pf_qm, acc_vf_dev); + if (ret) { + kfree(acc_vf_dev); + return ERR_PTR(-ENOMEM); + } + + acc_vf_dev->vf_id = vf_id + 1; + acc_vf_dev->vf_vendor = pdev->vendor; + acc_vf_dev->vf_device = pdev->device; + acc_vf_dev->pf_dev = pf_dev; + acc_vf_dev->vf_dev = vf_dev; + acc_vf_dev->mig_ignore = false; + mutex_init(&acc_vf_dev->reflock); + + vf_debugfs_init(acc_vf_dev); + + return acc_vf_dev; +} + +static void acc_vf_remove(void *vendor_data) +{ + struct acc_vf_migration *acc_vf_dev = vendor_data; + struct device *dev = &acc_vf_dev->vf_dev->dev; + struct hisi_qm *qm = acc_vf_dev->vf_qm; + + vf_debugfs_exit(acc_vf_dev); + + devm_iounmap(dev, qm->io_base); + + kfree(qm); + kfree(acc_vf_dev); +} + +static struct vfio_pci_vendor_driver_ops sec_vf_mig_ops = { + .owner = THIS_MODULE, + .name = "hisi_sec2", + .vendor = PCI_VENDOR_ID_HUAWEI, + .device = PCI_DEVICE_ID_HUAWEI_SEC_VF, + .probe = acc_vf_probe, + .remove = acc_vf_remove, + .device_ops = &acc_vf_device_ops_node, +}; + +static struct vfio_pci_vendor_driver_ops hpre_vf_mig_ops = { + .owner = THIS_MODULE, + .name = "hisi_hpre", + .vendor = PCI_VENDOR_ID_HUAWEI, + .device = PCI_DEVICE_ID_HUAWEI_HPRE_VF, + .probe = acc_vf_probe, + .remove = acc_vf_remove, + .device_ops = &acc_vf_device_ops_node, +}; + +static struct vfio_pci_vendor_driver_ops zip_vf_mig_ops = { + .owner = THIS_MODULE, + .name = "hisi_zip", + .vendor = PCI_VENDOR_ID_HUAWEI, + .device = PCI_DEVICE_ID_HUAWEI_ZIP_VF, + .probe = acc_vf_probe, + .remove = acc_vf_remove, + .device_ops = &acc_vf_device_ops_node, +}; + +static int __init acc_vf_module_init(void) +{ + __vfio_pci_register_vendor_driver(&sec_vf_mig_ops); + + __vfio_pci_register_vendor_driver(&hpre_vf_mig_ops); + + __vfio_pci_register_vendor_driver(&zip_vf_mig_ops); + + return 0; +}; + +static void __exit acc_vf_module_exit(void) +{ + vfio_pci_unregister_vendor_driver(&sec_vf_mig_ops); + vfio_pci_unregister_vendor_driver(&hpre_vf_mig_ops); + vfio_pci_unregister_vendor_driver(&zip_vf_mig_ops); +}; +module_init(acc_vf_module_init); +module_exit(acc_vf_module_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Longfang Liu "); +MODULE_DESCRIPTION("HiSilicon Accelerator VF live migration driver"); diff --git 
a/KAEKernelDriver/KAEKernelDriver-OLK-5.10/hisilicon/migration/acc_vf_migration.h b/KAEKernelDriver/KAEKernelDriver-OLK-5.10/hisilicon/migration/acc_vf_migration.h new file mode 100644 index 0000000000000000000000000000000000000000..a2368ba22ffd204444852f2f73b847a48aa245e8 --- /dev/null +++ b/KAEKernelDriver/KAEKernelDriver-OLK-5.10/hisilicon/migration/acc_vf_migration.h @@ -0,0 +1,272 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 HiSilicon Limited. */ + +#ifndef ACC_MIG_H +#define ACC_MIG_H + +#include +#include +#include "../vfio.h" + +#include "../hisi_acc_qm.h" + +#define VFIO_PCI_OFFSET_SHIFT 40 +#define VFIO_PCI_OFFSET_TO_INDEX(off) ((off) >> VFIO_PCI_OFFSET_SHIFT) +#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT) +#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1) + +#define MIGRATION_REGION_SZ (sizeof(struct acc_vf_data) + \ + sizeof(struct vfio_device_migration_info)) +#define VFIO_DEV_DBG_LEN 256 +#define VFIO_DBG_LOG_LEN 16 +#define VFIO_DEVFN_MASK 0xFF + +#define PCI_BAR_2 2 +#define PCI_BAR_4 4 +#define POLL_PERIOD 10 +#define POLL_TIMEOUT 1000 +#define QM_CACHE_WB_START 0x204 +#define QM_CACHE_WB_DONE 0x208 +#define QM_MB_CMD_PAUSE_QM 0xe +#define QM_ABNORMAL_INT_STATUS 0x100008 +#define QM_IFC_INT_STATUS 0x0028 +#define SEC_CORE_INT_STATUS 0x301008 +#define HPRE_HAC_INT_STATUS 0x301800 +#define HZIP_CORE_INT_STATUS 0x3010AC + +#define QM_VFT_CFG_RDY 0x10006c +#define QM_VFT_CFG_OP_WR 0x100058 +#define QM_VFT_CFG_TYPE 0x10005c +#define QM_VFT_CFG 0x100060 +#define QM_VFT_CFG_OP_ENABLE 0x100054 +#define QM_VFT_CFG_DATA_L 0x100064 +#define QM_VFT_CFG_DATA_H 0x100068 + +#define ERROR_CHECK_TIMEOUT 100 +#define CHECK_DELAY_TIME 100 + +#define QM_SQC_VFT_BASE_SHIFT_V2 28 +#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0) +#define QM_SQC_VFT_NUM_SHIFT_V2 45 +#define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0) + +/* mailbox */ +#define QM_MB_CMD_SQC_BT 0x4 +#define QM_MB_CMD_CQC_BT 0x5 +#define QM_MB_CMD_SQC_VFT_V2 0x6 + +#define QM_MB_CMD_SEND_BASE 0x300 +#define QM_MB_BUSY_SHIFT 13 +#define QM_MB_OP_SHIFT 14 +#define QM_MB_WAIT_READY_CNT 10 +#define QM_MB_MAX_WAIT_CNT 3000 +#define WAIT_PERIOD_US_MIN 100 +#define WAIT_PERIOD_US_MAX 200 +#define QM_MB_STATUS_MASK GENMASK(12, 9) + +/* doorbell */ +#define QM_DOORBELL_CMD_SQ 0 +#define QM_DOORBELL_CMD_CQ 1 +#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000 +#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000 +#define QM_DB_CMD_SHIFT_V2 12 +#define QM_DB_RAND_SHIFT_V2 16 +#define QM_DB_INDEX_SHIFT_V2 32 +#define QM_DB_PRIORITY_SHIFT_V2 48 + +/* RW regs */ +#define QM_REGS_MAX_LEN 7 +#define QM_REG_ADDR_OFFSET 0x0004 + +#define QM_XQC_ADDR_OFFSET 32U +#define QM_XQC_ADDR_LOW 0x1 +#define QM_XQC_ADDR_HIGH 0x2 +#define QM_VF_AEQ_INT_MASK 0x0004 +#define QM_VF_EQ_INT_MASK 0x000c +#define QM_IFC_INT_SOURCE_V 0x0020 +#define QM_IFC_INT_MASK 0x0024 +#define QM_IFC_INT_SET_V 0x002c +#define QM_QUE_ISO_CFG_V 0x0030 +#define QM_PAGE_SIZE 0x0034 + +#define QM_EQC_DW0 0X8000 +#define QM_AEQC_DW0 0X8020 + +struct qm_mailbox { + __le16 w0; + __le16 queue_num; + __le32 base_l; + __le32 base_h; + __le32 rsvd; +}; + +enum acc_type { + HISI_SEC = 0x1, + HISI_HPRE = 0x2, + HISI_ZIP = 0x3, +}; + +struct vf_acc_type { + const char *name; + u32 type; +}; + +static struct vf_acc_type vf_acc_types[] = { + {"hisi_sec2", HISI_SEC}, + {"hisi_hpre", HISI_HPRE}, + {"hisi_zip", HISI_ZIP}, +}; + +enum mig_debug_cmd { + STATE_SAVE, + STATE_RESUME, + MB_TEST, + MIG_DATA_DUMP, + MIG_DEV_SHOW, +}; + +static const char * const 
vf_dev_state[] = { + "Stop", + "Running", + "Saving", + "Running & Saving", + "Resuming", +}; + +#define QM_MATCH_SIZE 32L +struct acc_vf_data { + /* QM match information */ + u32 qp_num; + u32 acc_type; + u32 que_iso_cfg; + u32 qp_base; + /* QM reserved 4 match information */ + u32 qm_rsv_state[4]; + + /* QM RW regs */ + u32 aeq_int_mask; + u32 eq_int_mask; + u32 ifc_int_source; + u32 ifc_int_mask; + u32 ifc_int_set; + u32 page_size; + u32 vf_state; + + /* + * QM_VF_MB has 4 regs don't need to migration + * mailbox regs writeback value will cause + * hardware to perform command operations + */ + + /* QM_EQC_DW has 7 regs */ + u32 qm_eqc_dw[7]; + + /* QM_AEQC_DW has 7 regs */ + u32 qm_aeqc_dw[7]; + + /* QM reserved 5 regs */ + u32 qm_rsv_regs[5]; + + /* qm memory init information */ + dma_addr_t eqe_dma; + dma_addr_t aeqe_dma; + dma_addr_t sqc_dma; + dma_addr_t cqc_dma; +}; + +struct acc_vf_remap_irq_ctx { + struct eventfd_ctx *trigger; + struct virqfd *sync; + atomic_t cnt; + wait_queue_head_t waitq; + bool init; +}; + +struct acc_vf_migration { + __u32 vf_vendor; + __u32 vf_device; + __u32 handle; + struct pci_dev *pf_dev; + struct pci_dev *vf_dev; + struct hisi_qm *pf_qm; + struct hisi_qm *vf_qm; + int vf_id; + int refcnt; + u8 acc_type; + bool mig_ignore; + struct mutex reflock; + + struct vfio_device_migration_info *mig_ctl; + struct acc_vf_data *vf_data; + bool in_dirty_track; + struct acc_vf_remap_irq_ctx remap_irq_ctx; + struct acc_vf_region *regions; + int num_regions; + struct dentry *debug_root; +}; + +struct acc_vf_region_ops { + int (*rw)(struct acc_vf_migration *acc_vf_dev, + char __user *buf, size_t count, + loff_t *ppos, bool iswrite); + void (*release)(struct acc_vf_migration *acc_vf_dev, + struct acc_vf_region *region); + int (*mmap)(struct acc_vf_migration *acc_vf_dev, + struct acc_vf_region *region, + struct vm_area_struct *vma); + int (*add_cap)(struct acc_vf_migration *acc_vf_dev, + struct acc_vf_region *region, + struct vfio_info_cap *caps); +}; + +/* Single Root I/O Virtualization */ +struct pci_sriov { + int pos; /* Capability position */ + int nres; /* Number of resources */ + u32 cap; /* SR-IOV Capabilities */ + u16 ctrl; /* SR-IOV Control */ + u16 total_VFs; /* Total VFs associated with the PF */ + u16 initial_VFs; /* Initial VFs associated with the PF */ + u16 num_VFs; /* Number of VFs available */ + u16 offset; /* First VF Routing ID offset */ + u16 stride; /* Following VF stride */ + u16 vf_device; /* VF device ID */ + u32 pgsz; /* Page size for BAR alignment */ + u8 link; /* Function Dependency Link */ + u8 max_VF_buses; /* Max buses consumed by VFs */ + u16 driver_max_VFs; /* Max num VFs driver supports */ + struct pci_dev *dev; /* Lowest numbered PF */ + struct pci_dev *self; /* This PF */ + u32 class; /* VF device */ + u8 hdr_type; /* VF header type */ + u16 subsystem_vendor; /* VF subsystem vendor */ + u16 subsystem_device; /* VF subsystem device */ + resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ + bool drivers_autoprobe; /* Auto probing of VFs by driver */ +}; + +struct acc_vf_region { + u32 type; + u32 subtype; + size_t size; + u32 flags; + const struct acc_vf_region_ops *ops; + void *data; +}; + +struct acc_vf_irqops { + int (*set_irqs)(struct acc_vf_migration *acc_vf_dev, + u32 flags, unsigned int index, + unsigned int start, unsigned int count, + void *data); +}; + +struct acc_vf_irq { + u32 type; + u32 subtype; + u32 flags; + u32 count; + const struct acc_vf_irqops *ops; +}; + +#endif /* ACC_MIG_H */ diff --git 
a/KAEKernelDriver/KAEKernelDriver-OLK-5.10/hisilicon/vfio.h b/KAEKernelDriver/KAEKernelDriver-OLK-5.10/hisilicon/vfio.h new file mode 100644 index 0000000000000000000000000000000000000000..34899305cb6e06c5dbed3d075473ddc91e96a842 --- /dev/null +++ b/KAEKernelDriver/KAEKernelDriver-OLK-5.10/hisilicon/vfio.h @@ -0,0 +1,294 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * VFIO API definition + * + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson + */ +#ifndef VFIO_H +#define VFIO_H + + +#include +#include +#include +#include +#include + +struct vfio_device { + struct device *dev; + const struct vfio_device_ops *ops; + struct vfio_group *group; + + /* Members below here are private, not for driver use */ + refcount_t refcount; + struct completion comp; + struct list_head group_next; + void *device_data; +}; + +/** + * struct vfio_device_ops - VFIO bus driver device callbacks + * + * @open: Called when userspace creates new file descriptor for device + * @release: Called when userspace releases file descriptor for device + * @read: Perform read(2) on device file descriptor + * @write: Perform write(2) on device file descriptor + * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_* + * operations documented below + * @mmap: Perform mmap(2) on a region of the device file descriptor + * @request: Request for the bus driver to release the device + * @match: Optional device name match callback (return: 0 for no-match, >0 for + * match, -errno for abort (ex. match with insufficient or incorrect + * additional args) + */ +struct vfio_device_ops { + char *name; + int (*open)(void *device_data); + void (*release)(void *device_data); + ssize_t (*read)(void *device_data, char __user *buf, + size_t count, loff_t *ppos); + ssize_t (*write)(void *device_data, const char __user *buf, + size_t count, loff_t *size); + long (*ioctl)(void *device_data, unsigned int cmd, + unsigned long arg); + int (*mmap)(void *device_data, struct vm_area_struct *vma); + void (*request)(void *device_data, unsigned int count); + int (*match)(void *device_data, char *buf); +}; + +extern struct iommu_group *vfio_iommu_group_get(struct device *dev); +extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev); + +void vfio_init_group_dev(struct vfio_device *device, struct device *dev, + const struct vfio_device_ops *ops, void *device_data); +int vfio_register_group_dev(struct vfio_device *device); +extern int vfio_add_group_dev(struct device *dev, + const struct vfio_device_ops *ops, + void *device_data); + +extern void *vfio_del_group_dev(struct device *dev); +void vfio_unregister_group_dev(struct vfio_device *device); +extern struct vfio_device *vfio_device_get_from_dev(struct device *dev); +extern void vfio_device_put(struct vfio_device *device); +extern void *vfio_device_data(struct vfio_device *device); + +/** + * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks + */ +struct vfio_iommu_driver_ops { + char *name; + struct module *owner; + void *(*open)(unsigned long arg); + void (*release)(void *iommu_data); + ssize_t (*read)(void *iommu_data, char __user *buf, + size_t count, loff_t *ppos); + ssize_t (*write)(void *iommu_data, const char __user *buf, + size_t count, loff_t *size); + long (*ioctl)(void *iommu_data, unsigned int cmd, + unsigned long arg); + int (*mmap)(void *iommu_data, struct vm_area_struct *vma); + int (*attach_group)(void *iommu_data, + struct iommu_group *group); + void (*detach_group)(void *iommu_data, + struct 
iommu_group *group); + int (*pin_pages)(void *iommu_data, + struct iommu_group *group, + unsigned long *user_pfn, + int npage, int prot, + unsigned long *phys_pfn); + int (*unpin_pages)(void *iommu_data, + unsigned long *user_pfn, int npage); + int (*register_notifier)(void *iommu_data, + unsigned long *events, + struct notifier_block *nb); + int (*unregister_notifier)(void *iommu_data, + struct notifier_block *nb); + int (*dma_rw)(void *iommu_data, dma_addr_t user_iova, + void *data, size_t count, bool write); + KABI_EXTEND(struct iommu_domain *(*group_iommu_domain)(void *iommu_data, + struct iommu_group *group)) +}; + +extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); + +extern void vfio_unregister_iommu_driver( + const struct vfio_iommu_driver_ops *ops); + +/* + * External user API + */ +extern struct vfio_group *vfio_group_get_external_user(struct file *filep); +extern void vfio_group_put_external_user(struct vfio_group *group); +extern struct vfio_group *vfio_group_get_external_user_from_dev(struct device + *dev); +extern bool vfio_external_group_match_file(struct vfio_group *group, + struct file *filep); +extern int vfio_external_user_iommu_id(struct vfio_group *group); +extern long vfio_external_check_extension(struct vfio_group *group, + unsigned long arg); + +#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long)) + +extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, + int npage, int prot, unsigned long *phys_pfn); +extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, + int npage); + +extern int vfio_group_pin_pages(struct vfio_group *group, + unsigned long *user_iova_pfn, int npage, + int prot, unsigned long *phys_pfn); +extern int vfio_group_unpin_pages(struct vfio_group *group, + unsigned long *user_iova_pfn, int npage); + +extern int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova, + void *data, size_t len, bool write); + +extern struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group); + +/* each type has independent events */ +enum vfio_notify_type { + VFIO_IOMMU_NOTIFY = 0, + VFIO_GROUP_NOTIFY = 1, +}; + +/* events for VFIO_IOMMU_NOTIFY */ +#define VFIO_IOMMU_NOTIFY_DMA_UNMAP BIT(0) + +/* events for VFIO_GROUP_NOTIFY */ +#define VFIO_GROUP_NOTIFY_SET_KVM BIT(0) + +extern int vfio_register_notifier(struct device *dev, + enum vfio_notify_type type, + unsigned long *required_events, + struct notifier_block *nb); +extern int vfio_unregister_notifier(struct device *dev, + enum vfio_notify_type type, + struct notifier_block *nb); + +struct kvm; +extern void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm); + +/* + * Sub-module helpers + */ +struct vfio_info_cap { + struct vfio_info_cap_header *buf; + size_t size; +}; +extern struct vfio_info_cap_header *vfio_info_cap_add( + struct vfio_info_cap *caps, size_t size, u16 id, u16 version); +extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset); + +extern int vfio_info_add_capability(struct vfio_info_cap *caps, + struct vfio_info_cap_header *cap, + size_t size); + +extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, + int num_irqs, int max_irq_type, + size_t *data_size); + +struct pci_dev; +#if IS_ENABLED(CONFIG_VFIO_SPAPR_EEH) +extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev); +extern void vfio_spapr_pci_eeh_release(struct pci_dev *pdev); +extern long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, + unsigned int cmd, + unsigned long arg); +#else +static 
inline void vfio_spapr_pci_eeh_open(struct pci_dev *pdev) +{ +} + +static inline void vfio_spapr_pci_eeh_release(struct pci_dev *pdev) +{ +} + +static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, + unsigned int cmd, + unsigned long arg) +{ + return -ENOTTY; +} +#endif /* CONFIG_VFIO_SPAPR_EEH */ + +/* + * IRQfd - generic + */ +struct virqfd { + void *opaque; + struct eventfd_ctx *eventfd; + int (*handler)(void *, void *); + void (*thread)(void *, void *); + void *data; + struct work_struct inject; + wait_queue_entry_t wait; + poll_table pt; + struct work_struct shutdown; + struct work_struct flush_inject; + struct virqfd **pvirqfd; +}; + +extern int vfio_virqfd_enable(void *opaque, + int (*handler)(void *, void *), + void (*thread)(void *, void *), + void *data, struct virqfd **pvirqfd, int fd); +extern void vfio_virqfd_disable(struct virqfd **pvirqfd); +void vfio_virqfd_flush_thread(struct virqfd **pvirqfd); + +extern int vfio_pci_num_regions(void *device_data); +extern struct pci_dev *vfio_pci_pdev(void *device_data); +extern long vfio_pci_ioctl(void *device_data, + unsigned int cmd, unsigned long arg); +extern ssize_t vfio_pci_read(void *device_data, char __user *buf, + size_t count, loff_t *ppos); +extern ssize_t vfio_pci_write(void *device_data, const char __user *buf, + size_t count, loff_t *ppos); +extern int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma); +extern void vfio_pci_request(void *device_data, unsigned int count); +extern int vfio_pci_open(void *device_data); +extern void vfio_pci_release(void *device_data); +extern void *vfio_pci_vendor_data(void *device_data); +extern int vfio_pci_set_vendor_regions(void *device_data, + int num_vendor_regions); + +struct vfio_pci_vendor_driver_ops { + char *name; + struct module *owner; + /* Used to match device */ + unsigned short vendor; + unsigned short device; + void *(*probe)(struct pci_dev *pdev); + void (*remove)(void *vendor_data); + struct vfio_device_ops *device_ops; +}; +int __vfio_pci_register_vendor_driver(struct vfio_pci_vendor_driver_ops *ops); +void vfio_pci_unregister_vendor_driver(struct vfio_pci_vendor_driver_ops *ops); + +#define vfio_pci_register_vendor_driver(__name, __probe, __remove, \ + __device_ops) \ +static struct vfio_pci_vendor_driver_ops __ops ## _node = { \ + .owner = THIS_MODULE, \ + .name = __name, \ + .probe = __probe, \ + .remove = __remove, \ + .device_ops = __device_ops, \ +}; \ +__vfio_pci_register_vendor_driver(&__ops ## _node) + +#define module_vfio_pci_register_vendor_handler(name, probe, remove, \ + device_ops) \ +static int __init device_ops ## _module_init(void) \ +{ \ + vfio_pci_register_vendor_driver(name, probe, remove, \ + device_ops); \ + return 0; \ +}; \ +static void __exit device_ops ## _module_exit(void) \ +{ \ + vfio_pci_unregister_vendor_driver(device_ops); \ +}; \ +module_init(device_ops ## _module_init); \ +module_exit(device_ops ## _module_exit) + +#endif /* VFIO_H */ diff --git a/KAEKernelDriver/KAEKernelDriver-OLK-6.6/Makefile b/KAEKernelDriver/KAEKernelDriver-OLK-6.6/Makefile index e059bb4897aa43e3e7b591f7748f66d7abcfecb2..9328cee5e573294141e2394f84bd655da9cf9a22 100644 --- a/KAEKernelDriver/KAEKernelDriver-OLK-6.6/Makefile +++ b/KAEKernelDriver/KAEKernelDriver-OLK-6.6/Makefile @@ -23,7 +23,8 @@ default: CONFIG_CRYPTO_DEV_HISI_ZIP=m \ CONFIG_CRYPTO_DEV_HISI_HPRE=m \ CONFIG_CRYPTO_DEV_HISI_SEC2=m \ - CONFIG_CRYPTO_DEV_HISI_TRNG=m + CONFIG_CRYPTO_DEV_HISI_TRNG=m \ + CONFIG_HISI_ACC_VFIO_PCI=m #copy: # cp -f $(shell pwd)/include_linux/uacce.h 
$(KSP)/include/linux # cp -f $(shell pwd)/include_uapi_linux/uacce.h $(KSP)/include/uapi/linux @@ -40,6 +41,7 @@ install: -modprobe hisi_sec2 uacce_mode=1 pf_q_num=256 -modprobe hisi_hpre uacce_mode=1 pf_q_num=256 -modprobe hisi_zip uacce_mode=1 pf_q_num=256 + -modprobe hisi_acc_vfio_pci -echo "options hisi_sec2 uacce_mode=1 pf_q_num=256" > /etc/modprobe.d/hisi_sec2.conf -echo "options hisi_hpre uacce_mode=1 pf_q_num=256" > /etc/modprobe.d/hisi_hpre.conf -echo "options hisi_zip uacce_mode=1 pf_q_num=256" > /etc/modprobe.d/hisi_zip.conf @@ -60,6 +62,7 @@ check: done uninstall: + modprobe -r hisi_acc_vfio_pci modprobe -r hisi_zip modprobe -r hisi_hpre modprobe -r hisi_sec2 @@ -70,6 +73,7 @@ uninstall: rm -rf /lib/modules/$(KERNEL_VERSION_BY_BUILDENV)/extra/hisi_sec2.ko rm -rf /lib/modules/$(KERNEL_VERSION_BY_BUILDENV)/extra/hisi_hpre.ko rm -rf /lib/modules/$(KERNEL_VERSION_BY_BUILDENV)/extra/hisi_zip.ko + rm -rf /lib/modules/$(KERNEL_VERSION_BY_BUILDENV)/extra/hisi_acc_vfio_pci.ko rm -rf /etc/modprobe.d/hisi_sec2.conf rm -rf /etc/modprobe.d/hisi_hpre.conf rm -rf /etc/modprobe.d/hisi_zip.conf @@ -86,6 +90,7 @@ nosva: -modprobe hisi_sec2 uacce_mode=2 pf_q_num=256 -modprobe hisi_hpre uacce_mode=2 pf_q_num=256 -modprobe hisi_zip uacce_mode=2 pf_q_num=256 + -modprobe hisi_acc_vfio_pci -echo "options hisi_sec2 uacce_mode=2 pf_q_num=256" > /etc/modprobe.d/hisi_sec2.conf -echo "options hisi_hpre uacce_mode=2 pf_q_num=256" > /etc/modprobe.d/hisi_hpre.conf -echo "options hisi_zip uacce_mode=2 pf_q_num=256" > /etc/modprobe.d/hisi_zip.conf diff --git a/KAEKernelDriver/KAEKernelDriver-OLK-6.6/hisilicon/Makefile b/KAEKernelDriver/KAEKernelDriver-OLK-6.6/hisilicon/Makefile index a62965be8633147cdfd1a666c7f6a6e565efd04e..256d958d2614554ebbf424204f081d0f58473f7b 100644 --- a/KAEKernelDriver/KAEKernelDriver-OLK-6.6/hisilicon/Makefile +++ b/KAEKernelDriver/KAEKernelDriver-OLK-6.6/hisilicon/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/ obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o hisi_qm-objs = qm.o sgl.o debugfs.o obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/ +obj-$(CONFIG_HISI_ACC_VFIO_PCI) += vfio/ \ No newline at end of file diff --git a/KAEKernelDriver/KAEKernelDriver-OLK-6.6/hisilicon/vfio/Kconfig b/KAEKernelDriver/KAEKernelDriver-OLK-6.6/hisilicon/vfio/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..cbf1c32f6ebff7388186dd46e11eb5176193d901 --- /dev/null +++ b/KAEKernelDriver/KAEKernelDriver-OLK-6.6/hisilicon/vfio/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-only +config HISI_ACC_VFIO_PCI + tristate "VFIO support for HiSilicon ACC PCI devices" + depends on ARM64 || (COMPILE_TEST && 64BIT) + depends on PCI_MSI + depends on CRYPTO_DEV_HISI_QM + depends on CRYPTO_DEV_HISI_HPRE + depends on CRYPTO_DEV_HISI_SEC2 + depends on CRYPTO_DEV_HISI_ZIP + select VFIO_PCI_CORE + help + This provides generic PCI support for HiSilicon ACC devices + using the VFIO framework. + + If you don't know what to do here, say N. 
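
Not part of this patch: before driving any of the migration arcs added below, a VMM first checks whether the VF bound to hisi-acc-vfio-pci actually advertises live migration. A minimal userspace sketch using the standard VFIO_DEVICE_FEATURE ioctl follows; acquiring the device fd (VFIO group or iommufd cdev path) is assumed and omitted. The probe only succeeds when the device was registered with the migration ops variant defined later in this patch.

/* Hedged userspace sketch, not part of the patch: probe migration support. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int probe_migration(int device_fd)
{
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_feature_migration) + 7) / 8];
	struct vfio_device_feature *feat = (void *)buf;
	struct vfio_device_feature_migration *mig = (void *)feat->data;

	memset(buf, 0, sizeof(buf));
	feat->argsz = sizeof(buf);
	feat->flags = VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_MIGRATION;

	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feat) < 0)
		return -1;	/* bound to the generic ops, no migration */

	printf("STOP_COPY:%d PRE_COPY:%d\n",
	       !!(mig->flags & VFIO_MIGRATION_STOP_COPY),
	       !!(mig->flags & VFIO_MIGRATION_PRE_COPY));
	return 0;
}
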
diff --git a/KAEKernelDriver/KAEKernelDriver-OLK-6.6/hisilicon/vfio/Makefile b/KAEKernelDriver/KAEKernelDriver-OLK-6.6/hisilicon/vfio/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..c66b3783f2f98bd3b941153b19adb8282a149822 --- /dev/null +++ b/KAEKernelDriver/KAEKernelDriver-OLK-6.6/hisilicon/vfio/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_HISI_ACC_VFIO_PCI) += hisi-acc-vfio-pci.o +hisi-acc-vfio-pci-y := hisi_acc_vfio_pci.o + diff --git a/KAEKernelDriver/KAEKernelDriver-OLK-6.6/hisilicon/vfio/hisi_acc_vfio_pci.c b/KAEKernelDriver/KAEKernelDriver-OLK-6.6/hisilicon/vfio/hisi_acc_vfio_pci.c new file mode 100644 index 0000000000000000000000000000000000000000..33a3db01ed04560570bc025c435da751e24eeaa1 --- /dev/null +++ b/KAEKernelDriver/KAEKernelDriver-OLK-6.6/hisilicon/vfio/hisi_acc_vfio_pci.c @@ -0,0 +1,1750 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, HiSilicon Ltd. + */ + +#include +#include +#include +#include "../hisi_acc_qm.h" +#include +#include +#include +#include +#include +#include + +#include "hisi_acc_vfio_pci.h" + +/* Return 0 on VM acc device ready, -ETIMEDOUT hardware timeout */ +static int qm_wait_dev_not_ready(struct hisi_qm *qm) +{ + u32 val; + + return readl_relaxed_poll_timeout(qm->io_base + QM_VF_STATE, + val, !(val & 0x1), MB_POLL_PERIOD_US, + MB_POLL_TIMEOUT_US); +} + +/* + * Each state Reg is checked 100 times, + * with a delay of 100 microseconds after each check + */ +static u32 qm_check_reg_state(struct hisi_qm *qm, u32 regs) +{ + int check_times = 0; + u32 state; + + state = readl(qm->io_base + regs); + while (state && check_times < ERROR_CHECK_TIMEOUT) { + udelay(CHECK_DELAY_TIME); + state = readl(qm->io_base + regs); + check_times++; + } + + return state; +} + +static int qm_read_regs(struct hisi_qm *qm, u32 reg_addr, + u32 *data, u8 nums) +{ + int i; + + if (nums < 1 || nums > QM_REGS_MAX_LEN) + return -EINVAL; + + for (i = 0; i < nums; i++) { + data[i] = readl(qm->io_base + reg_addr); + reg_addr += QM_REG_ADDR_OFFSET; + } + + return 0; +} + +static int qm_write_regs(struct hisi_qm *qm, u32 reg, + u32 *data, u8 nums) +{ + int i; + + if (nums < 1 || nums > QM_REGS_MAX_LEN) + return -EINVAL; + + for (i = 0; i < nums; i++) + writel(data[i], qm->io_base + reg + i * QM_REG_ADDR_OFFSET); + + return 0; +} + +static int qm_get_vft(struct hisi_qm *qm, u32 *base) +{ + u64 sqc_vft; + u32 qp_num; + int ret; + + ret = hisi_qm_mb_read(qm, &sqc_vft, QM_MB_CMD_SQC_VFT_V2, 0); + if (ret) + return ret; + + *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); + qp_num = (QM_SQC_VFT_NUM_MASK_V2 & + (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; + + return qp_num; +} + +static int qm_get_xqc_regs(struct hisi_acc_vf_core_device *hisi_acc_vdev, + struct acc_vf_data *vf_data) +{ + struct hisi_qm *qm = &hisi_acc_vdev->vf_qm; + struct device *dev = &qm->pdev->dev; + u32 eqc_addr, aeqc_addr; + int ret; + + if (qm->ver == QM_HW_V3) { + eqc_addr = QM_EQC_DW0; + aeqc_addr = QM_AEQC_DW0; + } else { + eqc_addr = QM_EQC_PF_DW0; + aeqc_addr = QM_AEQC_PF_DW0; + } + + /* QM_EQC_DW has 7 regs */ + ret = qm_read_regs(qm, eqc_addr, vf_data->qm_eqc_dw, 7); + if (ret) { + dev_err(dev, "failed to read QM_EQC_DW\n"); + return ret; + } + + /* QM_AEQC_DW has 7 regs */ + ret = qm_read_regs(qm, aeqc_addr, vf_data->qm_aeqc_dw, 7); + if (ret) { + dev_err(dev, "failed to read QM_AEQC_DW\n"); + return ret; + } + + return 0; +} + +static int qm_set_xqc_regs(struct hisi_acc_vf_core_device *hisi_acc_vdev, + 
struct acc_vf_data *vf_data) +{ + struct hisi_qm *qm = &hisi_acc_vdev->vf_qm; + struct device *dev = &qm->pdev->dev; + u32 eqc_addr, aeqc_addr; + int ret; + + if (qm->ver == QM_HW_V3) { + eqc_addr = QM_EQC_DW0; + aeqc_addr = QM_AEQC_DW0; + } else { + eqc_addr = QM_EQC_PF_DW0; + aeqc_addr = QM_AEQC_PF_DW0; + } + + /* QM_EQC_DW has 7 regs */ + ret = qm_write_regs(qm, eqc_addr, vf_data->qm_eqc_dw, 7); + if (ret) { + dev_err(dev, "failed to write QM_EQC_DW\n"); + return ret; + } + + /* QM_AEQC_DW has 7 regs */ + ret = qm_write_regs(qm, aeqc_addr, vf_data->qm_aeqc_dw, 7); + if (ret) { + dev_err(dev, "failed to write QM_AEQC_DW\n"); + return ret; + } + + return 0; +} + +static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data) +{ + struct device *dev = &qm->pdev->dev; + int ret; + + ret = qm_read_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1); + if (ret) { + dev_err(dev, "failed to read QM_VF_AEQ_INT_MASK\n"); + return ret; + } + + ret = qm_read_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1); + if (ret) { + dev_err(dev, "failed to read QM_VF_EQ_INT_MASK\n"); + return ret; + } + + ret = qm_read_regs(qm, QM_IFC_INT_SOURCE_V, + &vf_data->ifc_int_source, 1); + if (ret) { + dev_err(dev, "failed to read QM_IFC_INT_SOURCE_V\n"); + return ret; + } + + ret = qm_read_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1); + if (ret) { + dev_err(dev, "failed to read QM_IFC_INT_MASK\n"); + return ret; + } + + ret = qm_read_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1); + if (ret) { + dev_err(dev, "failed to read QM_IFC_INT_SET_V\n"); + return ret; + } + + ret = qm_read_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1); + if (ret) { + dev_err(dev, "failed to read QM_PAGE_SIZE\n"); + return ret; + } + + return 0; +} + +static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data) +{ + struct device *dev = &qm->pdev->dev; + int ret; + + /* Check VF state */ + if (unlikely(hisi_qm_wait_mb_ready(qm))) { + dev_err(&qm->pdev->dev, "QM device is not ready to write\n"); + return -EBUSY; + } + + ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1); + if (ret) { + dev_err(dev, "failed to write QM_VF_AEQ_INT_MASK\n"); + return ret; + } + + ret = qm_write_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1); + if (ret) { + dev_err(dev, "failed to write QM_VF_EQ_INT_MASK\n"); + return ret; + } + + ret = qm_write_regs(qm, QM_IFC_INT_SOURCE_V, + &vf_data->ifc_int_source, 1); + if (ret) { + dev_err(dev, "failed to write QM_IFC_INT_SOURCE_V\n"); + return ret; + } + + ret = qm_write_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1); + if (ret) { + dev_err(dev, "failed to write QM_IFC_INT_MASK\n"); + return ret; + } + + ret = qm_write_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1); + if (ret) { + dev_err(dev, "failed to write QM_IFC_INT_SET_V\n"); + return ret; + } + + ret = qm_write_regs(qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1); + if (ret) { + dev_err(dev, "failed to write QM_QUE_ISO_CFG_V\n"); + return ret; + } + + ret = qm_write_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1); + if (ret) { + dev_err(dev, "failed to write QM_PAGE_SIZE\n"); + return ret; + } + + return 0; +} + +static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, + u16 index, u8 priority) +{ + u64 doorbell; + u64 dbase; + u16 randata = 0; + + if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ) + dbase = QM_DOORBELL_SQ_CQ_BASE_V2; + else + dbase = QM_DOORBELL_EQ_AEQ_BASE_V2; + + doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) | + ((u64)randata << QM_DB_RAND_SHIFT_V2) | + 
((u64)index << QM_DB_INDEX_SHIFT_V2) | + ((u64)priority << QM_DB_PRIORITY_SHIFT_V2); + + writeq(doorbell, qm->io_base + dbase); +} + +static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id, u32 *rbase) +{ + unsigned int val; + u64 sqc_vft; + u32 qp_num; + int ret; + + ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, + val & BIT(0), MB_POLL_PERIOD_US, + MB_POLL_TIMEOUT_US); + if (ret) + return ret; + + writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); + /* 0 mean SQC VFT */ + writel(0x0, qm->io_base + QM_VFT_CFG_TYPE); + writel(vf_id, qm->io_base + QM_VFT_CFG); + + writel(0x0, qm->io_base + QM_VFT_CFG_RDY); + writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); + + ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, + val & BIT(0), MB_POLL_PERIOD_US, + MB_POLL_TIMEOUT_US); + if (ret) + return ret; + + sqc_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | + ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << + QM_XQC_ADDR_OFFSET); + *rbase = QM_SQC_VFT_BASE_MASK_V2 & + (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); + qp_num = (QM_SQC_VFT_NUM_MASK_V2 & + (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; + + return qp_num; +} + +static void qm_dev_cmd_init(struct hisi_qm *qm) +{ + /* Clear VF communication status registers. */ + writel(0x1, qm->io_base + QM_IFC_INT_SOURCE_V); + + /* Enable pf and vf communication. */ + writel(0x0, qm->io_base + QM_IFC_INT_MASK); +} + +static int vf_qm_cache_wb(struct hisi_qm *qm) +{ + unsigned int val; + + writel(0x1, qm->io_base + QM_CACHE_WB_START); + if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, + val, val & BIT(0), MB_POLL_PERIOD_US, + MB_POLL_TIMEOUT_US)) { + dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail\n"); + return -EINVAL; + } + + return 0; +} + +static void vf_qm_fun_reset(struct hisi_qm *qm) +{ + int i; + + for (i = 0; i < qm->qp_num; i++) + qm_db(qm, i, QM_DOORBELL_CMD_SQ, 0, 1); +} + +static int vf_qm_func_stop(struct hisi_qm *qm) +{ + return hisi_qm_mb_write(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0); +} + +static int vf_qm_version_check(struct acc_vf_data *vf_data, struct device *dev) +{ + switch (vf_data->acc_magic) { + case ACC_DEV_MAGIC_V2: + if (vf_data->major_ver != ACC_DRV_MAJOR_VER) { + dev_info(dev, "migration driver version<%u.%u> not match!\n", + vf_data->major_ver, vf_data->minor_ver); + return -EINVAL; + } + break; + case ACC_DEV_MAGIC_V1: + /* Correct dma address */ + vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH]; + vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET; + vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW]; + vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH]; + vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET; + vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW]; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev, + struct hisi_acc_vf_migration_file *migf) +{ + struct acc_vf_data *vf_data = &migf->vf_data; + struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm; + struct device *dev = &vf_qm->pdev->dev; + u32 que_iso_state; + int ret; + + if (migf->total_length < QM_MATCH_SIZE || hisi_acc_vdev->match_done) + return 0; + + ret = vf_qm_version_check(vf_data, dev); + if (ret) { + dev_err(dev, "failed to match ACC_DEV_MAGIC\n"); + return ret; + } + + if (vf_data->dev_id != hisi_acc_vdev->vf_dev->device) { + dev_err(dev, "failed to match VF devices\n"); + return -EINVAL; + } + + /* VF qp num check */ + ret = qm_get_vft(vf_qm, &vf_qm->qp_base); + if (ret <= 0) { + 
dev_err(dev, "failed to get vft qp nums\n"); + return ret; + } + + if (ret != vf_data->qp_num) { + dev_err(dev, "failed to match VF qp num\n"); + return -EINVAL; + } + + vf_qm->qp_num = ret; + + /* VF isolation state check */ + ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1); + if (ret) { + dev_err(dev, "failed to read QM_QUE_ISO_CFG_V\n"); + return ret; + } + + if (vf_data->que_iso_cfg != que_iso_state) { + dev_err(dev, "failed to match isolation state\n"); + return -EINVAL; + } + + hisi_acc_vdev->match_done = true; + return 0; +} + +static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev, + struct acc_vf_data *vf_data) +{ + struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm; + struct device *dev = &pf_qm->pdev->dev; + int vf_id = hisi_acc_vdev->vf_id; + int ret; + + vf_data->acc_magic = ACC_DEV_MAGIC_V2; + vf_data->major_ver = ACC_DRV_MAJOR_VER; + vf_data->minor_ver = ACC_DRV_MINOR_VER; + /* Save device id */ + vf_data->dev_id = hisi_acc_vdev->vf_dev->device; + + /* VF qp num save from PF */ + ret = pf_qm_get_qp_num(pf_qm, vf_id, &vf_data->qp_base); + if (ret <= 0) { + dev_err(dev, "failed to get vft qp nums!\n"); + return -EINVAL; + } + + vf_data->qp_num = ret; + + /* VF isolation state save from PF */ + ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1); + if (ret) { + dev_err(dev, "failed to read QM_QUE_ISO_CFG_V!\n"); + return ret; + } + + return 0; +} + +static void vf_qm_xeqc_save(struct hisi_qm *qm, + struct hisi_acc_vf_migration_file *migf) +{ + struct acc_vf_data *vf_data = &migf->vf_data; + u16 eq_head, aeq_head; + + eq_head = vf_data->qm_eqc_dw[0] & 0xFFFF; + qm_db(qm, 0, QM_DOORBELL_CMD_EQ, eq_head, 0); + + aeq_head = vf_data->qm_aeqc_dw[0] & 0xFFFF; + qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, aeq_head, 0); +} + +static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev, + struct hisi_acc_vf_migration_file *migf) +{ + struct hisi_qm *qm = &hisi_acc_vdev->vf_qm; + struct device *dev = &qm->pdev->dev; + struct acc_vf_data *vf_data = &migf->vf_data; + int ret; + + /* Return if only match data was transferred */ + if (migf->total_length == QM_MATCH_SIZE) + return 0; + + if (migf->total_length < sizeof(struct acc_vf_data)) + return -EINVAL; + + if (!vf_data->eqe_dma || !vf_data->aeqe_dma || + !vf_data->sqc_dma || !vf_data->cqc_dma) { + dev_info(dev, "resume dma addr is NULL!\n"); + hisi_acc_vdev->vf_qm_state = QM_NOT_READY; + return 0; + } + + ret = qm_write_regs(qm, QM_VF_STATE, &vf_data->vf_qm_state, 1); + if (ret) { + dev_err(dev, "failed to write QM_VF_STATE\n"); + return ret; + } + hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state; + + qm->eqe_dma = vf_data->eqe_dma; + qm->aeqe_dma = vf_data->aeqe_dma; + qm->sqc_dma = vf_data->sqc_dma; + qm->cqc_dma = vf_data->cqc_dma; + + qm->qp_base = vf_data->qp_base; + qm->qp_num = vf_data->qp_num; + + ret = qm_set_regs(qm, vf_data); + if (ret) { + dev_err(dev, "set VF regs failed\n"); + return ret; + } + + ret = qm_set_xqc_regs(hisi_acc_vdev, vf_data); + if (ret) + return ret; + + ret = hisi_qm_mb_write(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); + if (ret) { + dev_err(dev, "set sqc failed\n"); + return ret; + } + + ret = hisi_qm_mb_write(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); + if (ret) { + dev_err(dev, "set cqc failed\n"); + return ret; + } + + qm_dev_cmd_init(qm); + return 0; +} + +static int vf_qm_read_data(struct hisi_qm *vf_qm, struct acc_vf_data *vf_data) +{ + struct device *dev = &vf_qm->pdev->dev; + int ret; + + ret = qm_get_regs(vf_qm, vf_data); + if (ret) + return ret; 
+ + /* Every reg is 32 bit, the dma address is 64 bit. */ + vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH]; + vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET; + vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW]; + vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH]; + vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET; + vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW]; + + /* Through SQC_BT/CQC_BT to get sqc and cqc address */ + ret = hisi_qm_mb_read(vf_qm, &vf_data->sqc_dma, QM_MB_CMD_SQC_BT, 0); + if (ret) { + dev_err(dev, "failed to read SQC addr!\n"); + return ret; + } + + ret = hisi_qm_mb_read(vf_qm, &vf_data->cqc_dma, QM_MB_CMD_CQC_BT, 0); + if (ret) { + dev_err(dev, "failed to read CQC addr!\n"); + return ret; + } + + return 0; +} + +static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev, + struct hisi_acc_vf_migration_file *migf) +{ + struct acc_vf_data *vf_data = &migf->vf_data; + struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + int ret; + + if (unlikely(qm_wait_dev_not_ready(vf_qm))) { + /* Update state and return with match data */ + vf_data->vf_qm_state = QM_NOT_READY; + hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state; + migf->total_length = QM_MATCH_SIZE; + return 0; + } + + vf_data->vf_qm_state = QM_READY; + hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state; + + ret = qm_get_xqc_regs(hisi_acc_vdev, vf_data); + if (ret) + return -EINVAL; + + ret = vf_qm_read_data(vf_qm, vf_data); + if (ret) + return ret; + + migf->total_length = sizeof(struct acc_vf_data); + /* Save eqc and aeqc interrupt information */ + vf_qm_xeqc_save(vf_qm, migf); + + return 0; +} + +static struct hisi_acc_vf_core_device *hisi_acc_drvdata(struct pci_dev *pdev) +{ + struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev); + + return container_of(core_device, struct hisi_acc_vf_core_device, + core_device); +} + +/* Check the PF's RAS state and Function INT state */ +static int +hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev) +{ + struct hisi_qm *vfqm = &hisi_acc_vdev->vf_qm; + struct hisi_qm *qm = hisi_acc_vdev->pf_qm; + struct pci_dev *vf_pdev = hisi_acc_vdev->vf_dev; + struct device *dev = &qm->pdev->dev; + u32 state; + + /* Check RAS state */ + state = qm_check_reg_state(qm, QM_ABNORMAL_INT_STATUS); + if (state) { + dev_err(dev, "failed to check QM RAS state!\n"); + return -EBUSY; + } + + /* Check Function Communication state between PF and VF */ + state = qm_check_reg_state(vfqm, QM_IFC_INT_STATUS); + if (state) { + dev_err(dev, "failed to check QM IFC INT state!\n"); + return -EBUSY; + } + state = qm_check_reg_state(vfqm, QM_IFC_INT_SET_V); + if (state) { + dev_err(dev, "failed to check QM IFC INT SET state!\n"); + return -EBUSY; + } + + /* Check submodule task state */ + switch (vf_pdev->device) { + case PCI_DEVICE_ID_HUAWEI_SEC_VF: + state = qm_check_reg_state(qm, SEC_CORE_INT_STATUS); + if (state) { + dev_err(dev, "failed to check QM SEC Core INT state!\n"); + return -EBUSY; + } + return 0; + case PCI_DEVICE_ID_HUAWEI_HPRE_VF: + state = qm_check_reg_state(qm, HPRE_HAC_INT_STATUS); + if (state) { + dev_err(dev, "failed to check QM HPRE HAC INT state!\n"); + return -EBUSY; + } + return 0; + case PCI_DEVICE_ID_HUAWEI_ZIP_VF: + state = qm_check_reg_state(qm, HZIP_CORE_INT_STATUS); + if (state) { + dev_err(dev, "failed to check QM ZIP Core INT state!\n"); + return -EBUSY; + } + return 0; + default: + dev_err(dev, "failed to detect acc module type!\n"); + return -EINVAL; + } +} + +static void hisi_acc_vf_disable_fd(struct 
hisi_acc_vf_migration_file *migf) +{ + mutex_lock(&migf->lock); + migf->disabled = true; + migf->total_length = 0; + migf->filp->f_pos = 0; + mutex_unlock(&migf->lock); +} + +static void +hisi_acc_debug_migf_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev, + struct hisi_acc_vf_migration_file *src_migf) +{ + struct hisi_acc_vf_migration_file *dst_migf = hisi_acc_vdev->debug_migf; + + if (!dst_migf) + return; + + dst_migf->total_length = src_migf->total_length; + memcpy(&dst_migf->vf_data, &src_migf->vf_data, + sizeof(struct acc_vf_data)); +} + +static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vdev) +{ + if (hisi_acc_vdev->resuming_migf) { + hisi_acc_debug_migf_copy(hisi_acc_vdev, hisi_acc_vdev->resuming_migf); + hisi_acc_vf_disable_fd(hisi_acc_vdev->resuming_migf); + fput(hisi_acc_vdev->resuming_migf->filp); + hisi_acc_vdev->resuming_migf = NULL; + } + + if (hisi_acc_vdev->saving_migf) { + hisi_acc_debug_migf_copy(hisi_acc_vdev, hisi_acc_vdev->saving_migf); + hisi_acc_vf_disable_fd(hisi_acc_vdev->saving_migf); + fput(hisi_acc_vdev->saving_migf->filp); + hisi_acc_vdev->saving_migf = NULL; + } +} + +static struct hisi_acc_vf_core_device *hisi_acc_get_vf_dev(struct vfio_device *vdev) +{ + return container_of(vdev, struct hisi_acc_vf_core_device, + core_device.vdev); +} + +static void hisi_acc_vf_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev) +{ + hisi_acc_vdev->vf_qm_state = QM_NOT_READY; + hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING; + hisi_acc_vf_disable_fds(hisi_acc_vdev); +} + +static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vdev) +{ + struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + + if (hisi_acc_vdev->vf_qm_state != QM_READY) + return; + + /* Make sure the device is enabled */ + qm_dev_cmd_init(vf_qm); + + vf_qm_fun_reset(vf_qm); +} + +static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev) +{ + struct device *dev = &hisi_acc_vdev->vf_dev->dev; + struct hisi_acc_vf_migration_file *migf = hisi_acc_vdev->resuming_migf; + int ret; + + /* Recover data to VF */ + ret = vf_qm_load_data(hisi_acc_vdev, migf); + if (ret) { + dev_err(dev, "failed to recover the VF!\n"); + return ret; + } + + return 0; +} + +static int hisi_acc_vf_release_file(struct inode *inode, struct file *filp) +{ + struct hisi_acc_vf_migration_file *migf = filp->private_data; + + hisi_acc_vf_disable_fd(migf); + mutex_destroy(&migf->lock); + kfree(migf); + return 0; +} + +static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *buf, + size_t len, loff_t *pos) +{ + struct hisi_acc_vf_migration_file *migf = filp->private_data; + u8 *vf_data = (u8 *)&migf->vf_data; + loff_t requested_length; + ssize_t done = 0; + int ret; + + if (pos) + return -ESPIPE; + pos = &filp->f_pos; + + if (*pos < 0 || + check_add_overflow((loff_t)len, *pos, &requested_length)) + return -EINVAL; + + if (requested_length > sizeof(struct acc_vf_data)) + return -ENOMEM; + + mutex_lock(&migf->lock); + if (migf->disabled) { + done = -ENODEV; + goto out_unlock; + } + + ret = copy_from_user(vf_data + *pos, buf, len); + if (ret) { + done = -EFAULT; + goto out_unlock; + } + *pos += len; + done = len; + migf->total_length += len; + + ret = vf_qm_check_match(migf->hisi_acc_vdev, migf); + if (ret) + done = -EFAULT; +out_unlock: + mutex_unlock(&migf->lock); + return done; +} + +static const struct file_operations hisi_acc_vf_resume_fops = { + .owner = THIS_MODULE, + .write = hisi_acc_vf_resume_write, + .release = hisi_acc_vf_release_file, +}; + 
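
The write handler and fops above form the receive side of migration. Not part of this patch: a hedged userspace sketch of how a destination VMM could drive them, stepping the device through STOP and RESUMING with VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE and streaming the saved blob into the returned data fd, which lands in hisi_acc_vf_resume_write() above. set_mig_state() is a hypothetical helper; error rollback is omitted.

/* Hedged userspace sketch, not part of the patch: destination-side resume. */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Hypothetical helper: request a migration state, optionally return data fd. */
static int set_mig_state(int device_fd, __u32 state, int *data_fd)
{
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_feature_mig_state) + 7) / 8];
	struct vfio_device_feature *feat = (void *)buf;
	struct vfio_device_feature_mig_state *mig = (void *)feat->data;

	memset(buf, 0, sizeof(buf));
	feat->argsz = sizeof(buf);
	feat->flags = VFIO_DEVICE_FEATURE_SET |
		      VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
	mig->device_state = state;

	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feat) < 0)
		return -1;
	if (data_fd)
		*data_fd = mig->data_fd;
	return 0;
}

static int resume_vf(int device_fd, const void *blob, size_t len)
{
	int data_fd;

	if (set_mig_state(device_fd, VFIO_DEVICE_STATE_STOP, NULL) ||
	    set_mig_state(device_fd, VFIO_DEVICE_STATE_RESUMING, &data_fd))
		return -1;

	/* Streamed into hisi_acc_vf_resume_write() on the kernel side. */
	if (write(data_fd, blob, len) != (ssize_t)len)
		return -1;
	close(data_fd);

	/* RESUMING -> STOP triggers hisi_acc_vf_load_state(), then run. */
	return (set_mig_state(device_fd, VFIO_DEVICE_STATE_STOP, NULL) ||
		set_mig_state(device_fd, VFIO_DEVICE_STATE_RUNNING, NULL)) ? -1 : 0;
}
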
+static struct hisi_acc_vf_migration_file * +hisi_acc_vf_pci_resume(struct hisi_acc_vf_core_device *hisi_acc_vdev) +{ + struct hisi_acc_vf_migration_file *migf; + + migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT); + if (!migf) + return ERR_PTR(-ENOMEM); + + migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_resume_fops, migf, + O_WRONLY); + if (IS_ERR(migf->filp)) { + int err = PTR_ERR(migf->filp); + + kfree(migf); + return ERR_PTR(err); + } + + stream_open(migf->filp->f_inode, migf->filp); + mutex_init(&migf->lock); + migf->hisi_acc_vdev = hisi_acc_vdev; + return migf; +} + +static long hisi_acc_vf_precopy_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg) +{ + struct hisi_acc_vf_migration_file *migf = filp->private_data; + struct hisi_acc_vf_core_device *hisi_acc_vdev = migf->hisi_acc_vdev; + loff_t *pos = &filp->f_pos; + struct vfio_precopy_info info; + unsigned long minsz; + int ret; + + if (cmd != VFIO_MIG_GET_PRECOPY_INFO) + return -ENOTTY; + + minsz = offsetofend(struct vfio_precopy_info, dirty_bytes); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + if (info.argsz < minsz) + return -EINVAL; + + mutex_lock(&hisi_acc_vdev->state_mutex); + if (hisi_acc_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY) { + mutex_unlock(&hisi_acc_vdev->state_mutex); + return -EINVAL; + } + + mutex_lock(&migf->lock); + + if (migf->disabled) { + ret = -ENODEV; + goto out; + } + + if (*pos > migf->total_length) { + ret = -EINVAL; + goto out; + } + + info.dirty_bytes = 0; + info.initial_bytes = migf->total_length - *pos; + mutex_unlock(&migf->lock); + mutex_unlock(&hisi_acc_vdev->state_mutex); + + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; +out: + mutex_unlock(&migf->lock); + mutex_unlock(&hisi_acc_vdev->state_mutex); + return ret; +} + +static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t len, + loff_t *pos) +{ + struct hisi_acc_vf_migration_file *migf = filp->private_data; + ssize_t done = 0; + int ret; + + if (pos) + return -ESPIPE; + pos = &filp->f_pos; + + mutex_lock(&migf->lock); + if (*pos > migf->total_length) { + done = -EINVAL; + goto out_unlock; + } + + if (migf->disabled) { + done = -ENODEV; + goto out_unlock; + } + + len = min_t(size_t, migf->total_length - *pos, len); + if (len) { + u8 *vf_data = (u8 *)&migf->vf_data; + + ret = copy_to_user(buf, vf_data + *pos, len); + if (ret) { + done = -EFAULT; + goto out_unlock; + } + *pos += len; + done = len; + } +out_unlock: + mutex_unlock(&migf->lock); + return done; +} + +static const struct file_operations hisi_acc_vf_save_fops = { + .owner = THIS_MODULE, + .read = hisi_acc_vf_save_read, + .unlocked_ioctl = hisi_acc_vf_precopy_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .release = hisi_acc_vf_release_file, +}; + +static struct hisi_acc_vf_migration_file * +hisi_acc_open_saving_migf(struct hisi_acc_vf_core_device *hisi_acc_vdev) +{ + struct hisi_acc_vf_migration_file *migf; + int ret; + + migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT); + if (!migf) + return ERR_PTR(-ENOMEM); + + migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_save_fops, migf, + O_RDONLY); + if (IS_ERR(migf->filp)) { + int err = PTR_ERR(migf->filp); + + kfree(migf); + return ERR_PTR(err); + } + + stream_open(migf->filp->f_inode, migf->filp); + mutex_init(&migf->lock); + migf->hisi_acc_vdev = hisi_acc_vdev; + + ret = vf_qm_get_match_data(hisi_acc_vdev, &migf->vf_data); + if (ret) { + fput(migf->filp); + return ERR_PTR(ret); + } + + return migf; +} + +static 
struct hisi_acc_vf_migration_file * +hisi_acc_vf_pre_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev) +{ + struct hisi_acc_vf_migration_file *migf; + + migf = hisi_acc_open_saving_migf(hisi_acc_vdev); + if (IS_ERR(migf)) + return migf; + + migf->total_length = QM_MATCH_SIZE; + return migf; +} + +static struct hisi_acc_vf_migration_file * +hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev, bool open) +{ + int ret; + struct hisi_acc_vf_migration_file *migf = NULL; + + if (open) { + /* + * Userspace didn't use PRECOPY support. Hence saving_migf + * is not opened yet. + */ + migf = hisi_acc_open_saving_migf(hisi_acc_vdev); + if (IS_ERR(migf)) + return migf; + } else { + migf = hisi_acc_vdev->saving_migf; + } + + ret = vf_qm_state_save(hisi_acc_vdev, migf); + if (ret) + return ERR_PTR(ret); + + return open ? migf : NULL; +} + +static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev) +{ + struct device *dev = &hisi_acc_vdev->vf_dev->dev; + struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + int ret; + + ret = vf_qm_func_stop(vf_qm); + if (ret) { + dev_err(dev, "failed to stop QM VF function!\n"); + return ret; + } + + ret = hisi_acc_check_int_state(hisi_acc_vdev); + if (ret) { + dev_err(dev, "failed to check QM INT state!\n"); + return ret; + } + + ret = vf_qm_cache_wb(vf_qm); + if (ret) { + dev_err(dev, "failed to writeback QM cache!\n"); + return ret; + } + + return 0; +} + +static struct file * +hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev, + u32 new) +{ + u32 cur = hisi_acc_vdev->mig_state; + int ret; + + if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) { + struct hisi_acc_vf_migration_file *migf; + + migf = hisi_acc_vf_pre_copy(hisi_acc_vdev); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + hisi_acc_vdev->saving_migf = migf; + return migf->filp; + } + + if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_STOP_COPY) { + struct hisi_acc_vf_migration_file *migf; + + ret = hisi_acc_vf_stop_device(hisi_acc_vdev); + if (ret) + return ERR_PTR(ret); + + migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, false); + if (IS_ERR(migf)) + return ERR_CAST(migf); + + return NULL; + } + + if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) { + ret = hisi_acc_vf_stop_device(hisi_acc_vdev); + if (ret) + return ERR_PTR(ret); + return NULL; + } + + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) { + struct hisi_acc_vf_migration_file *migf; + + migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, true); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + hisi_acc_vdev->saving_migf = migf; + return migf->filp; + } + + if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP)) { + hisi_acc_vf_disable_fds(hisi_acc_vdev); + return NULL; + } + + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) { + struct hisi_acc_vf_migration_file *migf; + + migf = hisi_acc_vf_pci_resume(hisi_acc_vdev); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + hisi_acc_vdev->resuming_migf = migf; + return migf->filp; + } + + if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) { + ret = hisi_acc_vf_load_state(hisi_acc_vdev); + if (ret) + return ERR_PTR(ret); + hisi_acc_vf_disable_fds(hisi_acc_vdev); + return NULL; + } + + if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) { + hisi_acc_vf_disable_fds(hisi_acc_vdev); + return NULL; + } + + if (cur == 
VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) { + hisi_acc_vf_start_device(hisi_acc_vdev); + return NULL; + } + + /* + * vfio_mig_get_next_state() does not use arcs other than the above + */ + WARN_ON(true); + return ERR_PTR(-EINVAL); +} + +static struct file * +hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev, + enum vfio_device_mig_state new_state) +{ + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); + enum vfio_device_mig_state next_state; + struct file *res = NULL; + int ret; + + mutex_lock(&hisi_acc_vdev->state_mutex); + while (new_state != hisi_acc_vdev->mig_state) { + ret = vfio_mig_get_next_state(vdev, + hisi_acc_vdev->mig_state, + new_state, &next_state); + if (ret) { + res = ERR_PTR(-EINVAL); + break; + } + + res = hisi_acc_vf_set_device_state(hisi_acc_vdev, next_state); + if (IS_ERR(res)) + break; + hisi_acc_vdev->mig_state = next_state; + if (WARN_ON(res && new_state != hisi_acc_vdev->mig_state)) { + fput(res); + res = ERR_PTR(-EINVAL); + break; + } + } + mutex_unlock(&hisi_acc_vdev->state_mutex); + return res; +} + +static int +hisi_acc_vfio_pci_get_data_size(struct vfio_device *vdev, + unsigned long *stop_copy_length) +{ + *stop_copy_length = sizeof(struct acc_vf_data); + return 0; +} + +static int +hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev, + enum vfio_device_mig_state *curr_state) +{ + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); + + mutex_lock(&hisi_acc_vdev->state_mutex); + *curr_state = hisi_acc_vdev->mig_state; + mutex_unlock(&hisi_acc_vdev->state_mutex); + return 0; +} + +static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev) +{ + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev); + + if (hisi_acc_vdev->core_device.vdev.mig_ops) + return; + + mutex_lock(&hisi_acc_vdev->state_mutex); + hisi_acc_vf_reset(hisi_acc_vdev); + mutex_unlock(&hisi_acc_vdev->state_mutex); +} + +static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev) +{ + struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device; + struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm; + struct pci_dev *vf_dev = vdev->pdev; + + if (pf_qm->ver == QM_HW_V3) { + /* + * ACC VF dev BAR2 region consists of both functional register space + * and migration control register space. For migration to work, we + * need access to both. Hence, we map the entire BAR2 region here. + * But unnecessarily exposing the migration BAR region to the Guest + * has the potential to prevent/corrupt the Guest migration. Hence, + * we restrict access to the migration control space from + * Guest(Please see mmap/ioctl/read/write override functions). + * + * Please note that it is OK to expose the entire VF BAR if migration + * is not supported or required as this cannot affect the ACC PF + * configurations. + * + * Also the HiSilicon ACC VF devices supported by this driver on + * HiSilicon hardware platforms are integrated end point devices + * and the platform lacks the capability to perform any PCIe P2P + * between these devices. + */ + + vf_qm->io_base = + ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX), + pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX)); + if (!vf_qm->io_base) + return -EIO; + + vf_qm->fun_type = QM_HW_VF; + vf_qm->ver = pf_qm->ver; + } else { + /* + * In the new HW platform, the migration function register space is in BAR2 of PF, + * and each VF occupies 8KB address space. 
+ */ + vf_qm->io_base = pf_qm->io_base + QM_MIG_REGION_OFFSET + + hisi_acc_vdev->vf_id * QM_MIG_REGION_SIZE; + vf_qm->fun_type = QM_HW_PF; + } + vf_qm->pdev = vf_dev; + mutex_init(&vf_qm->mailbox_lock); + + return 0; +} + +static struct hisi_qm *hisi_acc_get_pf_qm(struct pci_dev *pdev) +{ + struct hisi_qm *pf_qm; + struct pci_driver *pf_driver; + + if (!pdev->is_virtfn) + return NULL; + + switch (pdev->device) { + case PCI_DEVICE_ID_HUAWEI_SEC_VF: + pf_driver = hisi_sec_get_pf_driver(); + break; + case PCI_DEVICE_ID_HUAWEI_HPRE_VF: + pf_driver = hisi_hpre_get_pf_driver(); + break; + case PCI_DEVICE_ID_HUAWEI_ZIP_VF: + pf_driver = hisi_zip_get_pf_driver(); + break; + default: + return NULL; + } + + if (!pf_driver) + return NULL; + + pf_qm = pci_iov_get_pf_drvdata(pdev, pf_driver); + + return !IS_ERR(pf_qm) ? pf_qm : NULL; +} + +static size_t hisi_acc_get_resource_len(struct vfio_pci_core_device *vdev, + unsigned int index) +{ + struct hisi_acc_vf_core_device *hisi_acc_vdev = + hisi_acc_drvdata(vdev->pdev); + + /* + * ACC VF dev 64KB BAR2 region consists of both functional + * register space and migration control register space, each + * uses 32KB BAR2 region, on the system with more than 64KB + * page size, even if the migration control register space + * is written by VM, it will only affects the VF. + * + * In order to support the live migration function in the + * system with a page size above 64KB, the driver needs + * to ensure that the VF region size is aligned with the + * system page size. + * + * On the new hardware platform, the live migration control register + * has been moved from VF to PF. + */ + if (hisi_acc_vdev->pf_qm->ver == QM_HW_V3) + return (pci_resource_len(vdev->pdev, index) >> 1); + + return pci_resource_len(vdev->pdev, index); +} + +static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev, + size_t count, loff_t *ppos, + size_t *new_count) +{ + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + struct vfio_pci_core_device *vdev = + container_of(core_vdev, struct vfio_pci_core_device, vdev); + + if (index == VFIO_PCI_BAR2_REGION_INDEX) { + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + resource_size_t end; + + end = hisi_acc_get_resource_len(vdev, index); + /* Check if access is for migration control region */ + if (pos >= end) + return -EINVAL; + + *new_count = min(count, (size_t)(end - pos)); + } + + return 0; +} + +static int hisi_acc_vfio_pci_mmap(struct vfio_device *core_vdev, + struct vm_area_struct *vma) +{ + struct vfio_pci_core_device *vdev = + container_of(core_vdev, struct vfio_pci_core_device, vdev); + unsigned int index; + + index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); + if (index == VFIO_PCI_BAR2_REGION_INDEX) { + u64 req_len, pgoff, req_start; + resource_size_t end; + + end = PAGE_ALIGN(hisi_acc_get_resource_len(vdev, index)); + req_len = vma->vm_end - vma->vm_start; + pgoff = vma->vm_pgoff & + ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); + req_start = pgoff << PAGE_SHIFT; + + if (req_start + req_len > end) + return -EINVAL; + } + + return vfio_pci_core_mmap(core_vdev, vma); +} + +static ssize_t hisi_acc_vfio_pci_write(struct vfio_device *core_vdev, + const char __user *buf, size_t count, + loff_t *ppos) +{ + size_t new_count = count; + int ret; + + ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count); + if (ret) + return ret; + + return vfio_pci_core_write(core_vdev, buf, new_count, ppos); +} + +static ssize_t hisi_acc_vfio_pci_read(struct vfio_device *core_vdev, + char __user *buf, size_t 
count, + loff_t *ppos) +{ + size_t new_count = count; + int ret; + + ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count); + if (ret) + return ret; + + return vfio_pci_core_read(core_vdev, buf, new_count, ppos); +} + +static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int cmd, + unsigned long arg) +{ + if (cmd == VFIO_DEVICE_GET_REGION_INFO) { + struct vfio_pci_core_device *vdev = + container_of(core_vdev, struct vfio_pci_core_device, vdev); + struct vfio_region_info info; + unsigned long minsz; + + minsz = offsetofend(struct vfio_region_info, offset); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + if (info.index == VFIO_PCI_BAR2_REGION_INDEX) { + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + + info.size = hisi_acc_get_resource_len(vdev, info.index); + + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE | + VFIO_REGION_INFO_FLAG_MMAP; + + return copy_to_user((void __user *)arg, &info, minsz) ? + -EFAULT : 0; + } + } + return vfio_pci_core_ioctl(core_vdev, cmd, arg); +} + +static int hisi_acc_vf_debug_check(struct seq_file *seq, struct vfio_device *vdev) +{ + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); + struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + int ret; + + lockdep_assert_held(&hisi_acc_vdev->open_mutex); + /* + * When the device is not opened, the io_base is not mapped. + * The driver cannot perform device read and write operations. + */ + if (!hisi_acc_vdev->dev_opened) { + seq_puts(seq, "device not opened!\n"); + return -EINVAL; + } + + ret = qm_wait_dev_not_ready(vf_qm); + if (ret) { + seq_puts(seq, "VF device not ready!\n"); + return ret; + } + + return 0; +} + +static int hisi_acc_vf_debug_cmd(struct seq_file *seq, void *data) +{ + struct device *vf_dev = seq->private; + struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev); + struct vfio_device *vdev = &core_device->vdev; + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); + struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + u64 value; + int ret; + + mutex_lock(&hisi_acc_vdev->open_mutex); + ret = hisi_acc_vf_debug_check(seq, vdev); + if (ret) { + mutex_unlock(&hisi_acc_vdev->open_mutex); + return ret; + } + + value = readl(vf_qm->io_base + QM_MB_CMD_SEND_BASE); + if (value == QM_MB_CMD_NOT_READY) { + mutex_unlock(&hisi_acc_vdev->open_mutex); + seq_puts(seq, "mailbox cmd channel not ready!\n"); + return -EINVAL; + } + mutex_unlock(&hisi_acc_vdev->open_mutex); + seq_puts(seq, "mailbox cmd channel ready!\n"); + + return 0; +} + +static int hisi_acc_vf_dev_read(struct seq_file *seq, void *data) +{ + struct device *vf_dev = seq->private; + struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev); + struct vfio_device *vdev = &core_device->vdev; + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); + size_t vf_data_sz = offsetofend(struct acc_vf_data, padding); + struct acc_vf_data *vf_data; + int ret; + + mutex_lock(&hisi_acc_vdev->open_mutex); + ret = hisi_acc_vf_debug_check(seq, vdev); + if (ret) { + mutex_unlock(&hisi_acc_vdev->open_mutex); + return ret; + } + + mutex_lock(&hisi_acc_vdev->state_mutex); + vf_data = kzalloc(sizeof(*vf_data), GFP_KERNEL); + if (!vf_data) { + ret = -ENOMEM; + goto mutex_release; + } + + vf_data->vf_qm_state = hisi_acc_vdev->vf_qm_state; + ret = vf_qm_read_data(&hisi_acc_vdev->vf_qm, vf_data); + if (ret) + goto migf_err; + + seq_hex_dump(seq, "Dev 
Data:", DUMP_PREFIX_OFFSET, 16, 1, + (const void *)vf_data, vf_data_sz, false); + + seq_printf(seq, + "guest driver load: %u\n" + "data size: %lu\n", + hisi_acc_vdev->vf_qm_state, + sizeof(struct acc_vf_data)); + +migf_err: + kfree(vf_data); +mutex_release: + mutex_unlock(&hisi_acc_vdev->state_mutex); + mutex_unlock(&hisi_acc_vdev->open_mutex); + + return ret; +} + +static int hisi_acc_vf_migf_read(struct seq_file *seq, void *data) +{ + struct device *vf_dev = seq->private; + struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev); + struct vfio_device *vdev = &core_device->vdev; + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev); + size_t vf_data_sz = offsetofend(struct acc_vf_data, padding); + struct hisi_acc_vf_migration_file *debug_migf = hisi_acc_vdev->debug_migf; + + /* Check whether the live migration operation has been performed */ + if (debug_migf->total_length < QM_MATCH_SIZE) { + seq_puts(seq, "device not migrated!\n"); + return -EAGAIN; + } + + seq_hex_dump(seq, "Mig Data:", DUMP_PREFIX_OFFSET, 16, 1, + (const void *)&debug_migf->vf_data, vf_data_sz, false); + seq_printf(seq, "migrate data length: %lu\n", debug_migf->total_length); + + return 0; +} + +static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev) +{ + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev); + struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device; + int ret; + + ret = vfio_pci_core_enable(vdev); + if (ret) + return ret; + + if (core_vdev->mig_ops) { + mutex_lock(&hisi_acc_vdev->open_mutex); + ret = hisi_acc_vf_qm_init(hisi_acc_vdev); + if (ret) { + mutex_unlock(&hisi_acc_vdev->open_mutex); + vfio_pci_core_disable(vdev); + return ret; + } + hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING; + hisi_acc_vdev->dev_opened = true; + mutex_unlock(&hisi_acc_vdev->open_mutex); + } + + vfio_pci_core_finish_enable(vdev); + return 0; +} + +static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev) +{ + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev); + struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; + + hisi_acc_vf_disable_fds(hisi_acc_vdev); + mutex_lock(&hisi_acc_vdev->open_mutex); + hisi_acc_vdev->dev_opened = false; + if (vf_qm->ver == QM_HW_V3) + iounmap(vf_qm->io_base); + mutex_unlock(&hisi_acc_vdev->open_mutex); + vfio_pci_core_close_device(core_vdev); +} + +static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = { + .migration_set_state = hisi_acc_vfio_pci_set_device_state, + .migration_get_state = hisi_acc_vfio_pci_get_device_state, + .migration_get_data_size = hisi_acc_vfio_pci_get_data_size, +}; + +static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev) +{ + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev); + struct pci_dev *pdev = to_pci_dev(core_vdev->dev); + struct hisi_qm *pf_qm = hisi_acc_get_pf_qm(pdev); + + hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1; + hisi_acc_vdev->pf_qm = pf_qm; + hisi_acc_vdev->vf_dev = pdev; + hisi_acc_vdev->vf_qm_state = QM_NOT_READY; + mutex_init(&hisi_acc_vdev->state_mutex); + mutex_init(&hisi_acc_vdev->open_mutex); + + core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY; + core_vdev->mig_ops = &hisi_acc_vfio_pci_migrn_state_ops; + + return vfio_pci_core_init_dev(core_vdev); +} + +static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = { + .name = "hisi-acc-vfio-pci-migration", + .init = hisi_acc_vfio_pci_migrn_init_dev, + .release = 
vfio_pci_core_release_dev,
+	.open_device = hisi_acc_vfio_pci_open_device,
+	.close_device = hisi_acc_vfio_pci_close_device,
+	.ioctl = hisi_acc_vfio_pci_ioctl,
+	.device_feature = vfio_pci_core_ioctl_feature,
+	.read = hisi_acc_vfio_pci_read,
+	.write = hisi_acc_vfio_pci_write,
+	.mmap = hisi_acc_vfio_pci_mmap,
+	.request = vfio_pci_core_request,
+	.match = vfio_pci_core_match,
+	.bind_iommufd = vfio_iommufd_physical_bind,
+	.unbind_iommufd = vfio_iommufd_physical_unbind,
+	.attach_ioas = vfio_iommufd_physical_attach_ioas,
+	.detach_ioas = vfio_iommufd_physical_detach_ioas,
+};
+
+static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
+	.name = "hisi-acc-vfio-pci",
+	.init = vfio_pci_core_init_dev,
+	.release = vfio_pci_core_release_dev,
+	.open_device = hisi_acc_vfio_pci_open_device,
+	.close_device = vfio_pci_core_close_device,
+	.ioctl = vfio_pci_core_ioctl,
+	.device_feature = vfio_pci_core_ioctl_feature,
+	.read = vfio_pci_core_read,
+	.write = vfio_pci_core_write,
+	.mmap = vfio_pci_core_mmap,
+	.request = vfio_pci_core_request,
+	.match = vfio_pci_core_match,
+	.bind_iommufd = vfio_iommufd_physical_bind,
+	.unbind_iommufd = vfio_iommufd_physical_unbind,
+	.attach_ioas = vfio_iommufd_physical_attach_ioas,
+	.detach_ioas = vfio_iommufd_physical_detach_ioas,
+};
+
+static void hisi_acc_vfio_debug_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+	struct vfio_device *vdev = &hisi_acc_vdev->core_device.vdev;
+	struct hisi_acc_vf_migration_file *migf;
+	struct dentry *vfio_dev_migration;
+	struct dentry *vfio_hisi_acc;
+	struct device *dev = vdev->dev;
+
+	if (!debugfs_initialized() ||
+	    !IS_ENABLED(CONFIG_VFIO_DEBUGFS))
+		return;
+
+	if (vdev->ops != &hisi_acc_vfio_pci_migrn_ops)
+		return;
+
+	vfio_dev_migration = debugfs_lookup("migration", vdev->debug_root);
+	if (!vfio_dev_migration) {
+		dev_err(dev, "failed to lookup migration debugfs file!\n");
+		return;
+	}
+
+	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
+	if (!migf)
+		return;
+	hisi_acc_vdev->debug_migf = migf;
+
+	vfio_hisi_acc = debugfs_create_dir("hisi_acc", vfio_dev_migration);
+	debugfs_create_devm_seqfile(dev, "dev_data", vfio_hisi_acc,
+				    hisi_acc_vf_dev_read);
+	debugfs_create_devm_seqfile(dev, "migf_data", vfio_hisi_acc,
+				    hisi_acc_vf_migf_read);
+	debugfs_create_devm_seqfile(dev, "cmd_state", vfio_hisi_acc,
+				    hisi_acc_vf_debug_cmd);
+}
+
+static void hisi_acc_vf_debugfs_exit(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+{
+	kfree(hisi_acc_vdev->debug_migf);
+	hisi_acc_vdev->debug_migf = NULL;
+}
+
+static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct hisi_acc_vf_core_device *hisi_acc_vdev;
+	const struct vfio_device_ops *ops = &hisi_acc_vfio_pci_ops;
+	struct hisi_qm *pf_qm;
+	int vf_id;
+	int ret;
+
+	pf_qm = hisi_acc_get_pf_qm(pdev);
+	if (pf_qm && pf_qm->ver >= QM_HW_V3) {
+		vf_id = pci_iov_vf_id(pdev);
+		if (vf_id >= 0)
+			ops = &hisi_acc_vfio_pci_migrn_ops;
+		else
+			pci_warn(pdev, "migration support failed, continue with generic interface\n");
+	}
+
+	hisi_acc_vdev = vfio_alloc_device(hisi_acc_vf_core_device,
+					  core_device.vdev, &pdev->dev, ops);
+	if (IS_ERR(hisi_acc_vdev))
+		return PTR_ERR(hisi_acc_vdev);
+
+	dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device);
+	ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
+	if (ret)
+		goto out_put_vdev;
+
+	hisi_acc_vfio_debug_init(hisi_acc_vdev);
+	return 0;
+
+out_put_vdev:
+	vfio_put_device(&hisi_acc_vdev->core_device.vdev);
+	return ret;
+}
+
+static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev)
+{
+	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
+
+	vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device);
+	hisi_acc_vf_debugfs_exit(hisi_acc_vdev);
+	vfio_put_device(&hisi_acc_vdev->core_device.vdev);
+}
+
+static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
+	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
+	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
+	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);
+
+static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
+	.reset_done = hisi_acc_vf_pci_aer_reset_done,
+	.error_detected = vfio_pci_core_aer_err_detected,
+};
+
+static struct pci_driver hisi_acc_vfio_pci_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = hisi_acc_vfio_pci_table,
+	.probe = hisi_acc_vfio_pci_probe,
+	.remove = hisi_acc_vfio_pci_remove,
+	.err_handler = &hisi_acc_vf_err_handlers,
+	.driver_managed_dma = true,
+};
+
+module_pci_driver(hisi_acc_vfio_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Liu Longfang ");
+MODULE_AUTHOR("Shameer Kolothum ");
+MODULE_DESCRIPTION("HiSilicon VFIO PCI - VFIO PCI driver with live migration support for HiSilicon ACC device family");
diff --git a/KAEKernelDriver/KAEKernelDriver-OLK-6.6/hisilicon/vfio/hisi_acc_vfio_pci.h b/KAEKernelDriver/KAEKernelDriver-OLK-6.6/hisilicon/vfio/hisi_acc_vfio_pci.h
new file mode 100644
index 0000000000000000000000000000000000000000..4544d2291d8beadddfaa77eb54a97dbdda7043d7
--- /dev/null
+++ b/KAEKernelDriver/KAEKernelDriver-OLK-6.6/hisilicon/vfio/hisi_acc_vfio_pci.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 HiSilicon Ltd. */
+
+#ifndef HISI_ACC_VFIO_PCI_H
+#define HISI_ACC_VFIO_PCI_H
+
+#include "../hisi_acc_qm.h"
+
+#define MB_POLL_PERIOD_US		10
+#define MB_POLL_TIMEOUT_US		1000
+#define QM_CACHE_WB_START		0x204
+#define QM_CACHE_WB_DONE		0x208
+#define QM_MB_CMD_PAUSE_QM		0xe
+#define QM_ABNORMAL_INT_STATUS		0x100008
+#define QM_IFC_INT_STATUS		0x0028
+#define SEC_CORE_INT_STATUS		0x301008
+#define HPRE_HAC_INT_STATUS		0x301800
+#define HZIP_CORE_INT_STATUS		0x3010AC
+
+#define QM_VFT_CFG_RDY			0x10006c
+#define QM_VFT_CFG_OP_WR		0x100058
+#define QM_VFT_CFG_TYPE			0x10005c
+#define QM_VFT_CFG			0x100060
+#define QM_VFT_CFG_OP_ENABLE		0x100054
+#define QM_VFT_CFG_DATA_L		0x100064
+#define QM_VFT_CFG_DATA_H		0x100068
+
+#define ERROR_CHECK_TIMEOUT		100
+#define CHECK_DELAY_TIME		100
+
+#define QM_SQC_VFT_BASE_SHIFT_V2	28
+#define QM_SQC_VFT_BASE_MASK_V2		GENMASK(15, 0)
+#define QM_SQC_VFT_NUM_SHIFT_V2		45
+#define QM_SQC_VFT_NUM_MASK_V2		GENMASK(9, 0)
+#define QM_MB_CMD_NOT_READY		0xffffffff
+
+/* RW regs */
+#define QM_REGS_MAX_LEN		7
+#define QM_REG_ADDR_OFFSET	0x0004
+
+#define QM_XQC_ADDR_OFFSET	32U
+#define QM_XQC_ADDR_LOW		0x1
+#define QM_XQC_ADDR_HIGH	0x2
+
+#define QM_VF_AEQ_INT_MASK	0x0004
+#define QM_VF_EQ_INT_MASK	0x000c
+#define QM_IFC_INT_SOURCE_V	0x0020
+#define QM_IFC_INT_MASK		0x0024
+#define QM_IFC_INT_SET_V	0x002c
+#define QM_QUE_ISO_CFG_V	0x0030
+#define QM_PAGE_SIZE		0x0034
+
+#define QM_EQC_DW0		0X8000
+#define QM_AEQC_DW0		0X8020
+
+#define ACC_DRV_MAJOR_VER	1
+#define ACC_DRV_MINOR_VER	0
+
+#define ACC_DEV_MAGIC_V1	0XCDCDCDCDFEEDAACC
+#define ACC_DEV_MAGIC_V2	0xAACCFEEDDECADEDE
+
+#define QM_MIG_REGION_OFFSET	0x180000
+#define QM_MIG_REGION_SIZE	0x2000
+
+#define QM_SUB_VERSION_ID	0x100210
+#define QM_EQC_PF_DW0		0x1c00
+#define QM_AEQC_PF_DW0		0x1c20
+
+struct acc_vf_data {
+#define QM_MATCH_SIZE offsetofend(struct acc_vf_data, qm_rsv_state)
+	/* QM match information */
+	u64 acc_magic;
+	u32 qp_num;
+	u32 dev_id;
+	u32 que_iso_cfg;
+	u32 qp_base;
+	u32 vf_qm_state;
+	/* QM reserved match information */
+	u16 major_ver;
+	u16 minor_ver;
+	u32 qm_rsv_state[2];
+
+	/* QM RW regs */
+	u32 aeq_int_mask;
+	u32 eq_int_mask;
+	u32 ifc_int_source;
+	u32 ifc_int_mask;
+	u32 ifc_int_set;
+	u32 page_size;
+
+	/* QM_EQC_DW has 7 regs */
+	u32 qm_eqc_dw[7];
+
+	/* QM_AEQC_DW has 7 regs */
+	u32 qm_aeqc_dw[7];
+
+	/* QM reserved 5 regs */
+	u32 qm_rsv_regs[5];
+	u32 padding;
+	/* QM memory init information */
+	u64 eqe_dma;
+	u64 aeqe_dma;
+	u64 sqc_dma;
+	u64 cqc_dma;
+};
+
+struct hisi_acc_vf_migration_file {
+	struct file *filp;
+	struct mutex lock;
+	bool disabled;
+
+	struct hisi_acc_vf_core_device *hisi_acc_vdev;
+	struct acc_vf_data vf_data;
+	size_t total_length;
+};
+
+struct hisi_acc_vf_core_device {
+	struct vfio_pci_core_device core_device;
+	u8 match_done;
+	/*
+	 * io_base is only valid when dev_opened is true,
+	 * which is protected by open_mutex.
+	 */
+	bool dev_opened;
+	/* Ensure the accuracy of dev_opened operation */
+	struct mutex open_mutex;
+
+	/* For migration state */
+	struct mutex state_mutex;
+	enum vfio_device_mig_state mig_state;
+	struct pci_dev *pf_dev;
+	struct pci_dev *vf_dev;
+	struct hisi_qm *pf_qm;
+	struct hisi_qm vf_qm;
+	/*
+	 * vf_qm_state represents the QM_VF_STATE register value.
+	 * It is set by Guest driver for the ACC VF dev indicating
+	 * the driver has loaded and configured the dev correctly.
+	 */
+	u32 vf_qm_state;
+	int vf_id;
+	struct hisi_acc_vf_migration_file *resuming_migf;
+	struct hisi_acc_vf_migration_file *saving_migf;
+
+	/*
+	 * It holds migration data corresponding to the last migration
+	 * and is used by the debugfs interface to report it.
+	 */
+	struct hisi_acc_vf_migration_file *debug_migf;
+};
+#endif /* HISI_ACC_VFIO_PCI_H */