|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH 2/2] amd-iommu: use a bitfield for DTE
On 2/4/19 5:19 AM, Paul Durrant wrote:
> The current use of get/set_field_from/in_reg_u32() is both inefficient and
> requires some ugly casting.
>
> This patch defines a new bitfield structure (amd_iommu_dte) and uses this
> structure in all DTE manipulation, resulting in much more readable and
> compact code.
>
> NOTE: This patch also includes some clean-up of get_dma_requestor_id() to
> change the types of the arguments from u16 to uint16_t.
>
> Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Acked-by: Brian Woods <brian.woods@xxxxxxx>
> ---
> Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
> Cc: Brian Woods <brian.woods@xxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
> Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>
> ---
> xen/drivers/passthrough/amd/iommu_guest.c | 55 ++---
> xen/drivers/passthrough/amd/iommu_map.c | 199 +++++-------------
> xen/drivers/passthrough/amd/pci_amd_iommu.c | 51 ++---
> xen/include/asm-x86/amd-iommu.h | 5 -
> xen/include/asm-x86/hvm/svm/amd-iommu-defs.h | 120 +++++------
> xen/include/asm-x86/hvm/svm/amd-iommu-proto.h | 20 +-
> 6 files changed, 139 insertions(+), 311 deletions(-)
>
> diff --git a/xen/drivers/passthrough/amd/iommu_guest.c
> b/xen/drivers/passthrough/amd/iommu_guest.c
> index 96175bb9ac..328e7509d5 100644
> --- a/xen/drivers/passthrough/amd/iommu_guest.c
> +++ b/xen/drivers/passthrough/amd/iommu_guest.c
> @@ -76,39 +76,10 @@ static void guest_iommu_disable(struct guest_iommu *iommu)
> iommu->enabled = 0;
> }
>
> -static uint64_t get_guest_cr3_from_dte(dev_entry_t *dte)
> +static uint64_t get_guest_cr3_from_dte(struct amd_iommu_dte *dte)
> {
> - uint64_t gcr3_1, gcr3_2, gcr3_3;
> -
> - gcr3_1 = get_field_from_reg_u32(dte->data[1],
> - IOMMU_DEV_TABLE_GCR3_1_MASK,
> - IOMMU_DEV_TABLE_GCR3_1_SHIFT);
> - gcr3_2 = get_field_from_reg_u32(dte->data[2],
> - IOMMU_DEV_TABLE_GCR3_2_MASK,
> - IOMMU_DEV_TABLE_GCR3_2_SHIFT);
> - gcr3_3 = get_field_from_reg_u32(dte->data[3],
> - IOMMU_DEV_TABLE_GCR3_3_MASK,
> - IOMMU_DEV_TABLE_GCR3_3_SHIFT);
> -
> - return ((gcr3_3 << 31) | (gcr3_2 << 15 ) | (gcr3_1 << 12)) >> PAGE_SHIFT;
> -}
> -
> -static uint16_t get_domid_from_dte(dev_entry_t *dte)
> -{
> - return get_field_from_reg_u32(dte->data[2],
> IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
> - IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT);
> -}
> -
> -static uint16_t get_glx_from_dte(dev_entry_t *dte)
> -{
> - return get_field_from_reg_u32(dte->data[1], IOMMU_DEV_TABLE_GLX_MASK,
> - IOMMU_DEV_TABLE_GLX_SHIFT);
> -}
> -
> -static uint16_t get_gv_from_dte(dev_entry_t *dte)
> -{
> - return get_field_from_reg_u32(dte->data[1],IOMMU_DEV_TABLE_GV_MASK,
> - IOMMU_DEV_TABLE_GV_SHIFT);
> + return ((dte->gcr3_trp_51_31 << 31) | (dte->gcr3_trp_30_15 << 15) |
> + (dte->gcr3_trp_14_12 << 12)) >> PAGE_SHIFT;
> }
>
> static unsigned int host_domid(struct domain *d, uint64_t g_domid)
> @@ -397,7 +368,7 @@ static int do_completion_wait(struct domain *d,
> cmd_entry_t *cmd)
> static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd)
> {
> uint16_t gbdf, mbdf, req_id, gdom_id, hdom_id;
> - dev_entry_t *gdte, *mdte, *dte_base;
> + struct amd_iommu_dte *gdte, *mdte, *dte_base;
> struct amd_iommu *iommu = NULL;
> struct guest_iommu *g_iommu;
> uint64_t gcr3_gfn, gcr3_mfn;
> @@ -414,23 +385,23 @@ static int do_invalidate_dte(struct domain *d,
> cmd_entry_t *cmd)
> return 0;
>
> /* Sometimes guest invalidates devices from non-exists dtes */
> - if ( (gbdf * sizeof(dev_entry_t)) > g_iommu->dev_table.size )
> + if ( (gbdf * sizeof(struct amd_iommu_dte)) > g_iommu->dev_table.size )
> return 0;
>
> dte_mfn = guest_iommu_get_table_mfn(d,
>
> reg_to_u64(g_iommu->dev_table.reg_base),
> - sizeof(dev_entry_t), gbdf);
> + sizeof(struct amd_iommu_dte), gbdf);
> ASSERT(mfn_valid(_mfn(dte_mfn)));
>
> /* Read guest dte information */
> dte_base = map_domain_page(_mfn(dte_mfn));
>
> - gdte = dte_base + gbdf % (PAGE_SIZE / sizeof(dev_entry_t));
> + gdte = &dte_base[gbdf % (PAGE_SIZE / sizeof(struct amd_iommu_dte))];
>
> - gdom_id = get_domid_from_dte(gdte);
> + gdom_id = gdte->domain_id;
> gcr3_gfn = get_guest_cr3_from_dte(gdte);
> - glx = get_glx_from_dte(gdte);
> - gv = get_gv_from_dte(gdte);
> + glx = gdte->glx;
> + gv = gdte->gv;
>
> unmap_domain_page(dte_base);
>
> @@ -454,11 +425,11 @@ static int do_invalidate_dte(struct domain *d,
> cmd_entry_t *cmd)
> /* Setup host device entry */
> hdom_id = host_domid(d, gdom_id);
> req_id = get_dma_requestor_id(iommu->seg, mbdf);
> - mdte = iommu->dev_table.buffer + (req_id * sizeof(dev_entry_t));
> + dte_base = iommu->dev_table.buffer;
> + mdte = &dte_base[req_id];
>
> spin_lock_irqsave(&iommu->lock, flags);
> - iommu_dte_set_guest_cr3((u32 *)mdte, hdom_id,
> - gcr3_mfn << PAGE_SHIFT, gv, glx);
> + iommu_dte_set_guest_cr3(mdte, hdom_id, gcr3_mfn, gv, glx);
>
> amd_iommu_flush_device(iommu, req_id);
> spin_unlock_irqrestore(&iommu->lock, flags);
> diff --git a/xen/drivers/passthrough/amd/iommu_map.c
> b/xen/drivers/passthrough/amd/iommu_map.c
> index 5fda6063df..cbf00e9e72 100644
> --- a/xen/drivers/passthrough/amd/iommu_map.c
> +++ b/xen/drivers/passthrough/amd/iommu_map.c
> @@ -99,167 +99,64 @@ static unsigned int set_iommu_pte_present(unsigned long
> pt_mfn,
> return flush_flags;
> }
>
> -void amd_iommu_set_root_page_table(uint32_t *dte, uint64_t root_ptr,
> - uint16_t domain_id, uint8_t paging_mode,
> - uint8_t valid)
> +void amd_iommu_set_root_page_table(struct amd_iommu_dte *dte,
> + uint64_t root_ptr, uint16_t domain_id,
> + uint8_t paging_mode, uint8_t valid)
> {
> - uint32_t addr_hi, addr_lo, entry;
> - set_field_in_reg_u32(domain_id, 0,
> - IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
> - IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT, &entry);
> - dte[2] = entry;
> -
> - addr_lo = root_ptr & DMA_32BIT_MASK;
> - addr_hi = root_ptr >> 32;
> -
> - set_field_in_reg_u32(addr_hi, 0,
> - IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
> - IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT, &entry);
> - set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
> - IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_MASK,
> - IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_SHIFT, &entry);
> - set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
> - IOMMU_DEV_TABLE_IO_READ_PERMISSION_MASK,
> - IOMMU_DEV_TABLE_IO_READ_PERMISSION_SHIFT, &entry);
> - dte[1] = entry;
> -
> - set_field_in_reg_u32(addr_lo >> PAGE_SHIFT, 0,
> - IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
> - IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT, &entry);
> - set_field_in_reg_u32(paging_mode, entry,
> - IOMMU_DEV_TABLE_PAGING_MODE_MASK,
> - IOMMU_DEV_TABLE_PAGING_MODE_SHIFT, &entry);
> - set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
> - IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
> - IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT, &entry);
> - set_field_in_reg_u32(valid ? IOMMU_CONTROL_ENABLED :
> - IOMMU_CONTROL_DISABLED, entry,
> - IOMMU_DEV_TABLE_VALID_MASK,
> - IOMMU_DEV_TABLE_VALID_SHIFT, &entry);
> - dte[0] = entry;
> -}
> -
> -void iommu_dte_set_iotlb(uint32_t *dte, uint8_t i)
> -{
> - uint32_t entry;
> -
> - entry = dte[3];
> - set_field_in_reg_u32(!!i, entry,
> - IOMMU_DEV_TABLE_IOTLB_SUPPORT_MASK,
> - IOMMU_DEV_TABLE_IOTLB_SUPPORT_SHIFT, &entry);
> - dte[3] = entry;
> + dte->domain_id = domain_id;
> + dte->pt_root = paddr_to_pfn(root_ptr);
> + dte->iw = 1;
> + dte->ir = 1;
> + dte->paging_mode = paging_mode;
> + dte->tv = 1;
> + dte->v = valid;
> }
>
> void __init amd_iommu_set_intremap_table(
> - uint32_t *dte, uint64_t intremap_ptr, uint8_t int_valid)
> + struct amd_iommu_dte *dte, uint64_t intremap_ptr, uint8_t int_valid)
> {
> - uint32_t addr_hi, addr_lo, entry;
> -
> - addr_lo = intremap_ptr & DMA_32BIT_MASK;
> - addr_hi = intremap_ptr >> 32;
> -
> - entry = dte[5];
> - set_field_in_reg_u32(addr_hi, entry,
> - IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_MASK,
> - IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_SHIFT, &entry);
> - /* Fixed and arbitrated interrupts remapepd */
> - set_field_in_reg_u32(2, entry,
> - IOMMU_DEV_TABLE_INT_CONTROL_MASK,
> - IOMMU_DEV_TABLE_INT_CONTROL_SHIFT, &entry);
> - dte[5] = entry;
> -
> - set_field_in_reg_u32(addr_lo >> 6, 0,
> - IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_MASK,
> - IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_SHIFT, &entry);
> - /* 2048 entries */
> - set_field_in_reg_u32(0xB, entry,
> - IOMMU_DEV_TABLE_INT_TABLE_LENGTH_MASK,
> - IOMMU_DEV_TABLE_INT_TABLE_LENGTH_SHIFT, &entry);
> -
> - /* unmapped interrupt results io page faults*/
> - set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
> - IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_MASK,
> - IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_SHIFT,
> &entry);
> - set_field_in_reg_u32(int_valid ? IOMMU_CONTROL_ENABLED :
> - IOMMU_CONTROL_DISABLED, entry,
> - IOMMU_DEV_TABLE_INT_VALID_MASK,
> - IOMMU_DEV_TABLE_INT_VALID_SHIFT, &entry);
> - dte[4] = entry;
> + dte->it_root = intremap_ptr >> 6;
> + dte->int_tab_len = 0xb; /* 2048 entries */
> + dte->int_ctl = 2; /* fixed and arbitrated interrupts remapped */
> + dte->ig = 0; /* unmapped interrupt results io page faults */
> + dte->iv = int_valid;
> }
>
> -void __init iommu_dte_add_device_entry(uint32_t *dte,
> +void __init iommu_dte_add_device_entry(struct amd_iommu_dte *dte,
> struct ivrs_mappings *ivrs_dev)
> {
> - uint32_t entry;
> - uint8_t sys_mgt, dev_ex, flags;
> - uint8_t mask = ~(0x7 << 3);
> -
> - dte[7] = dte[6] = dte[4] = dte[2] = dte[1] = dte[0] = 0;
> -
> - flags = ivrs_dev->device_flags;
> - sys_mgt = MASK_EXTR(flags, ACPI_IVHD_SYSTEM_MGMT);
> - dev_ex = ivrs_dev->dte_allow_exclusion;
> -
> - flags &= mask;
> - set_field_in_reg_u32(flags, 0,
> - IOMMU_DEV_TABLE_IVHD_FLAGS_MASK,
> - IOMMU_DEV_TABLE_IVHD_FLAGS_SHIFT, &entry);
> - dte[5] = entry;
> -
> - set_field_in_reg_u32(sys_mgt, 0,
> - IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK,
> - IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT, &entry);
> - set_field_in_reg_u32(dev_ex, entry,
> - IOMMU_DEV_TABLE_ALLOW_EXCLUSION_MASK,
> - IOMMU_DEV_TABLE_ALLOW_EXCLUSION_SHIFT, &entry);
> - dte[3] = entry;
> + uint8_t flags = ivrs_dev->device_flags;
> +
> + memset(dte, 0, sizeof(*dte));
> +
> + dte->init_pass = MASK_EXTR(flags, ACPI_IVHD_INIT_PASS);
> + dte->ext_int_pass = MASK_EXTR(flags, ACPI_IVHD_EINT_PASS);
> + dte->nmi_pass = MASK_EXTR(flags, ACPI_IVHD_NMI_PASS);
> + dte->lint0_pass = MASK_EXTR(flags, ACPI_IVHD_LINT0_PASS);
> + dte->lint1_pass = MASK_EXTR(flags, ACPI_IVHD_LINT1_PASS);
> + dte->sys_mgt = MASK_EXTR(flags, ACPI_IVHD_SYSTEM_MGMT);
> + dte->ex = ivrs_dev->dte_allow_exclusion;
> }
>
> -void iommu_dte_set_guest_cr3(uint32_t *dte, uint16_t dom_id, uint64_t gcr3,
> - int gv, unsigned int glx)
> +void iommu_dte_set_guest_cr3(struct amd_iommu_dte *dte, uint16_t dom_id,
> + uint64_t gcr3_mfn, uint8_t gv, uint8_t glx)
> {
> - uint32_t entry, gcr3_1, gcr3_2, gcr3_3;
> -
> - gcr3_3 = gcr3 >> 31;
> - gcr3_2 = (gcr3 >> 15) & 0xFFFF;
> - gcr3_1 = (gcr3 >> PAGE_SHIFT) & 0x7;
> +#define GCR3_MASK(hi, lo) (((1ul << ((hi) + 1)) - 1) & ~((1ul << (lo)) - 1))
> +#define GCR3_SHIFT(lo) ((lo) - PAGE_SHIFT)
>
> /* I bit must be set when gcr3 is enabled */
> - entry = dte[3];
> - set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
> - IOMMU_DEV_TABLE_IOTLB_SUPPORT_MASK,
> - IOMMU_DEV_TABLE_IOTLB_SUPPORT_SHIFT, &entry);
> - /* update gcr3 */
> - set_field_in_reg_u32(gcr3_3, entry,
> - IOMMU_DEV_TABLE_GCR3_3_MASK,
> - IOMMU_DEV_TABLE_GCR3_3_SHIFT, &entry);
> - dte[3] = entry;
> -
> - set_field_in_reg_u32(dom_id, entry,
> - IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
> - IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT, &entry);
> - /* update gcr3 */
> - entry = dte[2];
> - set_field_in_reg_u32(gcr3_2, entry,
> - IOMMU_DEV_TABLE_GCR3_2_MASK,
> - IOMMU_DEV_TABLE_GCR3_2_SHIFT, &entry);
> - dte[2] = entry;
> -
> - entry = dte[1];
> - /* Enable GV bit */
> - set_field_in_reg_u32(!!gv, entry,
> - IOMMU_DEV_TABLE_GV_MASK,
> - IOMMU_DEV_TABLE_GV_SHIFT, &entry);
> -
> - /* 1 level guest cr3 table */
> - set_field_in_reg_u32(glx, entry,
> - IOMMU_DEV_TABLE_GLX_MASK,
> - IOMMU_DEV_TABLE_GLX_SHIFT, &entry);
> - /* update gcr3 */
> - set_field_in_reg_u32(gcr3_1, entry,
> - IOMMU_DEV_TABLE_GCR3_1_MASK,
> - IOMMU_DEV_TABLE_GCR3_1_SHIFT, &entry);
> - dte[1] = entry;
> + dte->i = 1;
> +
> + dte->gcr3_trp_14_12 = (gcr3_mfn & GCR3_MASK(14, 12)) >> GCR3_SHIFT(12);
> + dte->gcr3_trp_30_15 = (gcr3_mfn & GCR3_MASK(30, 15)) >> GCR3_SHIFT(15);
> + dte->gcr3_trp_51_31 = (gcr3_mfn & GCR3_MASK(51, 31)) >> GCR3_SHIFT(31);
> +
> + dte->domain_id = dom_id;
> + dte->glx = glx;
> + dte->gv = gv;
> +
> +#undef GCR3_SHIFT
> +#undef GCR3_MASK
> }
>
> /* Walk io page tables and build level page tables if necessary
> @@ -369,7 +266,7 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned
> long dfn,
> static int update_paging_mode(struct domain *d, unsigned long dfn)
> {
> uint16_t bdf;
> - void *device_entry;
> + struct amd_iommu_dte *table, *dte;
> unsigned int req_id, level, offset;
> unsigned long flags;
> struct pci_dev *pdev;
> @@ -438,11 +335,11 @@ static int update_paging_mode(struct domain *d,
> unsigned long dfn)
> spin_lock_irqsave(&iommu->lock, flags);
> do {
> req_id = get_dma_requestor_id(pdev->seg, bdf);
> - device_entry = iommu->dev_table.buffer +
> - (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
> + table = iommu->dev_table.buffer;
> + dte = &table[req_id];
>
> /* valid = 0 only works for dom0 passthrough mode */
> - amd_iommu_set_root_page_table((uint32_t *)device_entry,
> + amd_iommu_set_root_page_table(dte,
>
> page_to_maddr(hd->arch.root_table),
> d->domain_id,
> hd->arch.paging_mode, 1);
> diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c
> b/xen/drivers/passthrough/amd/pci_amd_iommu.c
> index da6748320b..f6c17ba87a 100644
> --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
> +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
> @@ -71,7 +71,7 @@ struct amd_iommu *find_iommu_for_device(int seg, int bdf)
> * Return original device id, if device has valid interrupt remapping
> * table setup for both select entry and alias entry.
> */
> -int get_dma_requestor_id(u16 seg, u16 bdf)
> +int get_dma_requestor_id(uint16_t seg, uint16_t bdf)
> {
> struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
> int req_id;
> @@ -85,35 +85,11 @@ int get_dma_requestor_id(u16 seg, u16 bdf)
> return req_id;
> }
>
> -static int is_translation_valid(u32 *entry)
> -{
> - return (get_field_from_reg_u32(entry[0],
> - IOMMU_DEV_TABLE_VALID_MASK,
> - IOMMU_DEV_TABLE_VALID_SHIFT) &&
> - get_field_from_reg_u32(entry[0],
> - IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
> - IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT));
> -}
> -
> -static void disable_translation(u32 *dte)
> -{
> - u32 entry;
> -
> - entry = dte[0];
> - set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
> - IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
> - IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT, &entry);
> - set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
> - IOMMU_DEV_TABLE_VALID_MASK,
> - IOMMU_DEV_TABLE_VALID_SHIFT, &entry);
> - dte[0] = entry;
> -}
> -
> static void amd_iommu_setup_domain_device(
> struct domain *domain, struct amd_iommu *iommu,
> - u8 devfn, struct pci_dev *pdev)
> + uint8_t devfn, struct pci_dev *pdev)
> {
> - void *dte;
> + struct amd_iommu_dte *table, *dte;
> unsigned long flags;
> int req_id, valid = 1;
> int dte_i = 0;
> @@ -131,20 +107,21 @@ static void amd_iommu_setup_domain_device(
>
> /* get device-table entry */
> req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(bus, devfn));
> - dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
> + table = iommu->dev_table.buffer;
> + dte = &table[req_id];
>
> spin_lock_irqsave(&iommu->lock, flags);
>
> - if ( !is_translation_valid((u32 *)dte) )
> + if ( !dte->v || !dte->tv )
> {
> /* bind DTE to domain page-tables */
> amd_iommu_set_root_page_table(
> - (u32 *)dte, page_to_maddr(hd->arch.root_table),
> domain->domain_id,
> + dte, page_to_maddr(hd->arch.root_table), domain->domain_id,
> hd->arch.paging_mode, valid);
>
> if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
> iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
> - iommu_dte_set_iotlb((u32 *)dte, dte_i);
> + dte->i = dte_i;
>
> amd_iommu_flush_device(iommu, req_id);
>
> @@ -272,23 +249,25 @@ void amd_iommu_disable_domain_device(struct domain
> *domain,
> struct amd_iommu *iommu,
> u8 devfn, struct pci_dev *pdev)
> {
> - void *dte;
> + struct amd_iommu_dte *table, *dte;
> unsigned long flags;
> int req_id;
> u8 bus = pdev->bus;
>
> BUG_ON ( iommu->dev_table.buffer == NULL );
> req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(bus, devfn));
> - dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
> + table = iommu->dev_table.buffer;
> + dte = &table[req_id];
>
> spin_lock_irqsave(&iommu->lock, flags);
> - if ( is_translation_valid((u32 *)dte) )
> + if ( dte->tv && dte->v )
> {
> - disable_translation((u32 *)dte);
> + dte->tv = 0;
> + dte->v = 0;
>
> if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
> iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
> - iommu_dte_set_iotlb((u32 *)dte, 0);
> + dte->i = 0;
>
> amd_iommu_flush_device(iommu, req_id);
>
> diff --git a/xen/include/asm-x86/amd-iommu.h b/xen/include/asm-x86/amd-iommu.h
> index 02715b482b..ad8e4a35a2 100644
> --- a/xen/include/asm-x86/amd-iommu.h
> +++ b/xen/include/asm-x86/amd-iommu.h
> @@ -46,11 +46,6 @@ typedef struct cmd_entry
> {
> uint32_t data[4];
> } cmd_entry_t;
> -
> -typedef struct dev_entry
> -{
> - uint32_t data[8];
> -} dev_entry_t;
> #pragma pack()
>
> struct table_struct {
> diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
> b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
> index a3a49f91eb..40da33b271 100644
> --- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
> +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
> @@ -107,74 +107,58 @@
> #define IOMMU_DEV_TABLE_INT_CONTROL_FORWARDED 0x1
> #define IOMMU_DEV_TABLE_INT_CONTROL_TRANSLATED 0x2
>
> -/* DeviceTable Entry[31:0] */
> -#define IOMMU_DEV_TABLE_VALID_MASK 0x00000001
> -#define IOMMU_DEV_TABLE_VALID_SHIFT 0
> -#define IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK 0x00000002
> -#define IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT 1
> -#define IOMMU_DEV_TABLE_PAGING_MODE_MASK 0x00000E00
> -#define IOMMU_DEV_TABLE_PAGING_MODE_SHIFT 9
> -#define IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK 0xFFFFF000
> -#define IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT 12
> -
> -/* DeviceTable Entry[63:32] */
> -#define IOMMU_DEV_TABLE_GV_SHIFT 23
> -#define IOMMU_DEV_TABLE_GV_MASK 0x800000
> -#define IOMMU_DEV_TABLE_GLX_SHIFT 24
> -#define IOMMU_DEV_TABLE_GLX_MASK 0x3000000
> -#define IOMMU_DEV_TABLE_GCR3_1_SHIFT 26
> -#define IOMMU_DEV_TABLE_GCR3_1_MASK 0x1c000000
> -
> -#define IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK 0x000FFFFF
> -#define IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT 0
> -#define IOMMU_DEV_TABLE_IO_READ_PERMISSION_MASK 0x20000000
> -#define IOMMU_DEV_TABLE_IO_READ_PERMISSION_SHIFT 29
> -#define IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_MASK 0x40000000
> -#define IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_SHIFT 30
> -
> -/* DeviceTable Entry[95:64] */
> -#define IOMMU_DEV_TABLE_DOMAIN_ID_MASK 0x0000FFFF
> -#define IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT 0
> -#define IOMMU_DEV_TABLE_GCR3_2_SHIFT 16
> -#define IOMMU_DEV_TABLE_GCR3_2_MASK 0xFFFF0000
> -
> -/* DeviceTable Entry[127:96] */
> -#define IOMMU_DEV_TABLE_IOTLB_SUPPORT_MASK 0x00000001
> -#define IOMMU_DEV_TABLE_IOTLB_SUPPORT_SHIFT 0
> -#define IOMMU_DEV_TABLE_SUPRESS_LOGGED_PAGES_MASK 0x00000002
> -#define IOMMU_DEV_TABLE_SUPRESS_LOGGED_PAGES_SHIFT 1
> -#define IOMMU_DEV_TABLE_SUPRESS_ALL_PAGES_MASK 0x00000004
> -#define IOMMU_DEV_TABLE_SUPRESS_ALL_PAGES_SHIFT 2
> -#define IOMMU_DEV_TABLE_IO_CONTROL_MASK 0x00000018
> -#define IOMMU_DEV_TABLE_IO_CONTROL_SHIFT 3
> -#define IOMMU_DEV_TABLE_IOTLB_CACHE_HINT_MASK 0x00000020
> -#define IOMMU_DEV_TABLE_IOTLB_CACHE_HINT_SHIFT 5
> -#define IOMMU_DEV_TABLE_SNOOP_DISABLE_MASK 0x00000040
> -#define IOMMU_DEV_TABLE_SNOOP_DISABLE_SHIFT 6
> -#define IOMMU_DEV_TABLE_ALLOW_EXCLUSION_MASK 0x00000080
> -#define IOMMU_DEV_TABLE_ALLOW_EXCLUSION_SHIFT 7
> -#define IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK 0x00000300
> -#define IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT 8
> -
> -/* DeviceTable Entry[159:128] */
> -#define IOMMU_DEV_TABLE_INT_VALID_MASK 0x00000001
> -#define IOMMU_DEV_TABLE_INT_VALID_SHIFT 0
> -#define IOMMU_DEV_TABLE_INT_TABLE_LENGTH_MASK 0x0000001E
> -#define IOMMU_DEV_TABLE_INT_TABLE_LENGTH_SHIFT 1
> -#define IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_MASK 0x0000000020
> -#define IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_SHIFT 5
> -#define IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_MASK 0xFFFFFFC0
> -#define IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_SHIFT 6
> -#define IOMMU_DEV_TABLE_GCR3_3_SHIFT 11
> -#define IOMMU_DEV_TABLE_GCR3_3_MASK 0xfffff800
> -
> -/* DeviceTable Entry[191:160] */
> -#define IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_MASK 0x000FFFFF
> -#define IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_SHIFT 0
> -#define IOMMU_DEV_TABLE_IVHD_FLAGS_SHIFT 24
> -#define IOMMU_DEV_TABLE_IVHD_FLAGS_MASK 0xC7000000
> -#define IOMMU_DEV_TABLE_INT_CONTROL_MASK 0x30000000
> -#define IOMMU_DEV_TABLE_INT_CONTROL_SHIFT 28
> +struct amd_iommu_dte {
> + /* 0 - 63 */
> + uint64_t v:1;
> + uint64_t tv:1;
> + uint64_t reserved0:5;
> + uint64_t had:2;
> + uint64_t paging_mode:3;
> + uint64_t pt_root:40;
> + uint64_t ppr:1;
> + uint64_t gprp:1;
> + uint64_t giov:1;
> + uint64_t gv:1;
> + uint64_t glx:2;
> + uint64_t gcr3_trp_14_12:3;
> + uint64_t ir:1;
> + uint64_t iw:1;
> + uint64_t reserved1:1;
> +
> + /* 64 - 127 */
> + uint64_t domain_id:16;
> + uint64_t gcr3_trp_30_15:16;
> + uint64_t i:1;
> + uint64_t se:1;
> + uint64_t sa:1;
> + uint64_t ioctl:2;
> + uint64_t cache:1;
> + uint64_t sd:1;
> + uint64_t ex:1;
> + uint64_t sys_mgt:2;
> + uint64_t reserved2:1;
> + uint64_t gcr3_trp_51_31:21;
> +
> + /* 128 - 191 */
> + uint64_t iv:1;
> + uint64_t int_tab_len:4;
> + uint64_t ig:1;
> + uint64_t it_root:46;
> + uint64_t reserved3:4;
> + uint64_t init_pass:1;
> + uint64_t ext_int_pass:1;
> + uint64_t nmi_pass:1;
> + uint64_t reserved4:1;
> + uint64_t int_ctl:2;
> + uint64_t lint0_pass:1;
> + uint64_t lint1_pass:1;
> +
> + /* 192 - 255 */
> + uint64_t reserved5:54;
> + uint64_t attr_v:1;
> + uint64_t mode0_fc:1;
> + uint64_t snoop_attr:8;
> +};
>
> /* Command Buffer */
> #define IOMMU_CMD_BUFFER_BASE_LOW_OFFSET 0x08
> diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
> b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
> index 1c1971bb7c..e0d5d23978 100644
> --- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
> +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
> @@ -70,15 +70,17 @@ int __must_check amd_iommu_flush_iotlb_all(struct domain
> *d);
> void amd_iommu_share_p2m(struct domain *d);
>
> /* device table functions */
> -int get_dma_requestor_id(u16 seg, u16 bdf);
> -void amd_iommu_set_intremap_table(
> - u32 *dte, u64 intremap_ptr, u8 int_valid);
> -void amd_iommu_set_root_page_table(
> - u32 *dte, u64 root_ptr, u16 domain_id, u8 paging_mode, u8 valid);
> -void iommu_dte_set_iotlb(u32 *dte, u8 i);
> -void iommu_dte_add_device_entry(u32 *dte, struct ivrs_mappings *ivrs_dev);
> -void iommu_dte_set_guest_cr3(u32 *dte, u16 dom_id, u64 gcr3,
> - int gv, unsigned int glx);
> +int get_dma_requestor_id(uint16_t seg, uint16_t bdf);
> +void amd_iommu_set_intremap_table(struct amd_iommu_dte *dte,
> + uint64_t intremap_ptr,
> + uint8_t int_valid);
> +void amd_iommu_set_root_page_table(struct amd_iommu_dte *dte,
> + uint64_t root_ptr, uint16_t domain_id,
> + uint8_t paging_mode, uint8_t valid);
> +void iommu_dte_add_device_entry(struct amd_iommu_dte *dte,
> + struct ivrs_mappings *ivrs_dev);
> +void iommu_dte_set_guest_cr3(struct amd_iommu_dte *dte, uint16_t dom_id,
> + uint64_t gcr3_mfn, uint8_t gv, uint8_t glx);
>
> /* send cmd to iommu */
> void amd_iommu_flush_all_pages(struct domain *d);
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
_______________________________________________
Lists.xenproject.org is hosted with RackSpace.