[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v4 17/28] x86/vvtd: save and restore emulated VT-d



On Fri, Nov 17, 2017 at 02:22:24PM +0800, Chao Gao wrote:
> Provide a save-restore pair to save/restore registers and non-register
> status.
> 
> Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
> Signed-off-by: Lan Tianyu <tianyu.lan@xxxxxxxxx>
> ---
> v3:
>  - use one entry to save both vvtd registers and other intermediate
>  state
> ---
>  xen/drivers/passthrough/vtd/vvtd.c     | 57 
> +++++++++++++++++++++++-----------
>  xen/include/public/arch-x86/hvm/save.h | 18 ++++++++++-
>  2 files changed, 56 insertions(+), 19 deletions(-)
> 
> diff --git a/xen/drivers/passthrough/vtd/vvtd.c 
> b/xen/drivers/passthrough/vtd/vvtd.c
> index 81170ec..f6bde69 100644
> --- a/xen/drivers/passthrough/vtd/vvtd.c
> +++ b/xen/drivers/passthrough/vtd/vvtd.c
> @@ -27,8 +27,10 @@
>  #include <asm/event.h>
>  #include <asm/io_apic.h>
>  #include <asm/hvm/domain.h>
> +#include <asm/hvm/save.h>
>  #include <asm/hvm/support.h>
>  #include <asm/p2m.h>
> +#include <public/hvm/save.h>
>  
>  #include "iommu.h"
>  #include "vtd.h"
> @@ -38,20 +40,6 @@
>  
>  #define VVTD_FRCD_NUM   1ULL
>  #define VVTD_FRCD_START (DMAR_IRTA_REG + 8)
> -#define VVTD_FRCD_END   (VVTD_FRCD_START + VVTD_FRCD_NUM * 16)
> -#define VVTD_MAX_OFFSET VVTD_FRCD_END
> -
> -struct hvm_hw_vvtd {
> -    bool eim_enabled;
> -    bool intremap_enabled;
> -    uint32_t fault_index;
> -
> -    /* Interrupt remapping table base gfn and the max of entries */
> -    uint16_t irt_max_entry;
> -    gfn_t irt;

You are changing gfn_t to uint64_t; is gfn_t not working with the
migration stream?

Also I think this duplication of fields (having all registers in
'regs' and some cached in miscellaneous top-level fields) is not a good
approach.

> -
> -    uint32_t regs[VVTD_MAX_OFFSET/sizeof(uint32_t)];
> -};
>  
>  struct vvtd {
>      /* Base address of remapping hardware register-set */
> @@ -776,7 +764,7 @@ static void write_gcmd_sirtp(struct vvtd *vvtd, uint32_t 
> val)
>      if ( vvtd->hw.intremap_enabled )
>          vvtd_info("Update Interrupt Remapping Table when active\n");
>  
> -    if ( gfn_x(vvtd->hw.irt) != PFN_DOWN(DMA_IRTA_ADDR(irta)) ||
> +    if ( vvtd->hw.irt != PFN_DOWN(DMA_IRTA_ADDR(irta)) ||
>           vvtd->hw.irt_max_entry != DMA_IRTA_SIZE(irta) )
>      {
>          if ( vvtd->irt_base )
> @@ -786,14 +774,14 @@ static void write_gcmd_sirtp(struct vvtd *vvtd, 
> uint32_t val)
>                                       sizeof(struct iremap_entry)));
>              vvtd->irt_base = NULL;
>          }
> -        vvtd->hw.irt = _gfn(PFN_DOWN(DMA_IRTA_ADDR(irta)));
> +        vvtd->hw.irt = PFN_DOWN(DMA_IRTA_ADDR(irta));
>          vvtd->hw.irt_max_entry = DMA_IRTA_SIZE(irta);
>          vvtd->hw.eim_enabled = !!(irta & IRTA_EIME);
>          vvtd_info("Update IR info (addr=%lx eim=%d size=%d)\n",
> -                  gfn_x(vvtd->hw.irt), vvtd->hw.eim_enabled,
> +                  vvtd->hw.irt, vvtd->hw.eim_enabled,
>                    vvtd->hw.irt_max_entry);
>  
> -        vvtd->irt_base = map_guest_pages(vvtd->domain, gfn_x(vvtd->hw.irt),
> +        vvtd->irt_base = map_guest_pages(vvtd->domain, vvtd->hw.irt,
>                                           PFN_UP(vvtd->hw.irt_max_entry *
>                                                  sizeof(struct 
> iremap_entry)));
>      }
> @@ -1138,6 +1126,39 @@ static bool vvtd_is_remapping(const struct domain *d,
>      return !irq_remapping_request_index(irq, &idx);
>  }
>  
> +static int vvtd_load(struct domain *d, hvm_domain_context_t *h)
> +{
> +    struct vvtd *vvtd = domain_vvtd(d);
> +    uint64_t iqa;
> +
> +    if ( !vvtd )
> +        return -ENODEV;
> +
> +    if ( hvm_load_entry(VVTD, h, &vvtd->hw) )
> +        return -EINVAL;
> +
> +    iqa = vvtd_get_reg_quad(vvtd, DMAR_IQA_REG);
> +    vvtd->irt_base = map_guest_pages(vvtd->domain, vvtd->hw.irt,
> +                                     PFN_UP(vvtd->hw.irt_max_entry *
> +                                            sizeof(struct iremap_entry)));
> +    vvtd->inv_queue_base = map_guest_pages(vvtd->domain,
> +                                           PFN_DOWN(DMA_IQA_ADDR(iqa)),
> +                                           1 << DMA_IQA_QS(iqa));

Why are you unconditionally mapping those pages? Shouldn't you check
that the relevant features are enabled?

Both could be 0 or simply point to garbage.

> +    return 0;
> +}
> +
> +static int vvtd_save(struct domain *d, hvm_domain_context_t *h)
> +{
> +    struct vvtd *vvtd = domain_vvtd(d);
> +
> +    if ( !vvtd )
> +        return 0;
> +
> +    return hvm_save_entry(VVTD, 0, h, &vvtd->hw);
> +}
> +
> +HVM_REGISTER_SAVE_RESTORE(VVTD, vvtd_save, vvtd_load, 1, HVMSR_PER_DOM);
> +
>  static void vvtd_reset(struct vvtd *vvtd)
>  {
>      uint64_t cap = cap_set_num_fault_regs(VVTD_FRCD_NUM)
> diff --git a/xen/include/public/arch-x86/hvm/save.h 
> b/xen/include/public/arch-x86/hvm/save.h
> index fd7bf3f..24a513b 100644
> --- a/xen/include/public/arch-x86/hvm/save.h
> +++ b/xen/include/public/arch-x86/hvm/save.h
> @@ -639,10 +639,26 @@ struct hvm_msr {
>  
>  #define CPU_MSR_CODE  20
>  
> +#define VVTD_MAX_OFFSET 0xd0

You used to have some kind of formula to calculate VVTD_MAX_OFFSET,
yet here the value is just hardcoded. Any reason for this?

> +struct hvm_hw_vvtd
> +{
> +    uint32_t eim_enabled : 1,
> +             intremap_enabled : 1;
> +    uint32_t fault_index;
> +
> +    /* Interrupt remapping table base gfn and the max of entries */
> +    uint32_t irt_max_entry;
> +    uint64_t irt;
> +
> +    uint32_t regs[VVTD_MAX_OFFSET/sizeof(uint32_t)];
> +};
> +
> +DECLARE_HVM_SAVE_TYPE(VVTD, 21, struct hvm_hw_vvtd);

Adding new fields to this struct in a migration compatible way is
going to be a PITA, but there's no easy solution to this I'm afraid...

Thanks, Roger.

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.