RE: [PATCH 4/9] VT-d: adjust domid map updating when unmapping context
> From: Jan Beulich <jbeulich@xxxxxxxx>
> Sent: Wednesday, June 9, 2021 5:28 PM
>
> When an earlier error occurred, cleaning up the domid mapping data is
> wrong, as references likely still exist. The only exception to this is
> when the actual unmapping worked, but some flush failed (supposedly
> impossible after XSA-373). The guest will be crashed in such a case
> though, so add fallback cleanup to domain destruction to cover it.
> This in turn makes it desirable to silence the dprintk() in
> domain_iommu_domid().
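If it helps readers, the resulting flow in domain_context_unmap() -
condensed and paraphrased from the hunks below, not additional code
in the patch - is:

    ret = domain_context_unmap_one(domain, iommu, bus, devfn);
    ...
    if ( ret )
        goto out;              /* references may remain - keep the map */
    ...
    if ( found == 0 )          /* last device on this IOMMU */
    {
        clear_bit(iommu->index, &dom_iommu(domain)->arch.vtd.iommu_bitmap);
        cleanup_domid_map(domain, iommu);
    }

i.e. the domid map is only torn down once the last device was
unmapped successfully.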
>
> Note that no error will be returned anymore when the lookup fails -
> in the common case lookup failure would already have caused
> domain_context_unmap_one() to fail. And even from a more general
> perspective it doesn't look right to fail domain_context_unmap()
> when the lookup fails for the last device, but not when any earlier
> unmap was otherwise successful.
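That is, cleanup_domid_map() (introduced below) now simply does
nothing when no DID can be found, where the old code would have
returned -EINVAL:

    int iommu_domid = domain_iommu_domid(domain, iommu);

    if ( iommu_domid >= 0 )   /* -1 means "not found": nothing to clean */
    {
        clear_bit(iommu_domid, iommu->domid_bitmap);
        iommu->domid_map[iommu_domid] = 0;
    }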
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
>
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -80,9 +80,11 @@ static int domain_iommu_domid(struct dom
>          i = find_next_bit(iommu->domid_bitmap, nr_dom, i+1);
>      }
>
> -    dprintk(XENLOG_ERR VTDPREFIX,
> -            "Cannot get valid iommu domid: domid=%d iommu->index=%d\n",
> -            d->domain_id, iommu->index);
> +    if ( !d->is_dying )
> +        dprintk(XENLOG_ERR VTDPREFIX,
> +                "Cannot get valid iommu %u domid: %pd\n",
> +                iommu->index, d);
> +
>      return -1;
>  }
>
> @@ -147,6 +149,17 @@ static int context_get_domain_id(struct
>      return domid;
>  }
>
> +static void cleanup_domid_map(struct domain *domain, struct vtd_iommu *iommu)
> +{
> +    int iommu_domid = domain_iommu_domid(domain, iommu);
> +
> +    if ( iommu_domid >= 0 )
> +    {
> +        clear_bit(iommu_domid, iommu->domid_bitmap);
> +        iommu->domid_map[iommu_domid] = 0;
> +    }
> +}
> +
>  static void sync_cache(const void *addr, unsigned int size)
>  {
>      static unsigned long clflush_size = 0;
> @@ -1724,6 +1737,9 @@ static int domain_context_unmap(struct d
>          goto out;
>      }
>
> +    if ( ret )
> +        goto out;
> +
>      /*
>       * if no other devices under the same iommu owned by this domain,
>       * clear iommu in iommu_bitmap and clear domain_id in domid_bitmp
> @@ -1743,19 +1759,8 @@ static int domain_context_unmap(struct d
>
>      if ( found == 0 )
>      {
> -        int iommu_domid;
> -
>          clear_bit(iommu->index, &dom_iommu(domain)->arch.vtd.iommu_bitmap);
> -
> -        iommu_domid = domain_iommu_domid(domain, iommu);
> -        if ( iommu_domid == -1 )
> -        {
> -            ret = -EINVAL;
> -            goto out;
> -        }
> -
> -        clear_bit(iommu_domid, iommu->domid_bitmap);
> -        iommu->domid_map[iommu_domid] = 0;
> +        cleanup_domid_map(domain, iommu);
>      }
>
>   out:
> @@ -1775,6 +1780,7 @@ static void iommu_domain_teardown(struct
>  {
>      struct domain_iommu *hd = dom_iommu(d);
>      struct mapped_rmrr *mrmrr, *tmp;
> +    const struct acpi_drhd_unit *drhd;
>
>      if ( list_empty(&acpi_drhd_units) )
>          return;
> @@ -1786,6 +1792,9 @@ static void iommu_domain_teardown(struct
>      }
>
>      ASSERT(!hd->arch.vtd.pgd_maddr);
> +
> +    for_each_drhd_unit ( drhd )
> +        cleanup_domid_map(d, drhd->iommu);
>  }
>
>  static int __must_check intel_iommu_map_page(struct domain *d, dfn_t dfn,
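One remark on the teardown hunk: the loop

    for_each_drhd_unit ( drhd )
        cleanup_domid_map(d, drhd->iommu);

can safely run for every DRHD unit, even ones the domain never had a
DID allocated on - domain_iommu_domid() then returns -1, making
cleanup_domid_map() a no-op, and the first hunk keeps its dprintk()
quiet for a dying domain.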