
[PATCH v5 03/10] xen/arm: smmuv3: Revert patch related to XArray



XArray is not implemented in XEN; revert the patch that introduced the
XArray code in the SMMUv3 driver.

XArray was added in preparation for sharing some ASIDs with the CPU.

As XEN supports only Stage-2 translation and ASIDs are used for Stage-1
translation, there are no consequences of reverting this patch for XEN.

Once XArray is implemented in XEN, this patch can be added back if XEN
supports Stage-1 translation.

This reverts commit 0299a1a81ca056e79c1a7fb751f936ec0d5c7afe.

Signed-off-by: Rahul Singh <rahul.singh@xxxxxxx>
Acked-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
Changes since v2:
- Added consequences of reverting this patch in commit message
Changes since v3: No changes
Changes since v4: No changes
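Note for reviewers (not intended for the commit message): with the XArray
allocation gone, arm_smmu_domain_finalise_s1() falls back to the bitmap
allocator that the driver already uses for VMIDs, which is why the helpers
themselves do not appear in this diff. As a rough sketch, assuming the
pre-XArray Linux helpers (the exact definitions in smmu-v3.c may differ
slightly), they look like this:

static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	/* The bitmap covers 1 << span IDs (span = asid_bits or vmid_bits). */
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
		/* Retry if another CPU claimed the same bit in the meantime. */
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

The test_and_set_bit() loop makes the allocation atomic with respect to
concurrent allocators, so the callers do not need to hold a lock around it.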
---
 xen/drivers/passthrough/arm/smmu-v3.c | 27 +++++++++------------------
 1 file changed, 9 insertions(+), 18 deletions(-)

diff --git a/xen/drivers/passthrough/arm/smmu-v3.c b/xen/drivers/passthrough/arm/smmu-v3.c
index 8b7747ed38..7b29ead48c 100644
--- a/xen/drivers/passthrough/arm/smmu-v3.c
+++ b/xen/drivers/passthrough/arm/smmu-v3.c
@@ -625,6 +625,7 @@ struct arm_smmu_device {
 
 #define ARM_SMMU_MAX_ASIDS             (1 << 16)
        unsigned int                    asid_bits;
+       DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);
 
 #define ARM_SMMU_MAX_VMIDS             (1 << 16)
        unsigned int                    vmid_bits;
@@ -690,8 +691,6 @@ struct arm_smmu_option_prop {
        const char *prop;
 };
 
-static DEFINE_XARRAY_ALLOC1(asid_xa);
-
 static struct arm_smmu_option_prop arm_smmu_options[] = {
        { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
        { ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
@@ -1346,14 +1345,6 @@ static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
        cdcfg->cdtab = NULL;
 }
 
-static void arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd)
-{
-       if (!cd->asid)
-               return;
-
-       xa_erase(&asid_xa, cd->asid);
-}
-
 /* Stream table manipulation functions */
 static void
 arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
@@ -1988,9 +1979,10 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
 
-               if (cfg->cdcfg.cdtab)
+               if (cfg->cdcfg.cdtab) {
                        arm_smmu_free_cd_tables(smmu_domain);
-               arm_smmu_free_asid(&cfg->cd);
+                       arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
+               }
        } else {
                struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
                if (cfg->vmid)
@@ -2005,15 +1997,14 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
                                       struct io_pgtable_cfg *pgtbl_cfg)
 {
        int ret;
-       u32 asid;
+       int asid;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
        typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
 
-       ret = xa_alloc(&asid_xa, &asid, &cfg->cd,
-                      XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
-       if (ret)
-               return ret;
+       asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
+       if (asid < 0)
+               return asid;
 
        cfg->s1cdmax = master->ssid_bits;
 
@@ -2046,7 +2037,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
 out_free_cd_tables:
        arm_smmu_free_cd_tables(smmu_domain);
 out_free_asid:
-       arm_smmu_free_asid(&cfg->cd);
+       arm_smmu_bitmap_free(smmu->asid_map, asid);
        return ret;
 }
 
-- 
2.17.1




 

