Re: [PATCH 09/13] DMA: Add dma_map_decrypted/dma_unmap_encrypted() function
 
- To: Christoph Hellwig <hch@xxxxxxxxxxxxx>
 
- From: Tianyu Lan <ltykernel@xxxxxxxxx>
 
- Date: Thu, 29 Jul 2021 23:13:11 +0800
 
- Cc: iommu@xxxxxxxxxxxxxxxxxxxxxxxxxx, linux-arch@xxxxxxxxxxxxxxx, linux-hyperv@xxxxxxxxxxxxxxx, linux-kernel@xxxxxxxxxxxxxxx, linux-scsi@xxxxxxxxxxxxxxx, netdev@xxxxxxxxxxxxxxx, vkuznets@xxxxxxxxxx, anparri@xxxxxxxxxxxxx, kys@xxxxxxxxxxxxx, haiyangz@xxxxxxxxxxxxx, sthemmin@xxxxxxxxxxxxx, wei.liu@xxxxxxxxxx, decui@xxxxxxxxxxxxx, tglx@xxxxxxxxxxxxx, mingo@xxxxxxxxxx, bp@xxxxxxxxx, x86@xxxxxxxxxx, hpa@xxxxxxxxx, dave.hansen@xxxxxxxxxxxxxxx, luto@xxxxxxxxxx, peterz@xxxxxxxxxxxxx, konrad.wilk@xxxxxxxxxx, boris.ostrovsky@xxxxxxxxxx, jgross@xxxxxxxx, sstabellini@xxxxxxxxxx, joro@xxxxxxxxxx, will@xxxxxxxxxx, davem@xxxxxxxxxxxxx, kuba@xxxxxxxxxx, jejb@xxxxxxxxxxxxx, martin.petersen@xxxxxxxxxx, arnd@xxxxxxxx, m.szyprowski@xxxxxxxxxxx, robin.murphy@xxxxxxx, thomas.lendacky@xxxxxxx, brijesh.singh@xxxxxxx, ardb@xxxxxxxxxx, Tianyu.Lan@xxxxxxxxxxxxx, rientjes@xxxxxxxxxx, martin.b.radev@xxxxxxxxx, akpm@xxxxxxxxxxxxxxxxxxxx, rppt@xxxxxxxxxx, kirill.shutemov@xxxxxxxxxxxxxxx, aneesh.kumar@xxxxxxxxxxxxx, krish.sadhukhan@xxxxxxxxxx, saravanand@xxxxxx, xen-devel@xxxxxxxxxxxxxxxxxxxx, pgonda@xxxxxxxxxx, david@xxxxxxxxxx, keescook@xxxxxxxxxxxx, hannes@xxxxxxxxxxx, sfr@xxxxxxxxxxxxxxxx, michael.h.kelley@xxxxxxxxxxxxx
 
- Delivery-date: Thu, 29 Jul 2021 15:13:43 +0000
 
- List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
 
 
 
Hi Christoph:
     Could you have a look at this patch and the following patch
"[PATCH 10/13] x86/Swiotlb: Add Swiotlb bounce buffer remap function
for HV IVM"? These two patches follow your previous comments and add
dma_map_decrypted()/dma_unmap_encrypted(). I didn't add an arch prefix
because each platform can populate its own callbacks in the dma memory
decrypted ops.
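As an illustration of that split, here is a minimal, hypothetical sketch
of a platform populating the ops at boot. The helper names
(hv_map_decrypted_pages/hv_unmap_decrypted_pages, hv_init_decrypted_ops)
are placeholders and not part of this series:

    static void *hv_map_decrypted_pages(void *addr, unsigned long size)
    {
            /* e.g. create an extra mapping of @addr above vTOM */
            return addr;
    }

    static void hv_unmap_decrypted_pages(void *addr)
    {
            /* tear down the extra mapping created by ->map() */
    }

    static int __init hv_init_decrypted_ops(void)
    {
            dma_memory_generic_decrypted_ops.map = hv_map_decrypted_pages;
            dma_memory_generic_decrypted_ops.unmap = hv_unmap_decrypted_pages;
            return 0;
    }
    early_initcall(hv_init_decrypted_ops);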
Thanks.
On 7/28/2021 10:52 PM, Tianyu Lan wrote:
 
From: Tianyu Lan <Tianyu.Lan@xxxxxxxxxxxxx>
In a Hyper-V Isolation VM with AMD SEV, the swiotlb bounce buffer
needs to be mapped into the address space above vTOM, so introduce
dma_map_decrypted()/dma_unmap_encrypted() to map/unmap the bounce
buffer memory. The platform can populate its map/unmap callbacks in
the dma memory decrypted ops.
Signed-off-by: Tianyu Lan <Tianyu.Lan@xxxxxxxxxxxxx>
---
  include/linux/dma-map-ops.h |  9 +++++++++
  kernel/dma/mapping.c        | 22 ++++++++++++++++++++++
  2 files changed, 31 insertions(+)
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 0d53a96a3d64..01d60a024e45 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -71,6 +71,11 @@ struct dma_map_ops {
        unsigned long (*get_merge_boundary)(struct device *dev);
  };
  
+struct dma_memory_decrypted_ops {
+       void *(*map)(void *addr, unsigned long size);
+       void (*unmap)(void *addr);
+};
+
  #ifdef CONFIG_DMA_OPS
  #include <asm/dma-mapping.h>
   
@@ -374,6 +379,10 @@ static inline void debug_dma_dump_mappings(struct device *dev)
  }
  #endif /* CONFIG_DMA_API_DEBUG */
   
+void *dma_map_decrypted(void *addr, unsigned long size);
+int dma_unmap_encrypted(void *addr, unsigned long size);
+
  extern const struct dma_map_ops dma_dummy_ops;
+extern struct dma_memory_decrypted_ops dma_memory_generic_decrypted_ops;
   
  #endif /* _LINUX_DMA_MAP_OPS_H */
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 2b06a809d0b9..6fb150dc1750 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -13,11 +13,13 @@
  #include <linux/of_device.h>
  #include <linux/slab.h>
  #include <linux/vmalloc.h>
+#include <asm/set_memory.h>
  #include "debug.h"
  #include "direct.h"
   
  bool dma_default_coherent;
  
+struct dma_memory_decrypted_ops dma_memory_generic_decrypted_ops;
  /*
   * Managed DMA API
   */
@@ -736,3 +738,23 @@ unsigned long dma_get_merge_boundary(struct device *dev)
        return ops->get_merge_boundary(dev);
  }
  EXPORT_SYMBOL_GPL(dma_get_merge_boundary);
+
+void *dma_map_decrypted(void *addr, unsigned long size)
+{
+       if (set_memory_decrypted((unsigned long)addr,
+                                size / PAGE_SIZE))
+               return NULL;
+
+       if (dma_memory_generic_decrypted_ops.map)
+               return dma_memory_generic_decrypted_ops.map(addr, size);
+       else
+               return addr;
+}
+
+int dma_unmap_encrypted(void *addr, unsigned long size)
+{
+       if (dma_memory_generic_decrypted_ops.unmap)
+               dma_memory_generic_decrypted_ops.unmap(addr);
+
+       return set_memory_encrypted((unsigned long)addr, size / PAGE_SIZE);
+}
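Purely as an illustration of the intended calling convention (this is
not code taken from patch 10/13), a consumer such as the swiotlb setup
path would pair the two calls roughly like this; tlb_vaddr and nslabs
are made-up names for the bounce buffer address and slab count:

        unsigned long bytes = nslabs << IO_TLB_SHIFT;
        void *vaddr;

        /* Mark the buffer decrypted/shared; the platform may remap it. */
        vaddr = dma_map_decrypted(tlb_vaddr, bytes);
        if (!vaddr)
                return -ENOMEM;

        /* ... vaddr is then used as the bounce buffer ... */

        /* On teardown, drop the extra mapping and re-encrypt the pages. */
        dma_unmap_encrypted(vaddr, bytes);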