[PATCH v1 15/16] block-dma: properly take MMIO path

From: Leon Romanovsky <leonro@xxxxxxxxxx>

Make sure that the CPU is not synced and that the IOMMU is configured
to take the MMIO path by providing the newly introduced DMA_ATTR_MMIO
attribute.
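
In sketch form, each mapping call in the block DMA helpers now
translates the new REQ_MMIO request flag into DMA_ATTR_MMIO before
calling into the DMA API (dma_map_phys(), dma_iova_link() and
dma_iova_destroy()):

        unsigned int attrs = 0;

        /* Requests marked REQ_MMIO carry MMIO (e.g. P2P) memory. */
        if (req->cmd_flags & REQ_MMIO)
                attrs = DMA_ATTR_MMIO;

        iter->addr = dma_map_phys(dma_dev, vec->paddr, vec->len,
                                  rq_dma_dir(req), attrs);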

Signed-off-by: Leon Romanovsky <leonro@xxxxxxxxxx>
---
 block/blk-mq-dma.c         | 13 +++++++++++--
 include/linux/blk-mq-dma.h |  6 +++++-
 include/linux/blk_types.h  |  2 ++
 3 files changed, 18 insertions(+), 3 deletions(-)

diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c
index 37e2142be4f7d..d415088ed9fd2 100644
--- a/block/blk-mq-dma.c
+++ b/block/blk-mq-dma.c
@@ -87,8 +87,13 @@ static bool blk_dma_map_bus(struct blk_dma_iter *iter, struct phys_vec *vec)
 static bool blk_dma_map_direct(struct request *req, struct device *dma_dev,
                struct blk_dma_iter *iter, struct phys_vec *vec)
 {
+       unsigned int attrs = 0;
+
+       if (req->cmd_flags & REQ_MMIO)
+               attrs = DMA_ATTR_MMIO;
+
        iter->addr = dma_map_phys(dma_dev, vec->paddr, vec->len,
-                       rq_dma_dir(req), 0);
+                       rq_dma_dir(req), attrs);
        if (dma_mapping_error(dma_dev, iter->addr)) {
                iter->status = BLK_STS_RESOURCE;
                return false;
@@ -103,14 +108,17 @@ static bool blk_rq_dma_map_iova(struct request *req, struct device *dma_dev,
 {
        enum dma_data_direction dir = rq_dma_dir(req);
        unsigned int mapped = 0;
+       unsigned int attrs = 0;
        int error;
 
        iter->addr = state->addr;
        iter->len = dma_iova_size(state);
+       if (req->cmd_flags & REQ_MMIO)
+               attrs = DMA_ATTR_MMIO;
 
        do {
                error = dma_iova_link(dma_dev, state, vec->paddr, mapped,
-                               vec->len, dir, 0);
+                               vec->len, dir, attrs);
                if (error)
                        break;
                mapped += vec->len;
@@ -176,6 +184,7 @@ bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
                         * same as non-P2P transfers below and during unmap.
                         */
                        req->cmd_flags &= ~REQ_P2PDMA;
+                       req->cmd_flags |= REQ_MMIO;
                        break;
                default:
                        iter->status = BLK_STS_INVAL;
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
index c26a01aeae006..6c55f5e585116 100644
--- a/include/linux/blk-mq-dma.h
+++ b/include/linux/blk-mq-dma.h
@@ -48,12 +48,16 @@ static inline bool blk_rq_dma_map_coalesce(struct dma_iova_state *state)
 static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
                struct dma_iova_state *state, size_t mapped_len)
 {
+       unsigned int attrs = 0;
+
        if (req->cmd_flags & REQ_P2PDMA)
                return true;
 
        if (dma_use_iova(state)) {
+               if (req->cmd_flags & REQ_MMIO)
+                       attrs = DMA_ATTR_MMIO;
                dma_iova_destroy(dma_dev, state, mapped_len, rq_dma_dir(req),
-                                0);
+                                attrs);
                return true;
        }
 
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 09b99d52fd365..283058bcb5b14 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -387,6 +387,7 @@ enum req_flag_bits {
        __REQ_FS_PRIVATE,       /* for file system (submitter) use */
        __REQ_ATOMIC,           /* for atomic write operations */
        __REQ_P2PDMA,           /* contains P2P DMA pages */
+       __REQ_MMIO,             /* contains MMIO memory */
        /*
         * Command specific flags, keep last:
         */
@@ -420,6 +421,7 @@ enum req_flag_bits {
 #define REQ_FS_PRIVATE (__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
 #define REQ_ATOMIC     (__force blk_opf_t)(1ULL << __REQ_ATOMIC)
 #define REQ_P2PDMA     (__force blk_opf_t)(1ULL << __REQ_P2PDMA)
+#define REQ_MMIO       (__force blk_opf_t)(1ULL << __REQ_MMIO)
 
 #define REQ_NOUNMAP    (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
 
-- 
2.50.1