# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1260981852 0
# Node ID 0d9c11acc93900270caa0a9fdf50cde73157da9c
# Parent 7a52ef05b612c8c8ea708a9175d49799a36cc4e2
xen/backends: simplify address translations
There are quite a number of places where redundant translations such as
page -> virtual address -> page happen.

Besides yielding smaller code (both source and binary), a second goal is
to make it easier to determine where the virtual addresses of pages
allocated through alloc_empty_pages_and_pagevec() are really used, which
in turn helps determine whether highmem pages could be used there.
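
For illustration only (not part of the applied diff), a condensed
before/after sketch of the pattern, written against the blkback helpers
touched below; "mfn" merely stands in for map[i].dev_bus_addr >> PAGE_SHIFT,
and the two calls are collapsed here from separate functions:

	/* Before: derive a kernel virtual address from the tracked page,
	 * then translate that address straight back to a page / PFN. */
	unsigned long kvaddr = vaddr(pending_req, i);
	blkback_pagemap_clear(virt_to_page(kvaddr));
	set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT, FOREIGN_FRAME(mfn));

	/* After: keep the struct page around and use it directly. */
	struct page *pg = pending_page(pending_req, i);
	blkback_pagemap_clear(pg);
	set_phys_to_machine(page_to_pfn(pg), FOREIGN_FRAME(mfn));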
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
drivers/xen/blkback/blkback.c | 14 ++++++++------
drivers/xen/blktap/blktap.c | 39 ++++++++++++++++++---------------------
drivers/xen/blktap2/blktap.h | 13 ++++++++++---
drivers/xen/blktap2/device.c | 32 +++++++++++++++-----------------
drivers/xen/blktap2/request.c | 7 +++----
drivers/xen/gntdev/gntdev.c | 8 ++++----
drivers/xen/netback/netback.c | 8 +++-----
drivers/xen/scsiback/scsiback.c | 9 ++++++---
8 files changed, 67 insertions(+), 63 deletions(-)
diff -r 7a52ef05b612 -r 0d9c11acc939 drivers/xen/blkback/blkback.c
--- a/drivers/xen/blkback/blkback.c Wed Dec 16 16:43:33 2009 +0000
+++ b/drivers/xen/blkback/blkback.c Wed Dec 16 16:44:12 2009 +0000
@@ -94,9 +94,11 @@ static inline int vaddr_pagenr(pending_r
return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}
+#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
+
static inline unsigned long vaddr(pending_req_t *req, int seg)
{
- unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
+ unsigned long pfn = page_to_pfn(pending_page(req, seg));
return (unsigned long)pfn_to_kaddr(pfn);
}
@@ -173,7 +175,7 @@ static void fast_flush_area(pending_req_
handle = pending_handle(req, i);
if (handle == BLKBACK_INVALID_HANDLE)
continue;
- blkback_pagemap_clear(virt_to_page(vaddr(req, i)));
+ blkback_pagemap_clear(pending_page(req, i));
gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
GNTMAP_host_map, handle);
pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
@@ -455,7 +457,7 @@ static void dispatch_rw_block_io(blkif_t
ret |= 1;
} else {
blkback_pagemap_set(vaddr_pagenr(pending_req, i),
- virt_to_page(vaddr(pending_req, i)),
+ pending_page(pending_req, i),
blkif->domid, req->handle,
req->seg[i].gref);
}
@@ -465,8 +467,8 @@ static void dispatch_rw_block_io(blkif_t
if (ret)
continue;
- set_phys_to_machine(__pa(vaddr(
- pending_req, i)) >> PAGE_SHIFT,
+ set_phys_to_machine(
+ page_to_pfn(pending_page(pending_req, i)),
FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
seg[i].buf = map[i].dev_bus_addr |
(req->seg[i].first_sect << 9);
@@ -497,7 +499,7 @@ static void dispatch_rw_block_io(blkif_t
while ((bio == NULL) ||
(bio_add_page(bio,
- virt_to_page(vaddr(pending_req, i)),
+ pending_page(pending_req, i),
seg[i].nsec << 9,
seg[i].buf & ~PAGE_MASK) == 0)) {
if (bio) {
diff -r 7a52ef05b612 -r 0d9c11acc939 drivers/xen/blktap/blktap.c
--- a/drivers/xen/blktap/blktap.c Wed Dec 16 16:43:33 2009 +0000
+++ b/drivers/xen/blktap/blktap.c Wed Dec 16 16:44:12 2009 +0000
@@ -168,11 +168,16 @@ static inline unsigned int RTN_PEND_IDX(
#define BLKBACK_INVALID_HANDLE (~0)
static struct page **foreign_pages[MAX_DYNAMIC_MEM];
+static inline struct page *idx_to_page(
+ unsigned int mmap_idx, unsigned int req_idx, unsigned int sg_idx)
+{
+ unsigned int arr_idx = req_idx*BLKIF_MAX_SEGMENTS_PER_REQUEST + sg_idx;
+ return foreign_pages[mmap_idx][arr_idx];
+}
static inline unsigned long idx_to_kaddr(
unsigned int mmap_idx, unsigned int req_idx, unsigned int sg_idx)
{
- unsigned int arr_idx = req_idx*BLKIF_MAX_SEGMENTS_PER_REQUEST + sg_idx;
- unsigned long pfn = page_to_pfn(foreign_pages[mmap_idx][arr_idx]);
+ unsigned long pfn = page_to_pfn(idx_to_page(mmap_idx,req_idx,sg_idx));
return (unsigned long)pfn_to_kaddr(pfn);
}
@@ -345,7 +350,7 @@ static pte_t blktap_clear_pte(struct vm_
mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]);
kvaddr = idx_to_kaddr(mmap_idx, pending_idx, seg);
- pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ pg = idx_to_page(mmap_idx, pending_idx, seg);
ClearPageReserved(pg);
info->foreign_map.map[offset + RING_PAGES] = NULL;
@@ -1041,7 +1046,7 @@ static void fast_flush_area(pending_req_
struct grant_handle_pair *khandle;
uint64_t ptep;
int ret, mmap_idx;
- unsigned long kvaddr, uvaddr;
+ unsigned long uvaddr;
tap_blkif_t *info;
struct mm_struct *mm;
@@ -1067,7 +1072,6 @@ static void fast_flush_area(pending_req_
mmap_idx = req->mem_idx;
for (i = 0; i < req->nr_pages; i++) {
- kvaddr = idx_to_kaddr(mmap_idx, k_idx, i);
uvaddr = MMAP_VADDR(info->user_vstart, u_idx, i);
khandle = &pending_handle(mmap_idx, k_idx, i);
@@ -1079,8 +1083,8 @@ static void fast_flush_area(pending_req_
invcount++;
set_phys_to_machine(
- __pa(idx_to_kaddr(mmap_idx, k_idx, i))
- >> PAGE_SHIFT, INVALID_P2M_ENTRY);
+ page_to_pfn(idx_to_page(mmap_idx, k_idx, i)),
+ INVALID_P2M_ENTRY);
}
if (khandle->user != INVALID_GRANT_HANDLE) {
@@ -1228,14 +1232,13 @@ static int blktap_read_ufe_ring(tap_blki
for (j = 0; j < pending_req->nr_pages; j++) {
- unsigned long kvaddr, uvaddr;
+ unsigned long uvaddr;
struct page *pg;
int offset;
uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, j);
- kvaddr = idx_to_kaddr(mmap_idx, pending_idx, j);
-
- pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+
+ pg = idx_to_page(mmap_idx, pending_idx, j);
ClearPageReserved(pg);
offset = (uvaddr - info->rings_vstart) >> PAGE_SHIFT;
info->foreign_map.map[offset] = NULL;
@@ -1496,12 +1499,10 @@ static void dispatch_rw_block_io(blkif_t
for (i = 0; i < (nseg*2); i+=2) {
unsigned long uvaddr;
- unsigned long kvaddr;
unsigned long offset;
struct page *pg;
uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i/2);
- kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i/2);
if (unlikely(map[i].status != 0)) {
WPRINTK("invalid kernel buffer -- "
@@ -1525,22 +1526,20 @@ static void dispatch_rw_block_io(blkif_t
if (ret)
continue;
- set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
+ pg = idx_to_page(mmap_idx, pending_idx, i/2);
+ set_phys_to_machine(page_to_pfn(pg),
FOREIGN_FRAME(map[i].dev_bus_addr
>> PAGE_SHIFT));
offset = (uvaddr - info->rings_vstart) >> PAGE_SHIFT;
- pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
info->foreign_map.map[offset] = pg;
}
} else {
for (i = 0; i < nseg; i++) {
unsigned long uvaddr;
- unsigned long kvaddr;
unsigned long offset;
struct page *pg;
uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
- kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
if (unlikely(map[i].status != 0)) {
WPRINTK("invalid kernel buffer -- "
@@ -1556,7 +1555,7 @@ static void dispatch_rw_block_io(blkif_t
continue;
offset = (uvaddr - info->rings_vstart) >> PAGE_SHIFT;
- pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ pg = idx_to_page(mmap_idx, pending_idx, i);
info->foreign_map.map[offset] = pg;
}
}
@@ -1568,11 +1567,9 @@ static void dispatch_rw_block_io(blkif_t
down_write(&mm->mmap_sem);
/* Mark mapped pages as reserved: */
for (i = 0; i < req->nr_segments; i++) {
- unsigned long kvaddr;
struct page *pg;
- kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
- pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ pg = idx_to_page(mmap_idx, pending_idx, i);
SetPageReserved(pg);
if (xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned long uvaddr = MMAP_VADDR(info->user_vstart,
diff -r 7a52ef05b612 -r 0d9c11acc939 drivers/xen/blktap2/blktap.h
--- a/drivers/xen/blktap2/blktap.h Wed Dec 16 16:43:33 2009 +0000
+++ b/drivers/xen/blktap2/blktap.h Wed Dec 16 16:44:12 2009 +0000
@@ -242,6 +242,13 @@ int blktap_request_pool_shrink(void);
int blktap_request_pool_shrink(void);
struct blktap_request *blktap_request_allocate(struct blktap *);
void blktap_request_free(struct blktap *, struct blktap_request *);
-unsigned long request_to_kaddr(struct blktap_request *, int);
-
-#endif
+struct page *request_to_page(struct blktap_request *, int);
+
+static inline unsigned long
+request_to_kaddr(struct blktap_request *req, int seg)
+{
+ unsigned long pfn = page_to_pfn(request_to_page(req, seg));
+ return (unsigned long)pfn_to_kaddr(pfn);
+}
+
+#endif
diff -r 7a52ef05b612 -r 0d9c11acc939 drivers/xen/blktap2/device.c
--- a/drivers/xen/blktap2/device.c Wed Dec 16 16:43:33 2009 +0000
+++ b/drivers/xen/blktap2/device.c Wed Dec 16 16:44:12 2009 +0000
@@ -327,16 +327,15 @@ blktap_unmap(struct blktap *tap, struct
down_write(&tap->ring.vma->vm_mm->mmap_sem);
for (i = 0; i < request->nr_pages; i++) {
+ kvaddr = request_to_kaddr(request, i);
BTDBG("request: %p, seg: %d, kvaddr: 0x%08lx, khandle: %u, "
"uvaddr: 0x%08lx, uhandle: %u\n", request, i,
- request_to_kaddr(request, i),
- request->handles[i].kernel,
+ kvaddr, request->handles[i].kernel,
MMAP_VADDR(tap->ring.user_vstart, usr_idx, i),
request->handles[i].user);
if (!xen_feature(XENFEAT_auto_translated_physmap) &&
request->handles[i].kernel == INVALID_GRANT_HANDLE) {
- kvaddr = request_to_kaddr(request, i);
blktap_umap_uaddr(&init_mm, kvaddr);
flush_tlb_kernel_page(kvaddr);
set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
@@ -459,7 +458,7 @@ blktap_prep_foreign(struct blktap *tap,
table->cnt++;
/* enable chained tap devices */
- tap_page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ tap_page = request_to_page(request, seg);
set_page_private(tap_page, page_private(page));
SetPageBlkback(tap_page);
@@ -488,7 +487,7 @@ blktap_map_foreign(struct blktap *tap,
struct page *page;
int i, grant, err, usr_idx;
struct blktap_ring *ring;
- unsigned long uvaddr, kvaddr, foreign_mfn;
+ unsigned long uvaddr, foreign_mfn;
if (!table->cnt)
return 0;
@@ -506,7 +505,6 @@ blktap_map_foreign(struct blktap *tap,
continue;
uvaddr = MMAP_VADDR(ring->user_vstart, usr_idx, i);
- kvaddr = request_to_kaddr(request, i);
if (unlikely(table->grants[grant].status)) {
BTERR("invalid kernel buffer: could not remap it\n");
@@ -534,18 +532,19 @@ blktap_map_foreign(struct blktap *tap,
if (err)
continue;
- page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ page = request_to_page(request, i);
if (!xen_feature(XENFEAT_auto_translated_physmap))
- set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
+ set_phys_to_machine(page_to_pfn(page),
FOREIGN_FRAME(foreign_mfn));
else if (vm_insert_page(ring->vma, uvaddr, page))
err |= 1;
BTDBG("pending_req: %p, seg: %d, page: %p, "
- "kvaddr: 0x%08lx, khandle: %u, uvaddr: 0x%08lx, "
+ "kvaddr: 0x%p, khandle: %u, uvaddr: 0x%08lx, "
"uhandle: %u\n", request, i, page,
- kvaddr, request->handles[i].kernel,
+ pfn_to_kaddr(page_to_pfn(page)),
+ request->handles[i].kernel,
uvaddr, request->handles[i].user);
}
@@ -598,7 +597,7 @@ blktap_map(struct blktap *tap,
gnttab_set_map_op(&map, kvaddr, flags, gref, domid);
/* enable chained tap devices */
- tap_page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ tap_page = request_to_page(request, seg);
set_page_private(tap_page, page_private(page));
SetPageBlkback(tap_page);
@@ -636,7 +635,7 @@ blktap_device_process_request(struct blk
struct scatterlist *sg;
struct blktap_grant_table table;
unsigned int fsect, lsect, nr_sects;
- unsigned long offset, uvaddr, kvaddr;
+ unsigned long offset, uvaddr;
struct blkif_request blkif_req, *target;
err = -1;
@@ -693,18 +692,17 @@ blktap_device_process_request(struct blk
}
uvaddr = MMAP_VADDR(ring->user_vstart, usr_idx, i);
- kvaddr = request_to_kaddr(request, i);
offset = (uvaddr - ring->vma->vm_start) >> PAGE_SHIFT;
- page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ page = request_to_page(request, i);
ring->foreign_map.map[offset] = page;
SetPageReserved(page);
BTDBG("mapped uaddr %08lx to page %p pfn 0x%lx\n",
- uvaddr, page, __pa(kvaddr) >> PAGE_SHIFT);
+ uvaddr, page, page_to_pfn(page));
BTDBG("offset: 0x%08lx, pending_req: %p, seg: %d, "
- "page: %p, kvaddr: 0x%08lx, uvaddr: 0x%08lx\n",
+ "page: %p, kvaddr: %p, uvaddr: 0x%08lx\n",
offset, request, i,
- page, kvaddr, uvaddr);
+ page, pfn_to_kaddr(page_to_pfn(page)), uvaddr);
request->nr_pages++;
}
diff -r 7a52ef05b612 -r 0d9c11acc939 drivers/xen/blktap2/request.c
--- a/drivers/xen/blktap2/request.c Wed Dec 16 16:43:33 2009 +0000
+++ b/drivers/xen/blktap2/request.c Wed Dec 16 16:44:12 2009 +0000
@@ -123,13 +123,12 @@ blktap_request_pool_free_bucket(struct b
kfree(bucket);
}
-unsigned long
-request_to_kaddr(struct blktap_request *req, int seg)
+struct page *
+request_to_page(struct blktap_request *req, int seg)
{
struct blktap_request_handle *handle = blktap_request_to_handle(req);
int idx = handle->slot * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
- unsigned long pfn = page_to_pfn(handle->bucket->foreign_pages[idx]);
- return (unsigned long)pfn_to_kaddr(pfn);
+ return handle->bucket->foreign_pages[idx];
}
int
diff -r 7a52ef05b612 -r 0d9c11acc939 drivers/xen/gntdev/gntdev.c
--- a/drivers/xen/gntdev/gntdev.c Wed Dec 16 16:43:33 2009 +0000
+++ b/drivers/xen/gntdev/gntdev.c Wed Dec 16 16:44:12 2009 +0000
@@ -586,7 +586,7 @@ static int gntdev_mmap (struct file *fli
kernel_vaddr = get_kernel_vaddr(private_data, slot_index + i);
user_vaddr = get_user_vaddr(vma, i);
- page = pfn_to_page(__pa(kernel_vaddr) >> PAGE_SHIFT);
+ page = private_data->foreign_pages[slot_index + i];
gnttab_set_map_op(&op, kernel_vaddr, flags,
private_data->grants[slot_index+i]
@@ -804,9 +804,9 @@ static pte_t gntdev_clear_pte(struct vm_
GNTDEV_SLOT_NOT_YET_MAPPED;
/* Invalidate the physical to machine mapping for this page. */
- set_phys_to_machine(__pa(get_kernel_vaddr(private_data,
- slot_index))
- >> PAGE_SHIFT, INVALID_P2M_ENTRY);
+ set_phys_to_machine(
+ page_to_pfn(private_data->foreign_pages[slot_index]),
+ INVALID_P2M_ENTRY);
} else {
pte_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
diff -r 7a52ef05b612 -r 0d9c11acc939 drivers/xen/netback/netback.c
--- a/drivers/xen/netback/netback.c Wed Dec 16 16:43:33 2009 +0000
+++ b/drivers/xen/netback/netback.c Wed Dec 16 16:44:12 2009 +0000
@@ -1086,8 +1086,7 @@ static int netbk_tx_check_mop(struct sk_
pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
netif_put(netif);
} else {
- set_phys_to_machine(
- __pa(idx_to_kaddr(pending_idx)) >> PAGE_SHIFT,
+ set_phys_to_machine(idx_to_pfn(pending_idx),
FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
grant_tx_handle[pending_idx] = mop->handle;
}
@@ -1103,8 +1102,7 @@ static int netbk_tx_check_mop(struct sk_
/* Check error status: if okay then remember grant handle. */
newerr = (++mop)->status;
if (likely(!newerr)) {
- set_phys_to_machine(
- __pa(idx_to_kaddr(pending_idx))>>PAGE_SHIFT,
+ set_phys_to_machine(idx_to_pfn(pending_idx),
FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
grant_tx_handle[pending_idx] = mop->handle;
/* Had a previous error? Invalidate this fragment. */
@@ -1157,7 +1155,7 @@ static void netbk_fill_frags(struct sk_b
&pending_inuse_head);
txp = &pending_tx_info[pending_idx].req;
- frag->page = virt_to_page(idx_to_kaddr(pending_idx));
+ frag->page = mmap_pages[pending_idx];
frag->size = txp->size;
frag->page_offset = txp->offset;
diff -r 7a52ef05b612 -r 0d9c11acc939 drivers/xen/scsiback/scsiback.c
--- a/drivers/xen/scsiback/scsiback.c Wed Dec 16 16:43:33 2009 +0000
+++ b/drivers/xen/scsiback/scsiback.c Wed Dec 16 16:44:12 2009 +0000
@@ -281,6 +281,8 @@ static int scsiback_gnttab_data_map(vscs
BUG_ON(err);
for (i = 0; i < nr_segments; i++) {
+ struct page *pg;
+
if (unlikely(map[i].status != 0)) {
printk(KERN_ERR "scsiback: invalid buffer -- could not remap it\n");
map[i].handle = SCSIBACK_INVALID_HANDLE;
@@ -292,11 +294,12 @@ static int scsiback_gnttab_data_map(vscs
if (err)
continue;
- set_phys_to_machine(__pa(vaddr(
- pending_req, i)) >> PAGE_SHIFT,
+ pg = pending_pages[vaddr_pagenr(pending_req, i)];
+
+ set_phys_to_machine(page_to_pfn(pg),
FOREIGN_FRAME(map[i].dev_bus_addr >>
PAGE_SHIFT));
- pending_req->sgl[i].page = virt_to_page(vaddr(pending_req, i));
+ pending_req->sgl[i].page = pg;
pending_req->sgl[i].offset = ring_req->seg[i].offset;
pending_req->sgl[i].length = ring_req->seg[i].length;
data_len += pending_req->sgl[i].length;