Hi all,
This patch fixes the copy_from/to_guest problem.
As Akio reported, the modularised netback causes dom0 to go down.
The following sequence happens in gnttab_transfer()@
xen/common/grant_table.c:
gnttab_transfer()
=> steal_page()
=> assign_domain_page_cmpxchg_rel()
=> domain_page_flush()
=> domain_flush_vtlb_all() // all TLBs are flushed
...
=> __copy_to_guest_offset() // always fail to copy
The embedded netback module has no problem because it uses TR-pinned
data. But the modularised one lies outside the TR, so the copy_from/to_guest
issue must be solved in order to modularise drivers.
The attached patch fixes the copy_from/to_guest issue for the modularised netback.
But I think this implementation is not beautiful.
Thanks,
Kouya
Signed-off-by: Kouya SHIMURA <kouya@xxxxxxxxxxxxxx>
diff -r 15498beef5d8 linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c
--- a/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c Tue Aug 08 14:42:34 2006 -0600
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c Thu Aug 10 21:25:22 2006 +0900
@@ -371,6 +371,8 @@ int
int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
+ __u64 va1,va2,pa1,pa2;
+
if (cmd == GNTTABOP_map_grant_ref) {
unsigned int i;
for (i = 0; i < count; i++) {
@@ -378,8 +380,27 @@ HYPERVISOR_grant_table_op(unsigned int c
(struct gnttab_map_grant_ref*)uop + i);
}
}
-
- return ____HYPERVISOR_grant_table_op(cmd, uop, count);
+ va1 = (__u64)uop & PAGE_MASK;
+ pa1 = pa2 = 0;
+ if ((REGION_NUMBER(va1) == 5) &&
+ ((va1 - KERNEL_START) >= KERNEL_TR_PAGE_SIZE)) {
+ pa1 = ia64_tpa(va1);
+ if (cmd <= GNTTABOP_transfer) {
+ static uint32_t uop_size[GNTTABOP_transfer+1] = {
+ sizeof(struct gnttab_map_grant_ref),
+ sizeof(struct gnttab_unmap_grant_ref),
+ sizeof(struct gnttab_setup_table),
+ sizeof(struct gnttab_dump_table),
+ sizeof(struct gnttab_transfer),
+ };
+ va2 = ((__u64)uop + uop_size[cmd]*count - 1) & PAGE_MASK;
+ if (va1 != va2) {
+ BUG_ON (va2 > va1 + PAGE_SIZE); // maximum size of uop is 2 pages
+ pa2 = ia64_tpa(va2);
+ }
+ }
+ }
+ return ____HYPERVISOR_grant_table_op(cmd, uop, count, pa1, pa2);
}
EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
diff -r 15498beef5d8 linux-2.6-xen-sparse/include/asm-ia64/hypercall.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h Tue Aug 08 14:42:34 2006 -0600
+++ b/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h Thu Aug 10 21:25:22 2006 +0900
@@ -275,9 +275,9 @@ HYPERVISOR_physdev_op(
//XXX __HYPERVISOR_grant_table_op is used for this hypercall constant.
static inline int
____HYPERVISOR_grant_table_op(
- unsigned int cmd, void *uop, unsigned int count)
-{
- return _hypercall3(int, grant_table_op, cmd, uop, count);
+ unsigned int cmd, void *uop, unsigned int count, unsigned long pa1, unsigned long pa2)
+{
+ return _hypercall5(int, grant_table_op, cmd, uop, count, pa1, pa2);
}
int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
diff -r 15498beef5d8 xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c Tue Aug 08 14:42:34 2006 -0600
+++ b/xen/arch/ia64/xen/hypercall.c Thu Aug 10 21:25:22 2006 +0900
@@ -101,6 +101,13 @@ xen_hypercall (struct pt_regs *regs)
xen_hypercall (struct pt_regs *regs)
{
uint32_t cmd = (uint32_t)regs->r2;
+ struct vcpu *v = current;
+
+ if (cmd == __HYPERVISOR_grant_table_op) {
+ v->arch.hypercall_param.va = regs->r15;
+ v->arch.hypercall_param.pa1 = regs->r17;
+ v->arch.hypercall_param.pa2 = regs->r18;
+ }
if (cmd < nr_hypercalls)
regs->r8 = (*ia64_hypercall_table[cmd])(
@@ -113,6 +120,7 @@ xen_hypercall (struct pt_regs *regs)
else
regs->r8 = -ENOSYS;
+ v->arch.hypercall_param.va = 0;
return IA64_NO_FAULT;
}
diff -r 15498beef5d8 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c Tue Aug 08 14:42:34 2006 -0600
+++ b/xen/arch/ia64/xen/vcpu.c Thu Aug 10 21:25:22 2006 +0900
@@ -2215,3 +2215,26 @@ IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 v
return IA64_NO_FAULT;
}
+
+int ia64_map_hypercall_param(void)
+{
+ struct vcpu *v = current;
+ u64 vaddr = v->arch.hypercall_param.va & PAGE_MASK;
+ u64 pteval, logps, itir;
+ struct p2m_entry entry;
+
+ if (v->arch.hypercall_param.va == 0)
+ return FALSE;
+ itir = PAGE_SHIFT<<2;
+ pteval = (v->arch.hypercall_param.pa1 & _PAGE_PPN_MASK)|__DIRTY_BITS |_PAGE_PL_2 |_PAGE_AR_RWX;
+ pteval = translate_domain_pte(pteval, vaddr, itir, &logps, &entry);
+ vcpu_itc_no_srlz(current, 2, vaddr, pteval, -1UL, PAGE_SHIFT);
+ if (v->arch.hypercall_param.pa2) {
+ vaddr += PAGE_SIZE;
+ pteval = (v->arch.hypercall_param.pa2 & _PAGE_PPN_MASK)|__DIRTY_BITS |_PAGE_PL_2 |_PAGE_AR_RWX;
+ pteval = translate_domain_pte(pteval, vaddr, itir, &logps, &entry);
+ vcpu_itc_no_srlz(current, 2, vaddr, pteval, -1UL, PAGE_SHIFT);
+ }
+ ia64_srlz_d();
+ return TRUE;
+}
diff -r 15498beef5d8 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h Tue Aug 08 14:42:34 2006 -0600
+++ b/xen/include/asm-ia64/domain.h Thu Aug 10 21:25:22 2006 +0900
@@ -142,6 +142,12 @@ struct arch_domain {
(sizeof(vcpu_info_t) * (v)->vcpu_id + \
offsetof(vcpu_info_t, evtchn_upcall_mask))
+struct hypercall_param {
+ unsigned long va;
+ unsigned long pa1;
+ unsigned long pa2;
+};
+
struct arch_vcpu {
/* Save the state of vcpu.
This is the first entry to speed up accesses. */
@@ -185,6 +191,9 @@ struct arch_vcpu {
char irq_new_pending;
char irq_new_condition; // vpsr.i/vtpr change, check for pending VHPI
char hypercall_continuation;
+
+ struct hypercall_param hypercall_param; // used to remap a hypercall parameter
+
//for phycial emulation
unsigned long old_rsc;
int mode_flags;
diff -r 15498beef5d8 xen/include/asm-ia64/uaccess.h
--- a/xen/include/asm-ia64/uaccess.h Tue Aug 08 14:42:34 2006 -0600
+++ b/xen/include/asm-ia64/uaccess.h Thu Aug 10 21:25:22 2006 +0900
@@ -211,16 +211,28 @@ extern unsigned long __must_check __copy
extern unsigned long __must_check __copy_user (void __user *to, const void __user *from, unsigned long count);
+extern int ia64_map_hypercall_param(void);
+
static inline unsigned long
__copy_to_user (void __user *to, const void *from, unsigned long count)
{
- return __copy_user(to, (void __user *) from, count);
+ unsigned long len;
+ len = __copy_user(to, (void __user *) from, count);
+ if (len == 0) return 0;
+ if (ia64_map_hypercall_param())
+ len = __copy_user(to, (void __user *) from, count); /* retry */
+ return len;
}
static inline unsigned long
__copy_from_user (void *to, const void __user *from, unsigned long count)
{
- return __copy_user((void __user *) to, from, count);
+ unsigned long len;
+ len = __copy_user((void __user *) to, from, count);
+ if (len == 0) return 0;
+ if (ia64_map_hypercall_param())
+ len = __copy_user((void __user *) to, from, count); /* retry */
+ return len;
}
#define __copy_to_user_inatomic __copy_to_user
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel
|