# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1188283175 -32400
# Node ID 07bf1e74559d4f4a0718632dfd06bd1f5d17e353
# Parent 057b47cada5c42a934c320f95863af2a6f031522
foreign p2m exposure. linux side.
PATCHNAME: foreign_p2m_exposure_linux_side
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
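For reference, the expected calling sequence from a privileged user-space tool is roughly the sketch below. It is not part of the patch: the header paths, the privcmd device usage and the way memmap_info/p2m_size are obtained are assumptions based on the usual privcmd interface of this era, and error handling is abbreviated.

/* Hedged sketch only -- not part of this patch.  Shows how a dom0 tool
 * might drive IA64_DOM0VP_expose_foreign_p2m through privcmd.  Header
 * paths and the source of memmap_info/p2m_size are assumptions. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/xen.h>            /* __HYPERVISOR_ia64_dom0vp_op, domid_t */
#include <xen/arch-ia64.h>      /* IA64_DOM0VP_expose_foreign_p2m */
#include <xen/sys/privcmd.h>    /* privcmd_hypercall_t, IOCTL_PRIVCMD_HYPERCALL */

/* memmap_info: xen_ia64_memmap_info of the foreign domain, built elsewhere.
 * p2m_size:    size of the foreign p2m table, page aligned. */
static void *expose_foreign_p2m(int privcmd_fd, domid_t domid,
                                struct xen_ia64_memmap_info *memmap_info,
                                size_t p2m_size)
{
    /* Read-only, non-executable mapping: the kernel side rejects vmas
     * with VM_WRITE or VM_EXEC and requires va == vma->vm_start. */
    void *va = mmap(NULL, p2m_size, PROT_READ, MAP_SHARED, privcmd_fd, 0);
    if (va == MAP_FAILED)
        return NULL;

    privcmd_hypercall_t hc = {
        .op = __HYPERVISOR_ia64_dom0vp_op,
        .arg = {
            IA64_DOM0VP_expose_foreign_p2m,  /* arg0: cmd */
            (unsigned long)va,               /* arg1: va of the new vma */
            domid,                           /* arg2: foreign domid */
            (unsigned long)memmap_info,      /* arg3: __user memmap_info */
            0,                               /* arg4: flags */
        },
    };
    if (ioctl(privcmd_fd, IOCTL_PRIVCMD_HYPERCALL, &hc) < 0) {
        munmap(va, p2m_size);
        return NULL;
    }
    /* munmap()/exit later runs xen_ia64_privcmd_vma_close(), whose new
     * callback issues HYPERVISOR_unexpose_foreign_p2m() automatically. */
    return va;
}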
diff -r 057b47cada5c -r 07bf1e74559d arch/ia64/xen/hypervisor.c
--- a/arch/ia64/xen/hypervisor.c Thu Aug 23 15:18:40 2007 -0600
+++ b/arch/ia64/xen/hypervisor.c Tue Aug 28 15:39:35 2007 +0900
@@ -584,6 +584,10 @@ struct xen_ia64_privcmd_range {
unsigned long pgoff; // in PAGE_SIZE
struct resource* res;
+ // for foreign domain p2m mapping
+ void* private;
+ void (*callback)(struct xen_ia64_privcmd_range* range, void* arg);
+
unsigned long num_entries;
struct xen_ia64_privcmd_entry entries[0];
};
@@ -765,6 +769,9 @@ xen_ia64_privcmd_vma_close(struct vm_are
BUG_ON(entry->gpfn != INVALID_GPFN);
}
#endif
+ if (privcmd_range->callback)
+ (*privcmd_range->callback)(privcmd_range,
+ privcmd_range->private);
release_resource(privcmd_range->res);
kfree(privcmd_range->res);
vfree(privcmd_range);
@@ -825,6 +832,8 @@ privcmd_mmap(struct file * file, struct
atomic_set(&privcmd_range->ref_count, 1);
privcmd_range->pgoff = vma->vm_pgoff;
privcmd_range->num_entries = num_entries;
+ privcmd_range->private = NULL;
+ privcmd_range->callback = NULL;
for (i = 0; i < privcmd_range->num_entries; i++) {
xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
}
@@ -979,6 +988,12 @@ static struct notifier_block p2m_expose_
};
#endif
+/* bytes needed for the p2m table covering num_pfn pfns:
+ one PTE page per PTRS_PER_PTE guest pfns, rounded up */
+static inline unsigned long
+p2m_table_size(unsigned long num_pfn)
+{
+ return ((num_pfn + PTRS_PER_PTE - 1) / PTRS_PER_PTE) << PAGE_SHIFT;
+}
+
static int
p2m_expose_init(void)
{
@@ -1018,8 +1033,7 @@ p2m_expose_init(void)
if (xen_ia64_p2m_expose_use_dtr) {
unsigned long page_size = 0;
unsigned long granule_pfn = 0;
- p2m_size = ((p2m_max_low_pfn - p2m_min_low_pfn +
- PTRS_PER_PTE - 1) / PTRS_PER_PTE) << PAGE_SHIFT;
+ p2m_size = p2m_table_size(p2m_max_low_pfn - p2m_min_low_pfn);
for (i = 0;
i < sizeof(p2m_page_shifts)/sizeof(p2m_page_shifts[0]);
i++) {
@@ -1036,8 +1050,7 @@ p2m_expose_init(void)
granule_pfn);
num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
p2m_expose_size = num_pfn << PAGE_SHIFT;
- p2m_size = ((num_pfn + PTRS_PER_PTE - 1) /
- PTRS_PER_PTE) << PAGE_SHIFT;
+ p2m_size = p2m_table_size(num_pfn);
p2m_size = ROUNDUP(p2m_size, granule_pfn << PAGE_SHIFT);
if (p2m_size == page_size)
break;
@@ -1057,8 +1070,7 @@ p2m_expose_init(void)
p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn, p2m_granule_pfn);
num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
p2m_expose_size = num_pfn << PAGE_SHIFT;
- p2m_size = ((num_pfn + PTRS_PER_PTE - 1) / PTRS_PER_PTE) <<
- PAGE_SHIFT;
+ p2m_size = p2m_table_size(num_pfn);
p2m_size = ROUNDUP(p2m_size, p2m_granule_pfn << PAGE_SHIFT);
align = max(privcmd_resource_align,
p2m_granule_pfn << PAGE_SHIFT);
@@ -1204,6 +1216,205 @@ EXPORT_SYMBOL_GPL(p2m_convert_max_pfn);
EXPORT_SYMBOL_GPL(p2m_convert_max_pfn);
EXPORT_SYMBOL_GPL(p2m_pte);
EXPORT_SYMBOL_GPL(p2m_phystomach);
+
+///////////////////////////////////////////////////////////////////////////
+// foreign domain p2m mapping
+#include <asm/xen/xencomm.h>
+#include <xen/public/privcmd.h>
+
+struct foreign_p2m_private {
+ unsigned long gpfn;
+ domid_t domid;
+};
+
+static void
+xen_foreign_p2m_unexpose(struct xen_ia64_privcmd_range* privcmd_range,
+ void* arg)
+{
+ struct foreign_p2m_private* private = (struct foreign_p2m_private*)arg;
+ int ret;
+
+ privcmd_range->private = NULL;
+ privcmd_range->callback = NULL;
+
+ ret = HYPERVISOR_unexpose_foreign_p2m(private->gpfn, private->domid);
+ if (ret)
+ printk(KERN_WARNING
+ "unexpose_foreign_p2m hypercall failed.\n");
+ kfree(private);
+}
+
+int
+xen_foreign_p2m_expose(privcmd_hypercall_t* hypercall)
+{
+ // hypercall->
+ // arg0: cmd = IA64_DOM0VP_expose_foreign_p2m
+ // arg1: va
+ // arg2: domid
+ // arg3: __user* memmap_info
+ // arg4: flags
+
+ int ret = 0;
+ struct mm_struct* mm = current->mm;
+
+ unsigned long vaddr = hypercall->arg[1];
+ domid_t domid = hypercall->arg[2];
+ struct xen_ia64_memmap_info __user *u_memmap_info =
+ (struct xen_ia64_memmap_info __user *)hypercall->arg[3];
+
+ struct xen_ia64_memmap_info memmap_info;
+ size_t memmap_size;
+ struct xen_ia64_memmap_info* k_memmap_info = NULL;
+ unsigned long max_gpfn;
+ unsigned long p2m_size;
+ struct resource* res;
+ unsigned long gpfn;
+
+ struct vm_area_struct* vma;
+ void* p;
+ unsigned long prev_src_gpfn_end;
+
+ struct xen_ia64_privcmd_vma* privcmd_vma;
+ struct xen_ia64_privcmd_range* privcmd_range;
+ struct foreign_p2m_private* private = NULL;
+
+ BUG_ON(hypercall->arg[0] != IA64_DOM0VP_expose_foreign_p2m);
+
+ private = kmalloc(sizeof(*private), GFP_KERNEL);
+ if (private == NULL) {
+ ret = -ENOMEM;
+ goto kfree_out;
+ }
+
+ if (copy_from_user(&memmap_info, u_memmap_info, sizeof(memmap_info))) {
+ ret = -EFAULT;
+ goto kfree_out;
+ }
+ /* memmap_info integrity check */
+ if (memmap_info.efi_memdesc_size < sizeof(efi_memory_desc_t) ||
+ memmap_info.efi_memmap_size < memmap_info.efi_memdesc_size ||
+ (memmap_info.efi_memmap_size % memmap_info.efi_memdesc_size)
+ != 0) {
+ ret = -EINVAL;
+ goto kfree_out;
+ }
+
+ memmap_size = sizeof(*k_memmap_info) + memmap_info.efi_memmap_size;
+ k_memmap_info = kmalloc(memmap_size, GFP_KERNEL);
+ if (k_memmap_info == NULL) {
+ ret = -ENOMEM;
+ goto kfree_out;
+ }
+ if (copy_from_user(k_memmap_info, u_memmap_info, memmap_size)) {
+ ret = -EFAULT;
+ goto kfree_out;
+ }
+ /* k_memmap_info integrity check is done by the expose foreign p2m
+ hypercall */
+
+ max_gpfn = HYPERVISOR_memory_op(XENMEM_maximum_gpfn, &domid);
+ if ((long)max_gpfn < 0) {
+ ret = max_gpfn;
+ goto kfree_out;
+ }
+ p2m_size = p2m_table_size(max_gpfn + 1);
+
+ down_write(&mm->mmap_sem);
+
+ vma = find_vma(mm, vaddr);
+ if (vma == NULL || vma->vm_ops != &xen_ia64_privcmd_vm_ops ||
+ vaddr != vma->vm_start ||
+ (vma->vm_flags & VM_WRITE) || (vma->vm_flags & VM_EXEC) ||
+ !privcmd_enforce_singleshot_mapping(vma)) {
+ ret = -EINVAL;
+ goto mmap_out;
+ }
+
+ privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
+ res = privcmd_vma->range->res;
+ if (p2m_size > (res->end - res->start + 1) ||
+ p2m_size > vma->vm_end - vma->vm_start) {
+ ret = -EINVAL;
+ goto mmap_out;
+ }
+
+ gpfn = res->start >> PAGE_SHIFT;
+ // arg0: dest_gpfn
+ // arg1: domid
+ // arg2: XEN_GUEST_HANDLE(char) buffer: memmap_info
+ // arg3: flags
+ // The hypercall checks its integrity, simplifies it and
+ // copies it back for us.
+ ret = xencomm_arch_expose_foreign_p2m(gpfn, domid,
+ xencomm_map_no_alloc(k_memmap_info, memmap_size),
+ hypercall->arg[4]);
+ if (ret)
+ goto mmap_out;
+
+ privcmd_range = (struct xen_ia64_privcmd_range*)privcmd_vma->range;
+ prev_src_gpfn_end = 0;
+ for (p = k_memmap_info->memdesc;
+ p < (void*)&k_memmap_info->memdesc[0] +
+ k_memmap_info->efi_memmap_size;
+ p += k_memmap_info->efi_memdesc_size) {
+ efi_memory_desc_t* md = p;
+ unsigned long src_gpfn = md->phys_addr >> PAGE_SHIFT;
+ unsigned long src_gpfn_end =
+ (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
+ PAGE_SHIFT;
+ unsigned long num_src_gpfn;
+ unsigned long gpfn_offset;
+ unsigned long size;
+ unsigned int i;
+
+ if (src_gpfn <= prev_src_gpfn_end)
+ src_gpfn = prev_src_gpfn_end + 1;
+ if (src_gpfn_end <= prev_src_gpfn_end)
+ continue;
+
+ src_gpfn &= ~(PTRS_PER_PTE - 1);
+ src_gpfn_end = (src_gpfn_end + PTRS_PER_PTE - 1) &
+ ~(PTRS_PER_PTE - 1);
+ num_src_gpfn = src_gpfn_end - src_gpfn;
+ gpfn_offset = src_gpfn / PTRS_PER_PTE;
+ size = p2m_table_size(num_src_gpfn);
+
+ prev_src_gpfn_end = src_gpfn_end;
+ ret = remap_pfn_range(vma,
+ vaddr + (gpfn_offset << PAGE_SHIFT),
+ gpfn + gpfn_offset, size,
+ vma->vm_page_prot);
+ if (ret) {
+ /* roll back the entries set up by previous iterations */
+ for (i = 0; i < gpfn_offset; i++) {
+ struct xen_ia64_privcmd_entry* entry =
+ &privcmd_range->entries[i];
+ BUG_ON(atomic_read(&entry->map_count) != 1 &&
+ atomic_read(&entry->map_count) != 0);
+ atomic_set(&entry->map_count, 0);
+ entry->gpfn = INVALID_GPFN;
+ }
+ (void)HYPERVISOR_unexpose_foreign_p2m(gpfn, domid);
+ goto mmap_out;
+ }
+
+ for (i = gpfn_offset;
+ i < gpfn_offset + (size >> PAGE_SHIFT);
+ i++) {
+ struct xen_ia64_privcmd_entry* entry =
+ &privcmd_range->entries[i];
+ BUG_ON(atomic_read(&entry->map_count) != 0);
+ BUG_ON(entry->gpfn != INVALID_GPFN);
+ atomic_inc(&entry->map_count);
+ entry->gpfn = gpfn + i;
+ }
+ }
+
+ private->gpfn = gpfn;
+ private->domid = domid;
+
+ privcmd_range->callback = &xen_foreign_p2m_unexpose;
+ privcmd_range->private = private;
+
+mmap_out:
+ up_write(&mm->mmap_sem);
+kfree_out:
+ kfree(k_memmap_info);
+ if (ret != 0)
+ kfree(private);
+ return ret;
+}
#endif
///////////////////////////////////////////////////////////////////////////
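To make the shared arithmetic easier to follow, here is a stand-alone sketch of the p2m_table_size() calculation and of the PTRS_PER_PTE rounding that xen_foreign_p2m_expose() applies to each EFI memory descriptor. PAGE_SHIFT = 14 and the derived PTRS_PER_PTE = 2048 are only assumptions for a common 16KB-page ia64 configuration, not values taken from this patch.

/* Stand-alone sketch of the p2m_table_size()/rounding arithmetic above.
 * PAGE_SHIFT = 14 (16KB pages) and PTRS_PER_PTE = PAGE_SIZE/8 = 2048 are
 * assumptions for a typical ia64 configuration. */
#include <stdio.h>

#define PAGE_SHIFT    14
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define PTRS_PER_PTE  (PAGE_SIZE / sizeof(unsigned long))

/* one PTE page covers PTRS_PER_PTE guest pfns, so the exposed p2m table
 * needs one page per PTRS_PER_PTE pfns, rounded up */
static unsigned long p2m_table_size(unsigned long num_pfn)
{
    return ((num_pfn + PTRS_PER_PTE - 1) / PTRS_PER_PTE) << PAGE_SHIFT;
}

int main(void)
{
    /* e.g. a 4GB foreign domain: 4GB / 16KB = 262144 pfns -> 2MB of p2m */
    unsigned long num_pfn = (4UL << 30) >> PAGE_SHIFT;

    /* the remap loop rounds each memory descriptor's pfn range out to
     * PTRS_PER_PTE boundaries so that whole PTE pages are mapped */
    unsigned long src_gpfn = 3000, src_gpfn_end = 70000;
    unsigned long lo = src_gpfn & ~(PTRS_PER_PTE - 1);
    unsigned long hi = (src_gpfn_end + PTRS_PER_PTE - 1) & ~(PTRS_PER_PTE - 1);

    printf("p2m table for %lu pfns: %lu bytes\n",
           num_pfn, p2m_table_size(num_pfn));
    printf("descriptor [%lu,%lu) rounded to [%lu,%lu): %lu bytes of p2m\n",
           src_gpfn, src_gpfn_end, lo, hi, p2m_table_size(hi - lo));
    return 0;
}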
diff -r 057b47cada5c -r 07bf1e74559d arch/ia64/xen/xcom_hcall.c
--- a/arch/ia64/xen/xcom_hcall.c Thu Aug 23 15:18:40 2007 -0600
+++ b/arch/ia64/xen/xcom_hcall.c Tue Aug 28 15:39:35 2007 +0900
@@ -381,7 +381,11 @@ xencomm_hypercall_memory_op(unsigned int
return rc;
xc_area++;
break;
-
+
+ case XENMEM_maximum_gpfn:
+ argsize = 0;
+ break;
+
case XENMEM_maximum_ram_page:
argsize = 0;
break;
diff -r 057b47cada5c -r 07bf1e74559d arch/ia64/xen/xcom_privcmd.c
--- a/arch/ia64/xen/xcom_privcmd.c Thu Aug 23 15:18:40 2007 -0600
+++ b/arch/ia64/xen/xcom_privcmd.c Tue Aug 28 15:39:35 2007 +0900
@@ -732,6 +732,9 @@ xencomm_privcmd_ia64_dom0vp_op(privcmd_h
ret = -EFAULT;
break;
}
+ case IA64_DOM0VP_expose_foreign_p2m:
+ ret = xen_foreign_p2m_expose(hypercall);
+ break;
default:
printk("%s: unknown IA64 DOM0VP op %d\n", __func__, cmd);
ret = -EINVAL;
diff -r 057b47cada5c -r 07bf1e74559d include/asm-ia64/hypercall.h
--- a/include/asm-ia64/hypercall.h Thu Aug 23 15:18:40 2007 -0600
+++ b/include/asm-ia64/hypercall.h Tue Aug 28 15:39:35 2007 +0900
@@ -373,6 +373,23 @@ HYPERVISOR_expose_p2m(unsigned long conv
return _hypercall5(unsigned long, ia64_dom0vp_op,
IA64_DOM0VP_expose_p2m, conv_start_gpfn,
assign_start_gpfn, expose_size, granule_pfn);
+}
+
+static inline int
+xencomm_arch_expose_foreign_p2m(unsigned long gpfn,
+ domid_t domid, struct xencomm_handle *arg,
+ unsigned long flags)
+{
+ return _hypercall5(int, ia64_dom0vp_op,
+ IA64_DOM0VP_expose_foreign_p2m,
+ gpfn, domid, arg, flags);
+}
+
+static inline int
+HYPERVISOR_unexpose_foreign_p2m(unsigned long gpfn, domid_t domid)
+{
+ return _hypercall3(int, ia64_dom0vp_op,
+ IA64_DOM0VP_unexpose_foreign_p2m, gpfn, domid);
}
#endif
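These two wrappers bracket the lifetime of the privcmd mapping: xencomm_arch_expose_foreign_p2m() is issued from xen_foreign_p2m_expose() while the vma is being populated, and HYPERVISOR_unexpose_foreign_p2m() is deferred to the close callback stored in xen_ia64_privcmd_range. The sketch below shows only that callback/ownership pattern in isolation; every name in it is hypothetical and it is not code from this tree.

/* Hypothetical, Xen-independent sketch of the cleanup-callback pattern that
 * xen_ia64_privcmd_range gains in this patch.  The rule it illustrates:
 * whoever installs ->callback also hands over ->private, and the close path
 * clears both and frees the payload exactly once. */
#include <stdlib.h>

struct range_ctx {
    void *private;
    void (*callback)(struct range_ctx *range, void *arg);
};

struct unexpose_arg {
    unsigned long gpfn;
    unsigned short domid;
};

static void on_range_close(struct range_ctx *range, void *arg)
{
    struct unexpose_arg *a = arg;

    range->callback = NULL;
    range->private = NULL;
    /* ... the real code issues HYPERVISOR_unexpose_foreign_p2m()
     * for a->gpfn / a->domid here ... */
    free(a);                      /* callee frees what the installer allocated */
}

static int install_unexpose_callback(struct range_ctx *range,
                                     unsigned long gpfn, unsigned short domid)
{
    struct unexpose_arg *a = malloc(sizeof(*a));

    if (a == NULL)
        return -1;
    a->gpfn = gpfn;
    a->domid = domid;
    range->private = a;           /* payload first, then publish the callback */
    range->callback = on_range_close;
    return 0;
}

static void range_close(struct range_ctx *range)
{
    /* mirrors the hook added to xen_ia64_privcmd_vma_close() */
    if (range->callback)
        range->callback(range, range->private);
}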
diff -r 057b47cada5c -r 07bf1e74559d include/asm-ia64/xen/xcom_hcall.h
--- a/include/asm-ia64/xen/xcom_hcall.h Thu Aug 23 15:18:40 2007 -0600
+++ b/include/asm-ia64/xen/xcom_hcall.h Tue Aug 28 15:39:35 2007 +0900
@@ -60,4 +60,6 @@ struct privcmd_hypercall;
struct privcmd_hypercall;
extern int privcmd_hypercall(struct privcmd_hypercall *hypercall);
+extern int xen_foreign_p2m_expose(struct privcmd_hypercall *hypercall);
+
#endif /* _LINUX_XENCOMM_HCALL_H_ */
diff -r 057b47cada5c -r 07bf1e74559d include/xen/interface/arch-ia64.h
--- a/include/xen/interface/arch-ia64.h Thu Aug 23 15:18:40 2007 -0600
+++ b/include/xen/interface/arch-ia64.h Tue Aug 28 15:39:35 2007 +0900
@@ -470,6 +470,13 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_guest_conte
/* Add an I/O port space range */
#define IA64_DOM0VP_add_io_space 11
+/* expose the foreign domain's p2m table to the privileged domain */
+#define IA64_DOM0VP_expose_foreign_p2m 12
+#define IA64_DOM0VP_EFP_ALLOC_PTE 0x1 /* allocate p2m table */
+
+/* unexpose the foreign domain's p2m table from the privileged domain */
+#define IA64_DOM0VP_unexpose_foreign_p2m 13
+
// flags for page assignement to pseudo physical address space
#define _ASSIGN_readonly 0
#define ASSIGN_readonly (1UL << _ASSIGN_readonly)