# HG changeset patch
# User djm@xxxxxxxxxxxxxxx
# Node ID 2c823d27cf3392d986d16f57c7431a7f65dd856f
# Parent 5cd24dd33033224243fd0c34dd66a52fa8b27e30
Catchup with xen-unstable, add ia64 specifics to tools, and some VTI merge
Signed-off-by: Kevin Tian <Kevin.tian@xxxxxxxxx>
diff -r 5cd24dd33033 -r 2c823d27cf33 tools/libxc/Makefile
--- a/tools/libxc/Makefile Tue Sep 13 19:08:00 2005
+++ b/tools/libxc/Makefile Tue Sep 13 19:42:33 2005
@@ -23,6 +23,10 @@
SRCS += xc_physdev.c
SRCS += xc_private.c
SRCS += xc_sedf.c
+BUILD_SRCS += xc_linux_build.c
+BUILD_SRCS += xc_load_bin.c
+BUILD_SRCS += xc_load_elf.c
+BUILD_SRCS += xg_private.c
ifeq ($(XEN_TARGET_ARCH),ia64)
BUILD_SRCS += xc_ia64_stubs.c
@@ -31,13 +35,9 @@
SRCS += xc_ptrace_core.c
BUILD_SRCS := xc_load_aout9.c
-BUILD_SRCS += xc_load_bin.c
-BUILD_SRCS += xc_load_elf.c
-BUILD_SRCS += xc_linux_build.c
BUILD_SRCS += xc_linux_restore.c
BUILD_SRCS += xc_linux_save.c
BUILD_SRCS += xc_vmx_build.c
-BUILD_SRCS += xg_private.c
endif
CFLAGS += -Wall
diff -r 5cd24dd33033 -r 2c823d27cf33 tools/libxc/xc_ia64_stubs.c
--- a/tools/libxc/xc_ia64_stubs.c Tue Sep 13 19:08:00 2005
+++ b/tools/libxc/xc_ia64_stubs.c Tue Sep 13 19:42:33 2005
@@ -1,12 +1,16 @@
-#include "xc_private.h"
+#include "xg_private.h"
+#include "xenguest.h"
-int xc_linux_save(int xc_handle, int io_fd, u32 dom)
+int xc_linux_save(int xc_handle, int io_fd, u32 dom, u32 max_iters,
+ u32 max_factor, u32 flags)
{
PERROR("xc_linux_save not implemented\n");
return -1;
}
-int xc_linux_restore(int xc_handle, int io_fd, u32 dom, unsigned long nr_pfns)
+int xc_linux_restore(int xc_handle, int io_fd, u32 dom, unsigned long nr_pfns,
+ unsigned int store_evtchn, unsigned long *store_mfn,
+ unsigned int console_evtchn, unsigned long *console_mfn)
{
PERROR("xc_linux_restore not implemented\n");
return -1;
@@ -20,7 +24,10 @@
const char *ramdisk_name,
const char *cmdline,
unsigned int control_evtchn,
- unsigned long flags)
+ unsigned long flags,
+ unsigned int vcpus,
+ unsigned int store_evtchn,
+ unsigned long *store_mfn)
{
PERROR("xc_vmx_build not implemented\n");
return -1;
diff -r 5cd24dd33033 -r 2c823d27cf33 tools/libxc/xc_linux_build.c
--- a/tools/libxc/xc_linux_build.c Tue Sep 13 19:08:00 2005
+++ b/tools/libxc/xc_linux_build.c Tue Sep 13 19:42:33 2005
@@ -296,12 +296,14 @@
unsigned long shared_info_frame,
unsigned long flags,
unsigned int vcpus,
- unsigned int store_evtchn, unsigned long *store_mfn)
+ unsigned int store_evtchn, unsigned long *store_mfn,
+ unsigned int console_evtchn, unsigned long *console_mfn)
{
unsigned long *page_array = NULL;
struct load_funcs load_funcs;
struct domain_setup_info dsi;
- unsigned long start_page;
+ unsigned long start_page, pgnr;
+ start_info_t *start_info;
int rc;
rc = probeimageformat(image, image_size, &load_funcs);
@@ -318,14 +320,14 @@
dsi.v_end = round_pgup(dsi.v_end);
start_page = dsi.v_start >> PAGE_SHIFT;
- nr_pages = (dsi.v_end - dsi.v_start) >> PAGE_SHIFT;
- if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
+ pgnr = (dsi.v_end - dsi.v_start) >> PAGE_SHIFT;
+ if ( (page_array = malloc(pgnr * sizeof(unsigned long))) == NULL )
{
PERROR("Could not allocate memory");
goto error_out;
}
- if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array, start_page, nr_pages) != nr_pages )
+ if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array, start_page, pgnr) != pgnr )
{
PERROR("Could not get the page frame list");
goto error_out;
@@ -335,6 +337,33 @@
&dsi);
*pvke = dsi.v_kernentry;
+
+ /* Now need to retrieve machine pfn for system pages:
+ * start_info/store/console
+ */
+ pgnr = 3;
+ if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array, nr_pages - 3, pgnr) != pgnr)
+ {
+ PERROR("Could not get page frame for xenstore");
+ goto error_out;
+ }
+
+ *store_mfn = page_array[1];
+ *console_mfn = page_array[2];
+ printf("store_mfn: 0x%lx, console_mfn: 0x%lx\n",
+ (u64)store_mfn, (u64)console_mfn);
+
+ start_info = xc_map_foreign_range(
+ xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, page_array[0]);
+ memset(start_info, 0, sizeof(*start_info));
+ start_info->flags = flags;
+ start_info->store_mfn = nr_pages - 2;
+ start_info->store_evtchn = store_evtchn;
+ start_info->console_mfn = nr_pages - 1;
+ start_info->console_evtchn = console_evtchn;
+ munmap(start_info, PAGE_SIZE);
+
+ free(page_array);
return 0;
error_out:
@@ -674,7 +703,12 @@
unsigned long image_size, initrd_size=0;
unsigned long vstartinfo_start, vkern_entry, vstack_start;
+#ifdef __ia64__
+ /* Current xen/ia64 allocates domU pages on demand */
+ if ( (nr_pages = xc_get_max_pages(xc_handle, domid)) < 0 )
+#else
if ( (nr_pages = xc_get_tot_pages(xc_handle, domid)) < 0 )
+#endif
{
PERROR("Could not find total pages for domain");
goto error_out;
@@ -753,13 +787,16 @@
#ifdef __ia64__
/* based on new_thread in xen/arch/ia64/domain.c */
+ ctxt->flags = 0;
+ ctxt->shared.flags = flags;
+ ctxt->shared.start_info_pfn = nr_pages - 3; // metaphysical
ctxt->regs.cr_ipsr = 0; /* all necessary bits filled by hypervisor */
ctxt->regs.cr_iip = vkern_entry;
ctxt->regs.cr_ifs = 1UL << 63;
ctxt->regs.ar_fpsr = FPSR_DEFAULT;
/* ctxt->regs.r28 = dom_fw_setup(); currently done by hypervisor, should move here */
ctxt->vcpu.privregs = 0;
- ctxt->shared.flags = flags;
+ ctxt->sys_pgnr = nr_pages - 3;
i = 0; /* silence unused variable warning */
#else /* x86 */
/*
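The builder hunks above rely on a fixed metaphysical layout: the last three pages of the domain hold start_info, the xenstore page and the console page, in that order. A minimal sketch of that convention follows; the helper name ia64_sys_page_layout is hypothetical and not part of the patch.

    /* Hypothetical helper: the trailing metaphysical pages as consumed by
     * xc_linux_build above (start_info, xenstore, console, in that order). */
    struct ia64_sys_pages {
        unsigned long start_info_pfn;   /* nr_pages - 3 */
        unsigned long store_pfn;        /* nr_pages - 2 */
        unsigned long console_pfn;      /* nr_pages - 1 */
    };

    static struct ia64_sys_pages ia64_sys_page_layout(unsigned long nr_pages)
    {
        struct ia64_sys_pages p;
        p.start_info_pfn = nr_pages - 3;
        p.store_pfn      = nr_pages - 2;
        p.console_pfn    = nr_pages - 1;
        return p;
    }

These metaphysical frame numbers are what the builder writes into start_info and ctxt->shared.start_info_pfn; the matching machine frames are fetched with xc_ia64_get_pfn_list and handed back to xend through *store_mfn and *console_mfn.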
diff -r 5cd24dd33033 -r 2c823d27cf33 tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c Tue Sep 13 19:08:00 2005
+++ b/tools/libxc/xc_private.c Tue Sep 13 19:42:33 2005
@@ -351,6 +351,15 @@
return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
}
+
+long xc_get_max_pages(int xc_handle, u32 domid)
+{
+ dom0_op_t op;
+ op.cmd = DOM0_GETDOMAININFO;
+ op.u.getdomaininfo.domain = (domid_t)domid;
+ return (do_dom0_op(xc_handle, &op) < 0) ?
+ -1 : op.u.getdomaininfo.max_pages;
+}
#endif
long xc_get_tot_pages(int xc_handle, u32 domid)
diff -r 5cd24dd33033 -r 2c823d27cf33 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Tue Sep 13 19:08:00 2005
+++ b/tools/libxc/xenctrl.h Tue Sep 13 19:42:33 2005
@@ -440,6 +440,8 @@
int xc_ia64_get_pfn_list(int xc_handle, u32 domid, unsigned long *pfn_buf,
unsigned int start_page, unsigned int nr_pages);
+long xc_get_max_pages(int xc_handle, u32 domid);
+
int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops,
domid_t dom);
diff -r 5cd24dd33033 -r 2c823d27cf33 tools/libxc/xg_private.h
--- a/tools/libxc/xg_private.h Tue Sep 13 19:08:00 2005
+++ b/tools/libxc/xg_private.h Tue Sep 13 19:42:33 2005
@@ -1,6 +1,7 @@
#ifndef XG_PRIVATE_H
#define XG_PRIVATE_H
+#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
diff -r 5cd24dd33033 -r 2c823d27cf33 tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py Tue Sep 13 19:08:00 2005
+++ b/tools/python/xen/xend/image.py Tue Sep 13 19:42:33 2005
@@ -222,7 +222,11 @@
def getDomainMemory(self, mem_mb):
"""Memory (in KB) the domain will need for mem_mb (in MB)."""
- return mem_mb * 1024
+ if os.uname()[4] == 'ia64':
+ """Append extra system pages, like xenstore and console"""
+ return (mem_mb * 1024 + 3 * 16)
+ else:
+ return mem_mb * 1024
def buildDomain(self):
"""Build the domain. Define in subclass."""
@@ -457,5 +461,8 @@
# 1 page for the PGD + 1 pte page for 4MB of memory (rounded)
if os.uname()[4] == 'x86_64':
return (5 + ((mem_mb + 1) >> 1)) * 4
+ elif os.uname()[4] == 'ia64':
+ # XEN/IA64 has p2m table allocated on demand, so only return guest firmware size here.
+ return 16 * 1024
else:
return (1 + ((mem_mb + 3) >> 2)) * 4
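As a worked example of the two ia64 branches above, assuming the default 16KB ia64 page size: a 256MB domU gets 256 * 1024 + 3 * 16 = 262192KB from getDomainMemory, the extra 48KB covering the start_info, xenstore and console pages, while the second hook returns a flat 16 * 1024KB = 16MB for the guest firmware image instead of an x86-style page-table estimate.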
diff -r 5cd24dd33033 -r 2c823d27cf33 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c Tue Sep 13 19:08:00 2005
+++ b/xen/arch/ia64/vmx/vmx_init.c Tue Sep 13 19:42:33 2005
@@ -164,7 +164,6 @@
/* FIXME: only support PMT table continuously by far */
d->arch.pmt = __va(c->pt_base);
- d->arch.max_pfn = c->pt_max_pfn;
vmx_final_setup_domain(d);
}
@@ -373,3 +372,119 @@
/* Other vmx specific initialization work */
}
+
+/*
+ * Following stuff should really move to domain builder. However currently
+ * XEN/IA64 doesn't export physical -> machine page table to domain builder,
+ * instead only the copy. Also there's no hypercall to notify hypervisor
+ * IO ranges by far. Let's enhance it later.
+ */
+
+#define MEM_G (1UL << 30)
+#define MEM_M (1UL << 20)
+
+#define MMIO_START (3 * MEM_G)
+#define MMIO_SIZE (512 * MEM_M)
+
+#define VGA_IO_START 0xA0000UL
+#define VGA_IO_SIZE 0x20000
+
+#define LEGACY_IO_START (MMIO_START + MMIO_SIZE)
+#define LEGACY_IO_SIZE (64*MEM_M)
+
+#define IO_PAGE_START (LEGACY_IO_START + LEGACY_IO_SIZE)
+#define IO_PAGE_SIZE PAGE_SIZE
+
+#define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE)
+#define STORE_PAGE_SIZE PAGE_SIZE
+
+#define IO_SAPIC_START 0xfec00000UL
+#define IO_SAPIC_SIZE 0x100000
+
+#define PIB_START 0xfee00000UL
+#define PIB_SIZE 0x100000
+
+#define GFW_START (4*MEM_G -16*MEM_M)
+#define GFW_SIZE (16*MEM_M)
+
+typedef struct io_range {
+ unsigned long start;
+ unsigned long size;
+ unsigned long type;
+} io_range_t;
+
+io_range_t io_ranges[] = {
+ {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
+ {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
+ {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
+ {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
+ {PIB_START, PIB_SIZE, GPFN_PIB},
+};
+
+#define VMX_SYS_PAGES (2 + GFW_SIZE >> PAGE_SHIFT)
+#define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)
+
+int vmx_alloc_contig_pages(struct domain *d)
+{
+ unsigned int order, i, j;
+ unsigned long start, end, pgnr, conf_nr;
+ struct pfn_info *page;
+ struct vcpu *v = d->vcpu[0];
+
+ ASSERT(!test_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags));
+
+ conf_nr = VMX_CONFIG_PAGES(d);
+ order = get_order_from_pages(conf_nr);
+ if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
+ printk("Could not allocate order=%d pages for vmx contig alloc\n",
+ order);
+ return -1;
+ }
+
+ /* Map normal memory below 3G */
+ pgnr = page_to_pfn(page);
+ end = conf_nr << PAGE_SHIFT;
+ for (i = 0;
+ i < (end < MMIO_START ? end : MMIO_START);
+ i += PAGE_SIZE, pgnr++)
+ map_domain_page(d, i, pgnr << PAGE_SHIFT);
+
+ /* Map normal memory beyond 4G */
+ if (unlikely(end > MMIO_START)) {
+ start = 4 * MEM_G;
+ end = start + (end - 3 * MEM_G);
+ for (i = start; i < end; i += PAGE_SIZE, pgnr++)
+ map_domain_page(d, i, pgnr << PAGE_SHIFT);
+ }
+
+ d->arch.max_pfn = end >> PAGE_SHIFT;
+
+ order = get_order_from_pages(VMX_SYS_PAGES);
+ if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
+ printk("Could not allocate order=%d pages for vmx contig alloc\n",
+ order);
+ return -1;
+ }
+
+ /* Map for shared I/O page and xenstore */
+ pgnr = page_to_pfn(page);
+ map_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
+ pgnr++;
+ map_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
+ pgnr++;
+
+ /* Map guest firmware */
+ for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
+ map_domain_page(d, i, pgnr << PAGE_SHIFT);
+
+ /* Mark I/O ranges */
+ for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
+ for (j = io_ranges[i].start;
+ j < io_ranges[i].start + io_ranges[i].size;
+ j += PAGE_SIZE)
+ map_domain_io_page(d, j);
+ }
+
+ set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
+ return 0;
+}
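Putting the address-map macros and the mapping loops above together, vmx_alloc_contig_pages lays out the VT-i guest physical space roughly as follows (derived directly from the #defines; the top of the low RAM chunk depends on the configured memory size):

    0          - up to 3G      guest RAM, first contiguous chunk
    0xA0000    - 0xBFFFF       VGA frame buffer      (GPFN_FRAME_BUFFER)
    3G         - 3.5G          low MMIO hole         (GPFN_LOW_MMIO)
    3.5G       - 3.5G + 64M    legacy I/O            (GPFN_LEGACY_IO)
    3.5G + 64M                 shared I/O emulation page
    + one page                 xenstore page
    0xFEC00000 - 0xFECFFFFF    IOSAPIC               (GPFN_IOSAPIC)
    0xFEE00000 - 0xFEEFFFFF    PIB                   (GPFN_PIB)
    4G - 16M   - 4G            guest firmware (GFW)
    4G upward                  remainder of guest RAM that does not fit below 3G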
diff -r 5cd24dd33033 -r 2c823d27cf33 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c Tue Sep 13 19:08:00 2005
+++ b/xen/arch/ia64/xen/dom0_ops.c Tue Sep 13 19:42:33 2005
@@ -148,7 +148,6 @@
put_domain(d);
}
break;
-#ifndef CONFIG_VTI
/*
* NOTE: DOM0_GETMEMLIST has somewhat different semantics on IA64 -
* it actually allocates and maps pages.
@@ -168,6 +167,14 @@
{
ret = 0;
+ /* A temp trick here. When max_pfns == -1, we assume
+ * the request is for machine contiguous pages, so request
+ * all pages at first query
+ */
+ if ((op->u.getmemlist.max_pfns == -1UL) &&
+ !test_bit(ARCH_VMX_CONTIG_MEM,&d->vcpu[0]->arch.arch_vmx.flags))
+ return vmx_alloc_contig_pages(d) ? (-ENOMEM) : 0;
+
for ( i = start_page; i < (start_page + nr_pages); i++ )
{
page = map_new_domain_page(d, i << PAGE_SHIFT);
@@ -192,42 +199,6 @@
}
}
break;
-#else
- case DOM0_GETMEMLIST:
- {
- int i;
- struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
- unsigned long max_pfns = op->u.getmemlist.max_pfns;
- unsigned long pfn;
- unsigned long *buffer = op->u.getmemlist.buffer;
- struct list_head *list_ent;
-
- ret = -EINVAL;
- if (!d) {
- ret = 0;
-
- spin_lock(&d->page_alloc_lock);
- list_ent = d->page_list.next;
- for (i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++) {
- pfn = list_entry(list_ent, struct pfn_info, list) -
- frame_table;
- if (put_user(pfn, buffer)) {
- ret = -EFAULT;
- break;
- }
- buffer++;
- list_ent = frame_table[pfn].list.next;
- }
- spin_unlock(&d->page_alloc_lock);
-
- op->u.getmemlist.num_pfns = i;
- copy_to_user(u_dom0_op, op, sizeof(*op));
-
- put_domain(d);
- }
- }
- break;
-#endif // CONFIG_VTI
default:
ret = -ENOSYS;
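The max_pfns == -1UL convention above implies a matching request from the tools side. A minimal sketch, assuming xc_handle and domid are in scope; the NULL buffer and the exact error handling are assumptions, not shown in this patch:

    /* Passing max_pfns == -1UL asks the hypervisor to allocate the whole
     * contiguous VT-i memory range up front instead of returning a pfn list. */
    dom0_op_t op;
    int rc;

    op.cmd = DOM0_GETMEMLIST;
    op.u.getmemlist.domain   = (domid_t)domid;
    op.u.getmemlist.max_pfns = -1UL;   /* contiguous-allocation marker */
    op.u.getmemlist.buffer   = NULL;   /* assumption: no pfn list wanted */
    rc = do_dom0_op(xc_handle, &op);   /* fails if vmx_alloc_contig_pages() cannot allocate */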
diff -r 5cd24dd33033 -r 2c823d27cf33 xen/arch/ia64/xen/dom_fw.c
--- a/xen/arch/ia64/xen/dom_fw.c Tue Sep 13 19:08:00 2005
+++ b/xen/arch/ia64/xen/dom_fw.c Tue Sep 13 19:42:33 2005
@@ -490,7 +490,7 @@
unsigned char checksum = 0;
char *cp, *cmd_line, *fw_vendor;
int i = 0;
- unsigned long maxmem = d->max_pages * PAGE_SIZE;
+ unsigned long maxmem = (d->max_pages - d->arch.sys_pgnr) * PAGE_SIZE;
unsigned long start_mpaddr = ((d==dom0)?dom0_start:0);
# define MAKE_MD(typ, attr, start, end, abs) \
@@ -512,10 +512,6 @@
return 0;
}
*/
- /* Last page is for xenstore, and not exported to domain */
- if (d != dom0)
- maxmem = (d->max_pages - 1) * PAGE_SIZE;
-
memset(fw_mem, 0, fw_mem_size);
#ifdef XEN
diff -r 5cd24dd33033 -r 2c823d27cf33 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Tue Sep 13 19:08:00 2005
+++ b/xen/arch/ia64/xen/domain.c Tue Sep 13 19:42:33 2005
@@ -233,6 +233,7 @@
d->arch.breakimm = 0x1000;
v->arch.breakimm = d->arch.breakimm;
+ d->arch.sys_pgnr = 0;
d->arch.mm = xmalloc(struct mm_struct);
if (unlikely(!d->arch.mm)) {
printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
@@ -295,6 +296,7 @@
}
v->arch.domain_itm_last = -1L;
+ d->arch.sys_pgnr = c->sys_pgnr;
d->shared_info->arch = c->shared;
/* Don't redo final setup */
@@ -467,6 +469,43 @@
if (pte_none(*pte)) {
set_pte(pte, pfn_pte(physaddr >> PAGE_SHIFT,
__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
+ }
+ else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
+}
+
+/* map a physical address with specified I/O flag */
+void map_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
+{
+ struct mm_struct *mm = d->arch.mm;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ pte_t io_pte;
+
+ if (!mm->pgd) {
+ printk("map_domain_page: domain pgd must exist!\n");
+ return;
+ }
+ ASSERT(flags & GPFN_IO_MASK);
+
+ pgd = pgd_offset(mm,mpaddr);
+ if (pgd_none(*pgd))
+ pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
+
+ pud = pud_offset(pgd, mpaddr);
+ if (pud_none(*pud))
+ pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
+
+ pmd = pmd_offset(pud, mpaddr);
+ if (pmd_none(*pmd))
+ pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm,mpaddr));
+// pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
+
+ pte = pte_offset_map(pmd, mpaddr);
+ if (pte_none(*pte)) {
+ pte_val(io_pte) = flags;
+ set_pte(pte, io_pte);
}
else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
}
@@ -910,10 +949,12 @@
panic("PAL CACHE FLUSH failed for dom0.\n");
printk("Sync i/d cache for dom0 image SUCC\n");
+ /* Set up start info area. */
+ si = (start_info_t *)alloc_xenheap_page();
+ memset(si, 0, PAGE_SIZE);
+ d->shared_info->arch.start_info_pfn = __pa(si) >> PAGE_SHIFT;
+
#if 0
- /* Set up start info area. */
- //si = (start_info_t *)vstartinfo_start;
- memset(si, 0, PAGE_SIZE);
si->nr_pages = d->tot_pages;
si->shared_info = virt_to_phys(d->shared_info);
si->flags = SIF_PRIVILEGED | SIF_INITDOMAIN;
diff -r 5cd24dd33033 -r 2c823d27cf33 xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c Tue Sep 13 19:08:00 2005
+++ b/xen/arch/ia64/xen/hypercall.c Tue Sep 13 19:42:33 2005
@@ -152,12 +152,9 @@
break;
case __HYPERVISOR_memory_op:
-#ifdef CONFIG_VTI
- regs->r8 = do_dom_mem_op(regs->r14, regs->r15, regs->r16, regs->r17, regs->r18);
-#else
+ //regs->r8 = do_dom_mem_op(regs->r14, regs->r15, regs->r16, regs->r17, regs->r18);
/* we don't handle reservations; just return success */
regs->r8 = regs->r16;
-#endif
break;
case __HYPERVISOR_event_channel_op:
diff -r 5cd24dd33033 -r 2c823d27cf33 xen/arch/ia64/xen/process.c
--- a/xen/arch/ia64/xen/process.c Tue Sep 13 19:08:00 2005
+++ b/xen/arch/ia64/xen/process.c Tue Sep 13 19:42:33 2005
@@ -30,6 +30,7 @@
#include <asm/ia64_int.h>
#include <asm/dom_fw.h>
#include "hpsim_ssc.h"
+#include <xen/multicall.h>
extern unsigned long vcpu_get_itir_on_fault(struct vcpu *, UINT64);
extern struct ia64_sal_retval pal_emulator_static(UINT64);
@@ -659,7 +660,8 @@
else do_ssc(vcpu_get_gr(current,36), regs);
}
else if (iim == d->arch.breakimm) {
- if (ia64_hypercall(regs))
+ if (ia64_hypercall(regs) &&
+ !PSCBX(v, hypercall_continuation))
vcpu_increment_iip(current);
}
else if (!PSCB(v,interrupt_collection_enabled)) {
@@ -747,3 +749,40 @@
if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, isr, regs)) return;
reflect_interruption(ifa,isr,itir,regs,vector);
}
+
+unsigned long __hypercall_create_continuation(
+ unsigned int op, unsigned int nr_args, ...)
+{
+ struct mc_state *mcs = &mc_state[smp_processor_id()];
+ VCPU *vcpu = current;
+ struct cpu_user_regs *regs = vcpu->arch.regs;
+ unsigned int i;
+ va_list args;
+
+ va_start(args, nr_args);
+ if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
+ panic("PREEMPT happen in multicall\n"); // Not support yet
+ } else {
+ vcpu_set_gr(vcpu, 2, op);
+ for ( i = 0; i < nr_args; i++) {
+ switch (i) {
+ case 0: vcpu_set_gr(vcpu, 14, va_arg(args, unsigned long));
+ break;
+ case 1: vcpu_set_gr(vcpu, 15, va_arg(args, unsigned long));
+ break;
+ case 2: vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long));
+ break;
+ case 3: vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long));
+ break;
+ case 4: vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long));
+ break;
+ default: panic("Too many args for hypercall continuation\n");
+ break;
+ }
+ }
+ }
+ vcpu->arch.hypercall_continuation = 1;
+ va_end(args);
+ return op;
+}
+
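The helper above mirrors the x86 continuation mechanism: the hypercall number goes back into r2 and the arguments into r14-r18, and because hypercall_continuation is set, the break handler earlier in this file skips vcpu_increment_iip(), so the guest re-issues the hypercall where it left off. A minimal sketch of how a preemptible hypercall would use it; the hypercall and argument names are illustrative, and hypercall_preempt_check() is the usual Xen preemption test rather than something added by this patch:

    /* Illustrative only: a long-running hypercall bailing out early and
     * arranging to be restarted with the same arguments. */
    if ( hypercall_preempt_check() )
        return __hypercall_create_continuation(__HYPERVISOR_memory_op,
                                               2, cmd, arg);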
diff -r 5cd24dd33033 -r 2c823d27cf33 xen/arch/ia64/xen/xenmisc.c
--- a/xen/arch/ia64/xen/xenmisc.c Tue Sep 13 19:08:00 2005
+++ b/xen/arch/ia64/xen/xenmisc.c Tue Sep 13 19:42:33 2005
@@ -103,14 +103,6 @@
while(1);
}
return frame;
-}
-#endif
-
-#ifndef CONFIG_VTI
-unsigned long __hypercall_create_continuation(
- unsigned int op, unsigned int nr_args, ...)
-{
- printf("__hypercall_create_continuation: not implemented!!!\n");
}
#endif
diff -r 5cd24dd33033 -r 2c823d27cf33 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h Tue Sep 13 19:08:00 2005
+++ b/xen/include/asm-ia64/domain.h Tue Sep 13 19:42:33 2005
@@ -26,12 +26,9 @@
int imp_va_msb;
unsigned long *pmt; /* physical to machine table */
- /*
- * max_pfn is the maximum page frame in guest physical space, including
- * inter-middle I/O ranges and memory holes. This is different with
- * max_pages in domain struct, which indicates maximum memory size
- */
- unsigned long max_pfn;
+ /* System pages out of guest memory, like for xenstore/console */
+ unsigned long sys_pgnr;
+ unsigned long max_pfn; /* Max pfn including I/O holes */
struct virutal_platform_def vmx_platform;
u64 xen_vastart;
diff -r 5cd24dd33033 -r 2c823d27cf33 xen/include/asm-ia64/vmx_vpd.h
--- a/xen/include/asm-ia64/vmx_vpd.h Tue Sep 13 19:08:00 2005
+++ b/xen/include/asm-ia64/vmx_vpd.h Tue Sep 13 19:42:33 2005
@@ -89,7 +89,8 @@
#define ARCH_VMX_VMCS_LAUNCH 1 /* Needs VMCS launch */
#define ARCH_VMX_VMCS_RESUME 2 /* Needs VMCS resume */
#define ARCH_VMX_IO_WAIT 3 /* Waiting for I/O completion */
-#define ARCH_VMX_INTR_ASSIST 4 /* Need DM's assist to issue intr */
+#define ARCH_VMX_INTR_ASSIST 4 /* Need DM's assist to issue intr */
+#define ARCH_VMX_CONTIG_MEM 5 /* Need contiguous machine pages */
#define VMX_DEBUG 1
diff -r 5cd24dd33033 -r 2c823d27cf33 xen/include/asm-ia64/xenpage.h
--- a/xen/include/asm-ia64/xenpage.h Tue Sep 13 19:08:00 2005
+++ b/xen/include/asm-ia64/xenpage.h Tue Sep 13 19:42:33 2005
@@ -8,7 +8,7 @@
#undef pfn_valid
#undef page_to_pfn
#undef pfn_to_page
-# define pfn_valid(pfn) (0)
+# define pfn_valid(_pfn) ((_pfn) > max_page)
# define page_to_pfn(_page) ((unsigned long) ((_page) - frame_table))
# define pfn_to_page(_pfn) (frame_table + (_pfn))
diff -r 5cd24dd33033 -r 2c823d27cf33 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h Tue Sep 13 19:08:00 2005
+++ b/xen/include/public/arch-ia64.h Tue Sep 13 19:42:33 2005
@@ -255,11 +255,8 @@
#define __ARCH_HAS_VCPU_INFO
typedef struct {
- int domain_controller_evtchn;
unsigned int flags;
- unsigned short store_evtchn;
- unsigned long store_mfn;
-//} arch_shared_info_t;
+ unsigned long start_info_pfn;
} arch_shared_info_t; // DON'T PACK
typedef struct vcpu_guest_context {
@@ -268,10 +265,9 @@
#define VGCF_IN_KERNEL (1<<2)
unsigned long flags; /* VGCF_* flags */
unsigned long pt_base; /* PMT table base */
- unsigned long pt_max_pfn; /* Max pfn including holes */
unsigned long share_io_pg; /* Shared page for I/O emulation */
+ unsigned long sys_pgnr; /* System pages out of domain memory */
unsigned long vm_assist; /* VMASST_TYPE_* bitmap, now none on IPF */
- unsigned long guest_iip; /* Guest entry point */
cpu_user_regs_t regs;
arch_vcpu_info_t vcpu;
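With the arch_shared_info change above, an ia64 guest no longer finds store_evtchn/store_mfn in shared_info; it locates everything through the advertised start_info frame. A hedged guest-side sketch, where pfn_to_virt() stands in for whatever pfn-to-address conversion the guest kernel actually uses:

    /* Guest-side sketch: find start_info via the new start_info_pfn field,
     * then read the store/console details the domain builder filled in. */
    unsigned long pfn = HYPERVISOR_shared_info->arch.start_info_pfn;
    start_info_t *si = (start_info_t *)pfn_to_virt(pfn);   /* stand-in helper */

    unsigned long store_mfn      = si->store_mfn;       /* nr_pages - 2 */
    unsigned int  store_evtchn   = si->store_evtchn;
    unsigned long console_mfn    = si->console_mfn;      /* nr_pages - 1 */
    unsigned int  console_evtchn = si->console_evtchn;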