# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1190278630 -3600
# Node ID b33ee2276b6a3fa9a466734ba514edcae2379d0a
# Parent 39c85fa942aa3b9f8a6fdbc210d695e6eff017f5
domctl and p2m changes for PCI passthru.
Signed-off-by: Allen Kay <allen.m.kay@xxxxxxxxx>
Signed-off-by: Guy Zana <guy@xxxxxxxxxxxx>
---
xen/arch/x86/domctl.c | 151 +++++++++++++++++++++++++++++++++++++++++
xen/arch/x86/mm/p2m.c | 47 ++++++++++++
xen/arch/x86/mm/shadow/multi.c | 2
xen/include/asm-x86/p2m.h | 4 +
4 files changed, 202 insertions(+), 2 deletions(-)
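
For readers following the assign_device hunk below: the handler unpacks the PCI bus and device/function from the packed machine_bdf field. A minimal standalone sketch of that decoding (the PCI_SLOT/PCI_FUNC macros follow their standard Linux/Xen definitions; the sample value is made up):

    #include <stdio.h>
    #include <stdint.h>

    /* Standard devfn helpers, matching the Linux/Xen definitions. */
    #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn) ((devfn) & 0x07)

    int main(void)
    {
        /* Hypothetical packed BDF in the layout the domctl expects:
         * bits 23:16 = bus, bits 15:8 = devfn. */
        uint32_t machine_bdf = 0x00010800; /* bus 1, slot 1, fn 0 */
        uint8_t bus   = (machine_bdf >> 16) & 0xff;
        uint8_t devfn = (machine_bdf >> 8) & 0xff;

        printf("bdf = %x:%x:%x\n", bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        return 0;
    }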
diff -r 39c85fa942aa -r b33ee2276b6a xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c Thu Sep 20 09:42:01 2007 +0100
+++ b/xen/arch/x86/domctl.c Thu Sep 20 09:57:10 2007 +0100
@@ -25,6 +25,8 @@
#include <asm/hvm/support.h>
#include <asm/processor.h>
#include <xsm/xsm.h>
+#include <xen/list.h>
+#include <asm/iommu.h>
long arch_do_domctl(
struct xen_domctl *domctl,
@@ -523,6 +525,155 @@ long arch_do_domctl(
}
break;
+ case XEN_DOMCTL_assign_device:
+ {
+ struct domain *d;
+ struct hvm_iommu *hd;
+ u8 bus, devfn;
+
+ if (!vtd_enabled)
+ break;
+
+ ret = -EINVAL;
+ if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) ) {
+ gdprintk(XENLOG_ERR,
+ "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
+ break;
+ }
+ hd = domain_hvm_iommu(d);
+ bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
+ devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;
+ ret = assign_device(d, bus, devfn);
+ gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: bdf = %x:%x:%x\n",
+ bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ put_domain(d);
+ }
+ break;
+
+ case XEN_DOMCTL_bind_pt_irq:
+ {
+ struct domain * d;
+ xen_domctl_bind_pt_irq_t * bind;
+
+ ret = -ESRCH;
+ if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
+ break;
+ bind = &(domctl->u.bind_pt_irq);
+ if (vtd_enabled)
+ ret = pt_irq_create_bind_vtd(d, bind);
+ if (ret < 0)
+ gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
+ rcu_unlock_domain(d);
+ }
+ break;
+
+ case XEN_DOMCTL_memory_mapping:
+ {
+ struct domain *d;
+ unsigned long gfn = domctl->u.memory_mapping.first_gfn;
+ unsigned long mfn = domctl->u.memory_mapping.first_mfn;
+ unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
+ int i;
+
+ ret = -EINVAL;
+ if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
+ break;
+
+ ret = -ESRCH;
+ if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
+ break;
+
+ ret=0;
+ if ( domctl->u.memory_mapping.add_mapping )
+ {
+ gdprintk(XENLOG_INFO,
+ "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
+ gfn, mfn, nr_mfns);
+
+ ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
+ for ( i = 0; i < nr_mfns; i++ )
+ set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
+ }
+ else
+ {
+ gdprintk(XENLOG_INFO,
+ "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
+ gfn, mfn, nr_mfns);
+
+ for ( i = 0; i < nr_mfns; i++ )
+ clear_mmio_p2m_entry(d, gfn+i);
+ ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
+ }
+
+ rcu_unlock_domain(d);
+ }
+ break;
+
+ case XEN_DOMCTL_ioport_mapping:
+ {
+#define MAX_IOPORTS 0x10000
+ struct domain *d;
+ struct hvm_iommu *hd;
+ unsigned int fgp = domctl->u.ioport_mapping.first_gport;
+ unsigned int fmp = domctl->u.ioport_mapping.first_mport;
+ unsigned int np = domctl->u.ioport_mapping.nr_ports;
+ struct g2m_ioport *g2m_ioport;
+ int found = 0;
+
+ ret = -EINVAL;
+ if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
+ ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
+ {
+ gdprintk(XENLOG_ERR,
+ "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
+ fgp, fmp, np);
+ break;
+ }
+
+ ret = -ESRCH;
+ if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
+ break;
+
+ hd = domain_hvm_iommu(d);
+ if ( domctl->u.ioport_mapping.add_mapping )
+ {
+ gdprintk(XENLOG_INFO,
+ "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
+ fgp, fmp, np);
+
+ list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
+ if (g2m_ioport->mport == fmp ) {
+ g2m_ioport->gport = fgp;
+ g2m_ioport->np = np;
+ found = 1;
+ break;
+ }
+ if ( !found )
+ {
+ g2m_ioport = xmalloc(struct g2m_ioport);
+ g2m_ioport->gport = fgp;
+ g2m_ioport->mport = fmp;
+ g2m_ioport->np = np;
+ list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
+ }
+ ret = ioports_permit_access(d, fmp, fmp + np - 1);
+
+ }
+ else {
+ gdprintk(XENLOG_INFO,
+ "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
+ fgp, fmp, np);
+ list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
+ if ( g2m_ioport->mport == fmp ) {
+ list_del(&g2m_ioport->list);
+ break;
+ }
+ ret = ioports_deny_access(d, fmp, fmp + np - 1);
+ }
+ rcu_unlock_domain(d);
+ }
+ break;
+
default:
ret = -ENOSYS;
break;
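
The -EINVAL test at the top of the memory_mapping case is the usual unsigned-overflow guard: if the last frame of the requested range wraps past the top of the address space, (mfn + nr_mfns - 1) comes out smaller than mfn. A standalone sketch of the same idiom, with made-up values:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical range whose end wraps past the top of the
         * unsigned long space. */
        unsigned long mfn = ~0UL - 1;
        unsigned long nr_mfns = 4;

        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            printf("range wraps: reject with -EINVAL\n");
        else
            printf("range ok\n");
        return 0;
    }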
diff -r 39c85fa942aa -r b33ee2276b6a xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Thu Sep 20 09:42:01 2007 +0100
+++ b/xen/arch/x86/mm/p2m.c Thu Sep 20 09:57:10 2007 +0100
@@ -27,6 +27,7 @@
#include <asm/page.h>
#include <asm/paging.h>
#include <asm/p2m.h>
+#include <asm/iommu.h>
/* Debugging and auditing of the P2M code? */
#define P2M_AUDIT 0
@@ -244,13 +245,16 @@ set_p2m_entry(struct domain *d, unsigned
if ( mfn_valid(mfn) && (gfn > d->arch.p2m.max_mapped_pfn) )
d->arch.p2m.max_mapped_pfn = gfn;
- if ( mfn_valid(mfn) )
+ if ( mfn_valid(mfn) || (p2mt == p2m_mmio_direct) )
entry_content = l1e_from_pfn(mfn_x(mfn), p2m_type_to_flags(p2mt));
else
entry_content = l1e_empty();
/* level 1 entry */
paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1);
+
+ if ( vtd_enabled && (p2mt == p2m_mmio_direct) && is_hvm_domain(d) )
+ iommu_flush(d, gfn, (u64*)p2m_entry);
/* Success */
rv = 1;
@@ -350,6 +354,11 @@ int p2m_alloc_table(struct domain *d,
&& !set_p2m_entry(d, gfn, mfn, p2m_ram_rw) )
goto error;
}
+
+#if CONFIG_PAGING_LEVELS >= 3
+ if (vtd_enabled && is_hvm_domain(d))
+ iommu_set_pgd(d);
+#endif
P2M_PRINTK("p2m table initialised (%u pages)\n", page_count);
p2m_unlock(d);
@@ -860,6 +869,42 @@ p2m_type_t p2m_change_type(struct domain
return pt;
}
+int
+set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
+{
+ int rc = 0;
+
+ rc = set_p2m_entry(d, gfn, mfn, p2m_mmio_direct);
+ if ( 0 == rc )
+ gdprintk(XENLOG_ERR,
+ "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
+ gmfn_to_mfn(d, gfn));
+ return rc;
+}
+
+int
+clear_mmio_p2m_entry(struct domain *d, unsigned long gfn)
+{
+ int rc = 0;
+
+ unsigned long mfn;
+ mfn = gmfn_to_mfn(d, gfn);
+ if ( INVALID_MFN == mfn )
+ {
+ gdprintk(XENLOG_ERR,
+ "clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn);
+ return 0;
+ }
+ rc = set_p2m_entry(d, gfn, _mfn(INVALID_MFN), 0);
+
+#if !defined(__x86_64__)
+ /* x86_64 xen does not map mmio entries in machine_to_phys_mapping[] */
+ set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
+#endif
+
+ return rc;
+}
+
/*
* Local variables:
* mode: C
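
The set_p2m_entry change above is the heart of the p2m side: direct-MMIO frames have no frame-table entry, so mfn_valid() is false for them, yet a populated l1 entry must still be written. A standalone sketch of that decision, with mfn_valid() stubbed purely for illustration:

    #include <stdio.h>

    typedef enum { p2m_ram_rw, p2m_mmio_direct } p2m_type_t;

    /* Stub for illustration: pretend frames below some boundary are
     * covered by the frame table and everything above is MMIO. */
    static int mfn_valid(unsigned long mfn) { return mfn < 0x10000; }

    static void decide(unsigned long mfn, p2m_type_t p2mt)
    {
        if ( mfn_valid(mfn) || (p2mt == p2m_mmio_direct) )
            printf("mfn %#lx: write a populated l1 entry\n", mfn);
        else
            printf("mfn %#lx: write l1e_empty()\n", mfn);
    }

    int main(void)
    {
        decide(0x1000, p2m_ram_rw);       /* ordinary RAM frame */
        decide(0xfebf0, p2m_mmio_direct); /* a device BAR frame */
        return 0;
    }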
diff -r 39c85fa942aa -r b33ee2276b6a xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Thu Sep 20 09:42:01 2007 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c Thu Sep 20 09:57:10 2007 +0100
@@ -685,7 +685,7 @@ _sh_propagate(struct vcpu *v,
/* N.B. For pass-through MMIO, either this test needs to be relaxed,
* and shadow_set_l1e() trained to handle non-valid MFNs (ugh), or the
* MMIO areas need to be added to the frame-table to make them "valid". */
- if ( !mfn_valid(target_mfn) )
+ if ( !mfn_valid(target_mfn) && (p2mt != p2m_mmio_direct) )
{
ASSERT((ft == ft_prefetch));
*sp = shadow_l1e_empty();
diff -r 39c85fa942aa -r b33ee2276b6a xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Sep 20 09:42:01 2007 +0100
+++ b/xen/include/asm-x86/p2m.h Thu Sep 20 09:57:10 2007 +0100
@@ -222,6 +222,10 @@ p2m_type_t p2m_change_type(struct domain
p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
p2m_type_t ot, p2m_type_t nt);
+/* Set mmio addresses in the p2m table (for pass-through) */
+int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
+int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
+
#endif /* _XEN_P2M_H */
/*
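
Finally, on the toolstack side, a hedged sketch of how a caller might fill in the domctl for the new memory_mapping op. The union fields match what the handler above reads; issue_domctl() stands in for the real privcmd/libxc hypercall plumbing and is purely hypothetical:

    #include <string.h>
    #include <xen/domctl.h> /* struct xen_domctl, XEN_DOMCTL_memory_mapping */

    /* Hypothetical transport: a real toolstack would go through
     * libxc/privcmd here. */
    extern int issue_domctl(struct xen_domctl *domctl);

    /* Map nr_mfns machine frames starting at first_mfn into the guest
     * physmap at first_gfn. */
    int map_guest_mmio(domid_t domid, unsigned long first_gfn,
                       unsigned long first_mfn, unsigned long nr_mfns)
    {
        struct xen_domctl domctl;

        memset(&domctl, 0, sizeof(domctl));
        domctl.cmd = XEN_DOMCTL_memory_mapping;
        domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
        domctl.domain = domid;
        domctl.u.memory_mapping.first_gfn = first_gfn;
        domctl.u.memory_mapping.first_mfn = first_mfn;
        domctl.u.memory_mapping.nr_mfns = nr_mfns;
        domctl.u.memory_mapping.add_mapping = 1;

        return issue_domctl(&domctl);
    }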