
Re: [Xen-devel] [v8][PATCH 04/17] update the existing hypercall to support XEN_DOMCTL_set_rdm



On 2014/12/8 16:51, Jan Beulich wrote:
On 08.12.14 at 08:11, <tiejun.chen@xxxxxxxxx> wrote:
On 2014/12/4 23:50, Jan Beulich wrote:
On 01.12.14 at 10:24, <tiejun.chen@xxxxxxxxx> wrote:

-        if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
-                                     &rdm, 1) )
-            return -EFAULT;
+    if ( d )
+    {
+        if ( d->arch.hvm_domain.pci_force )

You didn't verify that the domain is a HVM/PVH one.

Would ASSERT(is_hvm_domain(grdm.domain)) be correct here?

Certainly not, or do you want to crash the hypervisor because of bad
tools input?

Based on Konrad's hint, I hope this works for you:

+        if ( !is_hvm_domain(d) )
+            return -ENOSYS;
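
For context, a minimal sketch of how that check could sit in the domain
lookup path (the surrounding lines are assumed here for illustration,
not quoted from the patch):

    /* Sketch only: the lookup context is assumed, not taken from the patch. */
    struct domain *d = rcu_lock_domain_by_any_id(grdm.map.domid);

    if ( d == NULL )
        return -ESRCH;

    /*
     * Reject non-HVM domains with an error instead of ASSERT()ing,
     * so bad tools input cannot crash the hypervisor.
     */
    if ( !is_hvm_domain(d) )
    {
        rcu_unlock_domain(d);
        return -ENOSYS;
    }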


+        {
+            if ( grdm->used_entries < grdm->map.nr_entries )
+            {
+                if ( __copy_to_compat_offset(grdm->map.buffer,
+                                             grdm->used_entries,
+                                             &rdm, 1) )
+                {
+                    rcu_unlock_domain(d);
+                    return -EFAULT;
+                }
+            }
+            ++grdm->used_entries;
+        }
+        else
+        {
+            for ( i = 0; i < d->arch.hvm_domain.num_pcidevs; i++ )
+            {
+                sbdf = PCI_SBDF2(d->arch.hvm_domain.pcidevs[i].seg,
+                                 d->arch.hvm_domain.pcidevs[i].bus,
+                                 d->arch.hvm_domain.pcidevs[i].devfn);
+                if ( sbdf == id )
+                {
+                    if ( grdm->used_entries < grdm->map.nr_entries )
+                    {
+                        if ( __copy_to_compat_offset(grdm->map.buffer,
+                                                     grdm->used_entries,
+                                                     &rdm, 1) )
+                        {
+                            rcu_unlock_domain(d);
+                            return -EFAULT;
+                        }
+                    }
+                    ++grdm->used_entries;

break;

Added.


Also, folding the code that is identical on the if and else branches would
seem pretty desirable.

Sorry, I can't see what I'm missing.

The whole "if-copy-unlock-and-return-EFAULT-otherwise-increment"
is identical and can be factored out pretty easily afaict.

What about this?

struct get_reserved_device_memory {
    struct xen_reserved_device_memory_map map;
    unsigned int used_entries;
    struct domain *domain;
};

static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
                                      u32 id, void *ctxt)
{
    struct get_reserved_device_memory *grdm = ctxt;
    struct domain *d = grdm->domain;
    unsigned int i, hit_one = 0;
    u32 sbdf;
    struct xen_reserved_device_memory rdm = {
        .start_pfn = start, .nr_pages = nr
    };

    /*
     * Without pci_force, report only RMRRs belonging to devices
     * actually assigned to this domain.
     */
    if ( !d->arch.hvm_domain.pci_force )
    {
        for ( i = 0; i < d->arch.hvm_domain.num_pcidevs; i++ )
        {
            sbdf = PCI_SBDF2(d->arch.hvm_domain.pcidevs[i].seg,
                             d->arch.hvm_domain.pcidevs[i].bus,
                             d->arch.hvm_domain.pcidevs[i].devfn);
            if ( sbdf == id )
            {
                hit_one = 1;
                break;
            }
        }

        if ( !hit_one )
            return 0;
    }

    /* Copy the entry out only while the caller's buffer has room ... */
    if ( grdm->used_entries < grdm->map.nr_entries )
    {
        if ( __copy_to_guest_offset(grdm->map.buffer,
                                    grdm->used_entries,
                                    &rdm, 1) )
            return -EFAULT;
    }

    /* ... but always count it, so nr_entries can report the total. */
    ++grdm->used_entries;

    return 0;
}
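
For reference, the caller side could then look roughly like this (a
sketch assuming the XENMEM_reserved_device_memory_map handler drives the
callback through the IOMMU layer; the exact wiring is not quoted from
the patch):

    /* Sketch of the presumed caller, not quoted from the patch. */
    case XENMEM_reserved_device_memory_map:
    {
        struct get_reserved_device_memory grdm;

        if ( copy_from_guest(&grdm.map, arg, 1) )
            return -EFAULT;

        grdm.used_entries = 0;
        grdm.domain = rcu_lock_domain_by_any_id(grdm.map.domid);
        if ( grdm.domain == NULL )
            return -ESRCH;

        rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
                                              &grdm);
        rcu_unlock_domain(grdm.domain);

        if ( !rc && grdm.map.nr_entries < grdm.used_entries )
            rc = -ENOBUFS;

        /* nr_entries is IN/OUT: always report the total back. */
        grdm.map.nr_entries = grdm.used_entries;
        if ( __copy_to_guest(arg, &grdm.map, 1) )
            rc = -EFAULT;

        return rc;
    }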


@@ -319,9 +358,13 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)

               if ( !rc && grdm.map.nr_entries < grdm.used_entries )
                   rc = -ENOBUFS;
+
               grdm.map.nr_entries = grdm.used_entries;
-            if ( __copy_to_guest(compat, &grdm.map, 1) )
-                rc = -EFAULT;
+            if ( grdm.map.nr_entries )
+            {
+                if ( __copy_to_guest(compat, &grdm.map, 1) )
+                    rc = -EFAULT;
+            }

Why do you need this change?

If we have no entries at all, why would we still copy?

That's not only a pointless optimization (the counter question being
"Why add an extra conditional when the copying does no harm?"), but also
not the subject of this patch. Additionally, IIRC the field is IN/OUT,
i.e. when no entries were found you want to tell the caller so.

Right, so I will restore that.
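
(I.e. the hunk presumably goes back to the unconditional copy, as in the
original code above:)

               if ( !rc && grdm.map.nr_entries < grdm.used_entries )
                   rc = -ENOBUFS;
               grdm.map.nr_entries = grdm.used_entries;
               if ( __copy_to_guest(compat, &grdm.map, 1) )
                   rc = -EFAULT;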


--- a/xen/drivers/passthrough/vtd/dmar.c
+++ b/xen/drivers/passthrough/vtd/dmar.c
@@ -904,17 +904,33 @@ int platform_supports_x2apic(void)

   int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
   {
-    struct acpi_rmrr_unit *rmrr;
+    struct acpi_rmrr_unit *rmrr, *rmrr_cur = NULL;
       int rc = 0;
+    unsigned int i;
+    u16 bdf;

-    list_for_each_entry(rmrr, &acpi_rmrr_units, list)
+    for_each_rmrr_device ( rmrr, bdf, i )
       {
-        rc = func(PFN_DOWN(rmrr->base_address),
-                  PFN_UP(rmrr->end_address) - PFN_DOWN(rmrr->base_address),
-                  ctxt);
-        if ( rc )
-            break;
+        if ( rmrr != rmrr_cur )
+        {
+            rc = func(PFN_DOWN(rmrr->base_address),
+                      PFN_UP(rmrr->end_address) -
+                        PFN_DOWN(rmrr->base_address),
+                      PCI_SBDF(rmrr->segment, bdf),
+                      ctxt);
+
+            if ( unlikely(rc < 0) )
+                return rc;
+
+            /* rc == 0: entry handled; skip further devices in this RMRR. */
+            if ( !rc )
+                rmrr_cur = rmrr;
+
+            /* rc > 0: the callback asks us to stop and propagate this value. */
+            if ( rc > 0 )
+                return rc;

Nice that you check for that, but I can't see this case occurring
anymore. Did you lose some code? Also please don't write code

We have three scenarios here:

#1 rc < 0 means an error occurred;
#2 rc == 0 means the tools caller doesn't know how many buffers it should
construct, so we need to count all entries and return the total as
'nr_entries';
#3 rc > 0 means we need to return a specific value in some cases, e.g. 1
to indicate that a given gfn hits an RMRR range. Currently we check this
during memory population, in the EPT violation handler, and in mem_access.
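
Put differently, the iterator would dispatch on rc roughly like this
(illustrative only, not quoted from the patch):

    /* Illustrative dispatch on the callback's return value. */
    rc = func(start_pfn, nr_pages, sbdf, ctxt);
    if ( rc < 0 )      /* #1: hard error, abort the walk */
        return rc;
    if ( rc > 0 )      /* #3: range hit, propagate to the caller */
        return rc;
    /* #2: rc == 0, entry counted/copied, continue with the next RMRR */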

Yes, I saw that you make use of this in later patches. It just seemed
suspicious that you don't in this one.

--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -586,6 +586,11 @@ typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
   DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);

   struct xen_reserved_device_memory_map {
+    /*
+     * Domain whose reservation is being changed.
+     * Unprivileged domains can specify only DOMID_SELF.
+     */
+    domid_t        domid;
       /* IN/OUT */
       unsigned int nr_entries;
       /* OUT */

Your addition lacks an IN annotation.

Are you referring to 'nr_entries'? I didn't introduce anything to change
its original usage. Anyway, I've tried to improve the comment:

      /*
       * IN: on call, the number of entries which can be stored in the buffer.
       * OUT: on return, the number of entries which have been stored in the
       * buffer. If on call the number is less than the number of all
       * necessary entries, on return it is the number of entries needed.
       */


No, I said "your addition lacks ...". And your addition is the "domid"
field.


Sorry, I misunderstood this. Is this what you mean?

struct xen_reserved_device_memory_map {
    /*
     * IN: Domain whose reservation is being changed.
     * Unprivileged domains can specify only DOMID_SELF.
     */
    domid_t        domid;
    /* IN/OUT */
    unsigned int nr_entries;
    /* OUT */
    XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
};
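
With these IN/OUT semantics, the tools side would presumably use the
usual two-call pattern; a rough sketch (do_memory_op() stands in for the
real hypercall wrapper, and the error handling is illustrative):

    /* Sketch of the expected usage; do_memory_op() stands in for the
     * real tools-side hypercall wrapper. */
    struct xen_reserved_device_memory_map map = {
        .domid = DOMID_SELF,
        .nr_entries = 0,              /* first call: query the total */
    };

    if ( do_memory_op(XENMEM_reserved_device_memory_map, &map) == -ENOBUFS )
    {
        xen_reserved_device_memory_t *buf =
            calloc(map.nr_entries, sizeof(*buf));

        set_xen_guest_handle(map.buffer, buf);
        /* second call: nr_entries now holds the buffer capacity */
        do_memory_op(XENMEM_reserved_device_memory_map, &map);
    }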

Thanks
Tiejun
