
[Resend RFC PATCH V4 07/13] HV/Vmbus: Add SNP support for VMbus channel initiate message

From: Tianyu Lan <Tianyu.Lan@xxxxxxxxxxxxx>

The monitor pages in the CHANNELMSG_INITIATE_CONTACT message are shared
with the host in an Isolation VM, so a hypercall is required to make
them visible to the host. In an Isolation VM with AMD SEV-SNP, the
pages must be accessed through the extra address space above the shared
GPA boundary, so remap them at the extra address (pa +
shared_gpa_boundary).

Signed-off-by: Tianyu Lan <Tianyu.Lan@xxxxxxxxxxxxx>
---
 drivers/hv/connection.c   | 65 +++++++++++++++++++++++++++++++++++++++
 drivers/hv/hyperv_vmbus.h |  1 +
 2 files changed, 66 insertions(+)
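
For reviewers less familiar with the shared-GPA scheme: the pattern
used below in vmbus_connect()/vmbus_negotiate_version(), and its
reverse in vmbus_disconnect(), boils down to the sketch that follows.
This is an illustration only, not part of the patch; it assumes the
hv_mark_gpa_visibility() helper introduced earlier in this series and
omits error handling beyond the happy path.

/* Sketch: share one guest page with the host, return a usable alias. */
static void *hv_share_page_sketch(void *va)
{
	u64 pfn = virt_to_hvpfn(va);

	/* Hypercall to make the page visible (decrypted) to the host. */
	if (hv_mark_gpa_visibility(1, &pfn, VMBUS_PAGE_VISIBLE_READ_WRITE))
		return NULL;

	/*
	 * In an SNP guest the original mapping still refers to
	 * encrypted memory, so access the page through its alias
	 * above the shared GPA boundary instead.
	 */
	return memremap(virt_to_phys(va) + ms_hyperv.shared_gpa_boundary,
			HV_HYP_PAGE_SIZE, MEMREMAP_WB);
}

/* Sketch: reverse order on teardown, then make the page private again. */
static void hv_unshare_page_sketch(void *alias, void *va)
{
	u64 pfn = virt_to_hvpfn(va);

	memunmap(alias);
	hv_mark_gpa_visibility(1, &pfn, VMBUS_PAGE_NOT_VISIBLE);
}

This is also why monitor_pages_va[] below keeps the original kernel VA:
vmbus_disconnect() needs it to restore monitor_pages[] so the backing
allocation can still be freed with hv_free_hyperv_page() after the
alias has been memunmap()ed.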

diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 186fd4c8acd4..a32bde143e4c 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -19,6 +19,7 @@
 #include <linux/vmalloc.h>
 #include <linux/hyperv.h>
 #include <linux/export.h>
+#include <linux/io.h>
 #include <asm/mshyperv.h>
 
 #include "hyperv_vmbus.h"
@@ -104,6 +105,12 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
 
        msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
        msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
+
+       if (hv_is_isolation_supported()) {
+               msg->monitor_page1 += ms_hyperv.shared_gpa_boundary;
+               msg->monitor_page2 += ms_hyperv.shared_gpa_boundary;
+       }
+
        msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
 
        /*
@@ -148,6 +155,31 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
                return -ECONNREFUSED;
        }
 
+       if (hv_is_isolation_supported()) {
+               vmbus_connection.monitor_pages_va[0]
+                       = vmbus_connection.monitor_pages[0];
+               vmbus_connection.monitor_pages[0]
+                       = memremap(msg->monitor_page1, HV_HYP_PAGE_SIZE,
+                                  MEMREMAP_WB);
+               if (!vmbus_connection.monitor_pages[0])
+                       return -ENOMEM;
+
+               vmbus_connection.monitor_pages_va[1]
+                       = vmbus_connection.monitor_pages[1];
+               vmbus_connection.monitor_pages[1]
+                       = memremap(msg->monitor_page2, HV_HYP_PAGE_SIZE,
+                                  MEMREMAP_WB);
+               if (!vmbus_connection.monitor_pages[1]) {
+                       memunmap(vmbus_connection.monitor_pages[0]);
+                       return -ENOMEM;
+               }
+
+               memset(vmbus_connection.monitor_pages[0], 0x00,
+                      HV_HYP_PAGE_SIZE);
+               memset(vmbus_connection.monitor_pages[1], 0x00,
+                      HV_HYP_PAGE_SIZE);
+       }
+
        return ret;
 }
 
@@ -159,6 +191,7 @@ int vmbus_connect(void)
        struct vmbus_channel_msginfo *msginfo = NULL;
        int i, ret = 0;
        __u32 version;
+       u64 pfn[2];
 
        /* Initialize the vmbus connection */
        vmbus_connection.conn_state = CONNECTING;
@@ -216,6 +249,16 @@ int vmbus_connect(void)
                goto cleanup;
        }
 
+       if (hv_is_isolation_supported()) {
+               pfn[0] = virt_to_hvpfn(vmbus_connection.monitor_pages[0]);
+               pfn[1] = virt_to_hvpfn(vmbus_connection.monitor_pages[1]);
+               if (hv_mark_gpa_visibility(2, pfn,
+                               VMBUS_PAGE_VISIBLE_READ_WRITE)) {
+                       ret = -EFAULT;
+                       goto cleanup;
+               }
+       }
+
        msginfo = kzalloc(sizeof(*msginfo) +
                          sizeof(struct vmbus_channel_initiate_contact),
                          GFP_KERNEL);
@@ -282,6 +325,8 @@ int vmbus_connect(void)
 
 void vmbus_disconnect(void)
 {
+       u64 pfn[2];
+
        /*
         * First send the unload request to the host.
         */
@@ -301,6 +346,26 @@ void vmbus_disconnect(void)
                vmbus_connection.int_page = NULL;
        }
 
+       if (hv_is_isolation_supported()) {
+               if (vmbus_connection.monitor_pages_va[0]) {
+                       memunmap(vmbus_connection.monitor_pages[0]);
+                       vmbus_connection.monitor_pages[0]
+                               = vmbus_connection.monitor_pages_va[0];
+                       vmbus_connection.monitor_pages_va[0] = NULL;
+               }
+
+               if (vmbus_connection.monitor_pages_va[1]) {
+                       memunmap(vmbus_connection.monitor_pages[1]);
+                       vmbus_connection.monitor_pages[1]
+                               = vmbus_connection.monitor_pages_va[1];
+                       vmbus_connection.monitor_pages_va[1] = NULL;
+               }
+
+               pfn[0] = virt_to_hvpfn(vmbus_connection.monitor_pages[0]);
+               pfn[1] = virt_to_hvpfn(vmbus_connection.monitor_pages[1]);
+               hv_mark_gpa_visibility(2, pfn, VMBUS_PAGE_NOT_VISIBLE);
+       }
+
        hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[0]);
        hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[1]);
        vmbus_connection.monitor_pages[0] = NULL;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 42f3d9d123a1..40bc0eff6665 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -240,6 +240,7 @@ struct vmbus_connection {
         * is child->parent notification
         */
        struct hv_monitor_page *monitor_pages[2];
+       void *monitor_pages_va[2];
        struct list_head chn_msg_list;
        spinlock_t channelmsg_lock;
 
-- 
2.25.1