
[Xen-devel] [PATCH 4 of 5] mem_access: EPT bit allocation changes for access


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Joe Epstein <jepstein@xxxxxxxxxxxxxxxxxxxx>
  • Date: Tue, 28 Dec 2010 23:27:29 -0800
  • Delivery-date: Tue, 28 Dec 2010 23:33:47 -0800
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

* Allocates four bits of the EPT entry for access permissions.  Unfortunately,
  this means taking four bits back from the just-allocated 10-bit page type
  field (p2m_type_t); however, 10 bits is generous for page types, so the
  reduction to 6 seemed reasonable (see the layout sketch below).
* Defines the specific page access permissions (p2m_access_t).
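
For reference, here is the affected slice of the EPT entry layout before and
after the change (a condensed sketch of the bitfield union in vmx.h, with the
surrounding fields elided; the full definition is in the diff below):

    /* Before: one 10-bit software-available field. */
    u64 sa_p2mt :  10;  /* bits 61:52 - p2m_type_t */

    /* After: the same ten bits, split 6/4. */
    u64 sa_p2mt :   6;  /* bits 57:52 - p2m_type_t (up to 64 types)    */
    u64 access  :   4;  /* bits 61:58 - p2m_access_t (up to 16 values) */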

Signed-off-by: Joe Epstein <jepstein98@xxxxxxxxx>

diff -r 4e108cf56d07 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Mon Dec 27 08:00:09 2010 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h     Tue Dec 28 23:07:17 2010 -0800
@@ -356,7 +356,10 @@

 int hvm_debug_op(struct vcpu *v, int32_t op);

-bool_t hvm_hap_nested_page_fault(unsigned long gfn);
+bool_t hvm_hap_nested_page_fault(unsigned long gpa,
+                                bool_t gla_valid, unsigned long gla,
+                                bool_t access_r, bool_t access_w,
+                                bool_t access_x);

 #define hvm_msr_tsc_aux(v) ({                                               \
     struct domain *__d = (v)->domain;                                       \
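
The widened hvm_hap_nested_page_fault() signature above lines up with the VMX
EPT-violation exit qualification (bit 0 = data read, bit 1 = data write,
bit 2 = instruction fetch, bit 7 = guest linear address valid).  A hedged
sketch of what the call site in ept_handle_violation() might look like; the
actual wiring is in the companion VMX patch of this series, not shown here:

    static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
    {
        unsigned long gla = 0;
        bool_t gla_valid = !!(qualification & (1UL << 7));

        if ( gla_valid )
            gla = __vmread(GUEST_LINEAR_ADDRESS);

        hvm_hap_nested_page_fault(gpa, gla_valid, gla,
                                  !!(qualification & 1),  /* read  */
                                  !!(qualification & 2),  /* write */
                                  !!(qualification & 4)); /* exec  */
    }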
diff -r 4e108cf56d07 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Mon Dec 27 08:00:09 2010 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Tue Dec 28 23:07:17 2010 -0800
@@ -42,7 +42,8 @@
         rsvd2_snp   :   1,  /* bit 11 - Used for VT-d snoop control
                                in shared EPT/VT-d usage */
         mfn         :   40, /* bits 51:12 - Machine physical frame number */
-        sa_p2mt     :   10, /* bits 61:52 - Software available 2 */
+        sa_p2mt     :   6,  /* bits 57:52 - Software available 2 */
+        access      :   4,  /* bits 61:58 - p2m_access_t */
         rsvd3_tm    :   1,  /* bit 62 - Used for VT-d transient-mapping
                                hint in shared EPT/VT-d usage */
         avail3      :   1;  /* bit 63 - Software available 3 */
diff -r 4e108cf56d07 xen/include/asm-x86/mem_event.h
--- a/xen/include/asm-x86/mem_event.h   Mon Dec 27 08:00:09 2010 +0000
+++ b/xen/include/asm-x86/mem_event.h   Tue Dec 28 23:07:17 2010 -0800
@@ -24,6 +24,8 @@
 #ifndef __MEM_EVENT_H__
 #define __MEM_EVENT_H__

+/* Returns true if a listener exists, else pauses VCPU */
+int mem_event_check_listener(struct domain *d);
 int mem_event_check_ring(struct domain *d);
 void mem_event_put_request(struct domain *d, mem_event_request_t *req);
 void mem_event_get_response(struct domain *d, mem_event_response_t *rsp);
diff -r 4e108cf56d07 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Mon Dec 27 08:00:09 2010 +0000
+++ b/xen/include/asm-x86/p2m.h Tue Dec 28 23:07:17 2010 -0800
@@ -88,6 +88,31 @@
     p2m_ram_broken  =14,          /* Broken page, access cause domain crash */
 } p2m_type_t;

+/*
+ * Additional access types, which are used to further restrict
+ * the permissions given by the p2m_type_t memory type.  Violations
+ * caused by p2m_access_t restrictions are sent to the mem_event
+ * interface.
+ *
+ * The access permissions are soft state: when any ambiguous change of page
+ * type or use occurs, or when pages are flushed, swapped, or at any other
+ * convenient time, the access permissions can get reset to the p2m_domain
+ * default.
+ */
+typedef enum {
+    p2m_access_n     = 0, /* No access permissions allowed */
+    p2m_access_r     = 1,
+    p2m_access_w     = 2,
+    p2m_access_rw    = 3,
+    p2m_access_x     = 4,
+    p2m_access_rx    = 5,
+    p2m_access_wx    = 6,
+    p2m_access_rwx   = 7,
+    p2m_access_rx2rw = 8, /* Special: page goes from RX to RW on write */
+
+    /* NOTE: Assumed to be only 4 bits right now */
+} p2m_access_t;
+
 typedef enum {
     p2m_query = 0,              /* Do not populate a PoD entries      */
     p2m_alloc = 1,              /* Automatically populate PoD entries */
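
To make the enum's semantics concrete, here is a minimal, illustrative sketch
of the violation test the fault path needs (the helper name is hypothetical;
the real logic lands in p2m_mem_access_check() later in this series):

    /* Does an (r, w, x) access violate the page's p2m_access_t?  For
     * values 0-7 the enum doubles as a bitmask (r = 1, w = 2, x = 4);
     * p2m_access_rx2rw is the one special case: a write converts the
     * page to rw instead of raising an event. */
    static bool_t access_violates(p2m_access_t a, bool_t r, bool_t w, bool_t x)
    {
        if ( a == p2m_access_rx2rw )
            return 0;   /* write path converts; read/exec are allowed */
        return (r && !(a & p2m_access_r)) ||
               (w && !(a & p2m_access_w)) ||
               (x && !(a & p2m_access_x));
    }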
@@ -182,18 +207,30 @@
     int                (*set_entry   )(struct p2m_domain *p2m,
                                        unsigned long gfn,
                                        mfn_t mfn, unsigned int page_order,
-                                       p2m_type_t p2mt);
+                                       p2m_type_t p2mt,
+                                       p2m_access_t p2ma);
     mfn_t              (*get_entry   )(struct p2m_domain *p2m,
                                        unsigned long gfn,
                                        p2m_type_t *p2mt,
+                                       p2m_access_t *p2ma,
                                        p2m_query_t q);
     mfn_t              (*get_entry_current)(struct p2m_domain *p2m,
                                             unsigned long gfn,
                                             p2m_type_t *p2mt,
+                                            p2m_access_t *p2ma,
                                             p2m_query_t q);
     void               (*change_entry_type_global)(struct p2m_domain *p2m,
                                                    p2m_type_t ot,
                                                    p2m_type_t nt);
+
+    /* Default P2M access type for each page in the domain: new pages,
+     * swapped in pages, cleared pages, and pages that are ambiguously
+     * retyped get this access type.  See definition of p2m_access_t. */
+    p2m_access_t default_access;
+
+    /* If true, and an access fault comes in and there is no mem_event
+     * listener, pause domain.  Otherwise, remove access restrictions. */
+    bool_t       access_required;

     /* Highest guest frame that's ever been mapped in the p2m */
     unsigned long max_mapped_pfn;
@@ -284,9 +321,10 @@
 /* Read the current domain's p2m table.  Do not populate PoD pages. */
 static inline mfn_t gfn_to_mfn_type_current(struct p2m_domain *p2m,
                                             unsigned long gfn, p2m_type_t *t,
+                                            p2m_access_t *a,
                                             p2m_query_t q)
 {
-    return p2m->get_entry_current(p2m, gfn, t, q);
+    return p2m->get_entry_current(p2m, gfn, t, a, q);
 }

 /* Read P2M table, mapping pages as we go.
@@ -295,7 +333,8 @@
 gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
                               p2m_type_t *t, p2m_query_t q)
 {
-    return p2m->get_entry(p2m, gfn, t, q);
+    p2m_access_t a = 0;
+    return p2m->get_entry(p2m, gfn, t, &a, q);
 }


@@ -305,6 +344,7 @@
                                      p2m_query_t q)
 {
     mfn_t mfn;
+    p2m_access_t a;

     if ( !p2m || !paging_mode_translate(p2m->domain) )
     {
@@ -314,7 +354,7 @@
         mfn = _mfn(gfn);
     }
     else if ( likely(current->domain == p2m->domain) )
-        mfn = gfn_to_mfn_type_current(p2m, gfn, t, q);
+        mfn = gfn_to_mfn_type_current(p2m, gfn, t, &a, q);
     else
         mfn = gfn_to_mfn_type_p2m(p2m, gfn, t, q);

@@ -382,7 +422,7 @@
 }

 /* Init the datastructures for later use by the p2m code */
-int p2m_init(struct domain *d);
+int p2m_init(struct domain *d, unsigned int domcr_flags);

 /* Allocate a new p2m table for a domain.
  *
@@ -482,6 +522,14 @@
 { }
 #endif

+/* Send mem event based on the access (gla is -1ull if not available),
+ * return true if the event will be taken care of by a mem event
+ * listener.  Handles rx2rw conversion. */
+int p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
+                         bool_t access_r, bool_t access_w, bool_t access_x);
+/* Resumes the running of the VCPU, restarting the last instruction */
+void p2m_mem_access_resume(struct p2m_domain *p2m);
+
 struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);

 #endif /* _XEN_P2M_H */
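
Taken together, the new declarations describe the intended fault-path policy:
raise an event if a listener is attached, otherwise pause or relax depending
on access_required.  A condensed, hypothetical sketch in terms of the
declarations above (handle_access_fault() is an illustrative name, and the
request fields shown are abridged; the real bodies live in the other patches
of this series):

    static void handle_access_fault(struct domain *d, struct p2m_domain *p2m,
                                    unsigned long gfn)
    {
        if ( mem_event_check_listener(d) )    /* a listener is attached */
        {
            mem_event_request_t req;

            memset(&req, 0, sizeof(req));
            req.gfn = gfn;                    /* gla/access bits omitted here */
            mem_event_put_request(d, &req);   /* faulting VCPU stays paused
                                               * until the listener replies and
                                               * p2m_mem_access_resume() runs */
        }
        else if ( p2m->access_required )
            domain_pause(d);                  /* events mandatory, no listener */
        else
        {
            /* No listener and events optional: relax this page back to
             * p2m->default_access and retry the faulting access. */
        }
    }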

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

