# HG changeset patch
# User cl349@xxxxxxxxxxxxxxxxxxxx
# Node ID 4c2d101a3228fa27117a771121f00bc38964e89d
# Parent 50778f42f2dd1de222219b132717744784d35b5f
Re-arrange code in preparation for a follow-up patch, and remove an extra shadow_lock
from a function that is only ever called with the lock already held.
Signed-off-by: Christian Limpach <Christian.Limpach@xxxxxxxxxxxx>
diff -r 50778f42f2dd -r 4c2d101a3228 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h Wed Mar 29 15:02:40 2006
+++ b/xen/include/asm-x86/shadow.h Wed Mar 29 15:47:46 2006
@@ -135,6 +135,8 @@
struct domain_mmap_cache *l1cache);
extern void remove_shadow(struct domain *d, unsigned long gpfn, u32 stype);
+extern void free_shadow_page(unsigned long smfn);
+
extern void shadow_l1_normal_pt_update(struct domain *d,
paddr_t pa, l1_pgentry_t l1e,
struct domain_mmap_cache *cache);
@@ -660,54 +662,12 @@
if ( likely(!shadow_mode_refcounts(d)) )
return;
- shadow_lock(d);
-
if ( page_out_of_sync(page) )
__shadow_sync_mfn(d, page_to_mfn(page));
shadow_remove_all_access(d, page_to_mfn(page));
-
- shadow_unlock(d);
-}
-#endif
-
-static inline void guest_physmap_add_page(
- struct domain *d, unsigned long gpfn, unsigned long mfn)
-{
- struct domain_mmap_cache c1, c2;
-
- if ( likely(!shadow_mode_translate(d)) )
- return;
-
- domain_mmap_cache_init(&c1);
- domain_mmap_cache_init(&c2);
- shadow_lock(d);
- shadow_sync_and_drop_references(d, mfn_to_page(mfn));
- set_p2m_entry(d, gpfn, mfn, &c1, &c2);
- set_gpfn_from_mfn(mfn, gpfn);
- shadow_unlock(d);
- domain_mmap_cache_destroy(&c1);
- domain_mmap_cache_destroy(&c2);
-}
-
-static inline void guest_physmap_remove_page(
- struct domain *d, unsigned long gpfn, unsigned long mfn)
-{
- struct domain_mmap_cache c1, c2;
-
- if ( likely(!shadow_mode_translate(d)) )
- return;
-
- domain_mmap_cache_init(&c1);
- domain_mmap_cache_init(&c2);
- shadow_lock(d);
- shadow_sync_and_drop_references(d, mfn_to_page(mfn));
- set_p2m_entry(d, gpfn, -1, &c1, &c2);
- set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
- shadow_unlock(d);
- domain_mmap_cache_destroy(&c1);
- domain_mmap_cache_destroy(&c2);
-}
+}
+#endif
/************************************************************************/
@@ -738,8 +698,6 @@
return 1;
}
-
-extern void free_shadow_page(unsigned long smfn);
/*
* Drop a shadow reference to smfn.
@@ -1525,6 +1483,46 @@
/************************************************************************/
+static inline void guest_physmap_add_page(
+ struct domain *d, unsigned long gpfn, unsigned long mfn)
+{
+ struct domain_mmap_cache c1, c2;
+
+ if ( likely(!shadow_mode_translate(d)) )
+ return;
+
+ domain_mmap_cache_init(&c1);
+ domain_mmap_cache_init(&c2);
+ shadow_lock(d);
+ shadow_sync_and_drop_references(d, mfn_to_page(mfn));
+ set_p2m_entry(d, gpfn, mfn, &c1, &c2);
+ set_gpfn_from_mfn(mfn, gpfn);
+ shadow_unlock(d);
+ domain_mmap_cache_destroy(&c1);
+ domain_mmap_cache_destroy(&c2);
+}
+
+static inline void guest_physmap_remove_page(
+ struct domain *d, unsigned long gpfn, unsigned long mfn)
+{
+ struct domain_mmap_cache c1, c2;
+
+ if ( likely(!shadow_mode_translate(d)) )
+ return;
+
+ domain_mmap_cache_init(&c1);
+ domain_mmap_cache_init(&c2);
+ shadow_lock(d);
+ shadow_sync_and_drop_references(d, mfn_to_page(mfn));
+ set_p2m_entry(d, gpfn, -1, &c1, &c2);
+ set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
+ shadow_unlock(d);
+ domain_mmap_cache_destroy(&c1);
+ domain_mmap_cache_destroy(&c2);
+}
+
+/************************************************************************/
+
void static inline
shadow_update_min_max(unsigned long smfn, int index)
{
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|