# HG changeset patch
# User Ian.Campbell@xxxxxxxxxxxxx
# Node ID 228c96d95c8090dc5daf73de90999dc6cc63f5d3
# Parent dbe5427f3863de5a1f8b71e534d38de21933dc38
# Parent 57c50578414ddabd8064b12826fc13c2a2ed1706
merge
diff -r dbe5427f3863 -r 228c96d95c80 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Wed Jan 11 18:22:24 2006
+++ b/xen/arch/x86/domain.c Wed Jan 11 18:23:34 2006
@@ -288,9 +288,7 @@
#if defined(__i386__)
- d->arch.mapcache.l1tab = d->arch.mm_perdomain_pt +
- (GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
- spin_lock_init(&d->arch.mapcache.lock);
+ mapcache_init(d);
#else /* __x86_64__ */
diff -r dbe5427f3863 -r 228c96d95c80 xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Wed Jan 11 18:22:24 2006
+++ b/xen/arch/x86/x86_32/domain_page.c Wed Jan 11 18:23:34 2006
@@ -20,33 +20,16 @@
#include <asm/flushtlb.h>
#include <asm/hardirq.h>
-#define MAPCACHE_ORDER 10
-#define MAPCACHE_ENTRIES (1 << MAPCACHE_ORDER)
-
-/* Use a spare PTE bit to mark entries ready for recycling. */
-#define READY_FOR_TLB_FLUSH (1<<10)
-
-static void flush_all_ready_maps(void)
-{
- struct mapcache *cache = &current->domain->arch.mapcache;
- unsigned int i;
-
- for ( i = 0; i < MAPCACHE_ENTRIES; i++ )
- if ( (l1e_get_flags(cache->l1tab[i]) & READY_FOR_TLB_FLUSH) )
- cache->l1tab[i] = l1e_empty();
-}
-
-void *map_domain_pages(unsigned long pfn, unsigned int order)
+void *map_domain_page(unsigned long pfn)
{
unsigned long va;
- unsigned int idx, i, flags, vcpu = current->vcpu_id;
+ unsigned int idx, i, vcpu = current->vcpu_id;
struct domain *d;
struct mapcache *cache;
-#ifndef NDEBUG
- unsigned int flush_count = 0;
-#endif
+ struct vcpu_maphash_entry *hashent;
ASSERT(!in_irq());
+
perfc_incrc(map_domain_page_count);
/* If we are the idle domain, ensure that we run on our own page tables. */
@@ -56,6 +39,16 @@
cache = &d->arch.mapcache;
+ hashent = &cache->vcpu_maphash[vcpu].hash[MAPHASH_HASHFN(pfn)];
+ if ( hashent->pfn == pfn )
+ {
+ idx = hashent->idx;
+ hashent->refcnt++;
+ ASSERT(hashent->refcnt != 0);
+ ASSERT(l1e_get_pfn(cache->l1tab[idx]) == pfn);
+ goto out;
+ }
+
spin_lock(&cache->lock);
/* Has some other CPU caused a wrap? We must flush if so. */
@@ -70,45 +63,97 @@
}
}
- do {
- idx = cache->cursor = (cache->cursor + 1) & (MAPCACHE_ENTRIES - 1);
- if ( unlikely(idx == 0) )
- {
- ASSERT(flush_count++ == 0);
- flush_all_ready_maps();
- perfc_incrc(domain_page_tlb_flush);
- local_flush_tlb();
- cache->shadow_epoch[vcpu] = ++cache->epoch;
- cache->tlbflush_timestamp = tlbflush_current_time();
- }
-
- flags = 0;
- for ( i = 0; i < (1U << order); i++ )
- flags |= l1e_get_flags(cache->l1tab[idx+i]);
- }
- while ( flags & _PAGE_PRESENT );
-
- for ( i = 0; i < (1U << order); i++ )
- cache->l1tab[idx+i] = l1e_from_pfn(pfn+i, __PAGE_HYPERVISOR);
+ idx = find_next_zero_bit(cache->inuse, MAPCACHE_ENTRIES, cache->cursor);
+ if ( unlikely(idx >= MAPCACHE_ENTRIES) )
+ {
+ /* /First/, clean the garbage map and update the inuse list. */
+ for ( i = 0; i < ARRAY_SIZE(cache->garbage); i++ )
+ {
+ unsigned long x = xchg(&cache->garbage[i], 0);
+ cache->inuse[i] &= ~x;
+ }
+
+ /* /Second/, flush TLBs. */
+ perfc_incrc(domain_page_tlb_flush);
+ local_flush_tlb();
+ cache->shadow_epoch[vcpu] = ++cache->epoch;
+ cache->tlbflush_timestamp = tlbflush_current_time();
+
+ idx = find_first_zero_bit(cache->inuse, MAPCACHE_ENTRIES);
+ ASSERT(idx < MAPCACHE_ENTRIES);
+ }
+
+ set_bit(idx, cache->inuse);
+ cache->cursor = idx + 1;
spin_unlock(&cache->lock);
+ cache->l1tab[idx] = l1e_from_pfn(pfn, __PAGE_HYPERVISOR);
+
+ out:
va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT);
return (void *)va;
}
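
For illustration, the per-VCPU maphash fast path above can be modelled in
isolation. The following standalone sketch uses simplified stand-in types and
a user-space main(); it is not the hypervisor code itself, and the extra
MAPHASHENT_NOTINUSE check is only there to keep the toy model well-defined
before any entry has been populated.

/* Minimal model of the lock-free maphash lookup done before taking
 * cache->lock in map_domain_page(). */
#include <stdio.h>
#include <stdint.h>

#define MAPHASH_ENTRIES     8
#define MAPHASH_HASHFN(pfn) ((pfn) & (MAPHASH_ENTRIES - 1))
#define MAPHASHENT_NOTINUSE ((uint16_t)~0U)

struct vcpu_maphash_entry {
    unsigned long pfn;
    uint16_t      idx;      /* mapcache slot holding this pfn */
    uint16_t      refcnt;   /* outstanding map_domain_page() calls */
};

/* Return the cached mapcache slot for pfn, or -1 to fall back to the
 * locked slow path. */
static int maphash_lookup(struct vcpu_maphash_entry *hash, unsigned long pfn)
{
    struct vcpu_maphash_entry *ent = &hash[MAPHASH_HASHFN(pfn)];
    if ( (ent->idx != MAPHASHENT_NOTINUSE) && (ent->pfn == pfn) )
    {
        ent->refcnt++;      /* pin the slot while the mapping is live */
        return ent->idx;
    }
    return -1;
}

int main(void)
{
    struct vcpu_maphash_entry hash[MAPHASH_ENTRIES];
    for ( int i = 0; i < MAPHASH_ENTRIES; i++ )
        hash[i] = (struct vcpu_maphash_entry){ .idx = MAPHASHENT_NOTINUSE };

    /* Pretend slot 42 already maps pfn 0x1234 for this VCPU. */
    hash[MAPHASH_HASHFN(0x1234)] =
        (struct vcpu_maphash_entry){ .pfn = 0x1234, .idx = 42 };

    printf("hit:  %d\n", maphash_lookup(hash, 0x1234));  /* prints 42 */
    printf("miss: %d\n", maphash_lookup(hash, 0x5678));  /* prints -1 */
    return 0;
}
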
-void unmap_domain_pages(void *va, unsigned int order)
-{
- unsigned int idx, i;
+void unmap_domain_page(void *va)
+{
+ unsigned int idx;
struct mapcache *cache = &current->domain->arch.mapcache;
+ unsigned long pfn;
+ struct vcpu_maphash_entry *hashent;
+
+ ASSERT(!in_irq());
ASSERT((void *)MAPCACHE_VIRT_START <= va);
ASSERT(va < (void *)MAPCACHE_VIRT_END);
idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
-
- for ( i = 0; i < (1U << order); i++ )
- l1e_add_flags(cache->l1tab[idx+i], READY_FOR_TLB_FLUSH);
+ pfn = l1e_get_pfn(cache->l1tab[idx]);
+ hashent = &cache->vcpu_maphash[current->vcpu_id].hash[MAPHASH_HASHFN(pfn)];
+
+ if ( hashent->idx == idx )
+ {
+ ASSERT(hashent->pfn == pfn);
+ ASSERT(hashent->refcnt != 0);
+ hashent->refcnt--;
+ }
+ else if ( hashent->refcnt == 0 )
+ {
+ if ( hashent->idx != MAPHASHENT_NOTINUSE )
+ {
+ /* /First/, zap the PTE. */
+ ASSERT(l1e_get_pfn(cache->l1tab[hashent->idx]) == hashent->pfn);
+ cache->l1tab[hashent->idx] = l1e_empty();
+ /* /Second/, mark as garbage. */
+ set_bit(hashent->idx, cache->garbage);
+ }
+
+ /* Add newly-freed mapping to the maphash. */
+ hashent->pfn = pfn;
+ hashent->idx = idx;
+ }
+ else
+ {
+ /* /First/, zap the PTE. */
+ cache->l1tab[idx] = l1e_empty();
+ /* /Second/, mark as garbage. */
+ set_bit(idx, cache->garbage);
+ }
+}
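
The two-phase "zap the PTE, then mark the slot as garbage" scheme above pairs
with the reaping done in map_domain_page() when the cursor wraps. A minimal
user-space model of that bitmap handshake, assuming C11 atomics in place of
Xen's xchg() and with the TLB flush reduced to a comment, looks like this:

/* Toy model of the inuse/garbage bitmaps: unmap marks a slot as garbage;
 * the map path later reaps all garbage in one go (after a TLB flush in
 * the real code) so the slots can be reallocated. */
#include <stdio.h>
#include <stdatomic.h>

#define ENTRIES        64   /* stand-in for MAPCACHE_ENTRIES */
#define BITS_PER_LONG  (8 * sizeof(unsigned long))
#define NWORDS         ((ENTRIES + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long         inuse[NWORDS];
static _Atomic unsigned long garbage[NWORDS];

static void mark_garbage(unsigned int idx)   /* unmap_domain_page() side */
{
    atomic_fetch_or(&garbage[idx / BITS_PER_LONG],
                    1UL << (idx % BITS_PER_LONG));
}

static void reap_garbage(void)               /* map_domain_page() on wrap */
{
    for ( unsigned int i = 0; i < NWORDS; i++ )
    {
        unsigned long x = atomic_exchange(&garbage[i], 0);
        inuse[i] &= ~x;     /* freed slots become allocatable again */
    }
    /* local_flush_tlb() happens here in the hypervisor, before reuse. */
}

int main(void)
{
    inuse[0] = 0x7;         /* slots 0..2 currently allocated */
    mark_garbage(1);        /* slot 1 was unmapped */
    reap_garbage();
    printf("inuse[0] = %#lx\n", inuse[0]);   /* 0x5: slot 1 reclaimed */
    return 0;
}
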
+
+void mapcache_init(struct domain *d)
+{
+ unsigned int i, j;
+
+ d->arch.mapcache.l1tab = d->arch.mm_perdomain_pt +
+ (GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
+ spin_lock_init(&d->arch.mapcache.lock);
+
+ /* Mark all maphash entries as not in use. */
+ for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+ for ( j = 0; j < MAPHASH_ENTRIES; j++ )
+ d->arch.mapcache.vcpu_maphash[i].hash[j].idx =
+ MAPHASHENT_NOTINUSE;
}
#define GLOBALMAP_BITS (IOREMAP_MBYTES << (20 - PAGE_SHIFT))
@@ -128,15 +173,10 @@
spin_lock(&globalmap_lock);
- for ( ; ; )
- {
- idx = find_next_zero_bit(inuse, GLOBALMAP_BITS, inuse_cursor);
- va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
-
- /* End of round? If not then we're done in this loop. */
- if ( va < FIXADDR_START )
- break;
-
+ idx = find_next_zero_bit(inuse, GLOBALMAP_BITS, inuse_cursor);
+ va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
+ if ( unlikely(va >= FIXADDR_START) )
+ {
/* /First/, clean the garbage map and update the inuse list. */
for ( i = 0; i < ARRAY_SIZE(garbage); i++ )
{
@@ -147,7 +187,9 @@
/* /Second/, flush all TLBs to get rid of stale garbage mappings. */
flush_tlb_all();
- inuse_cursor = 0;
+ idx = find_first_zero_bit(inuse, GLOBALMAP_BITS);
+ va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
+ ASSERT(va < FIXADDR_START);
}
set_bit(idx, inuse);
diff -r dbe5427f3863 -r 228c96d95c80 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h Wed Jan 11 18:22:24 2006
+++ b/xen/include/asm-x86/domain.h Wed Jan 11 18:23:34 2006
@@ -13,13 +13,40 @@
unsigned long eip;
};
+#define MAPHASH_ENTRIES 8
+#define MAPHASH_HASHFN(pfn) ((pfn) & (MAPHASH_ENTRIES-1))
+#define MAPHASHENT_NOTINUSE ((u16)~0U)
+struct vcpu_maphash {
+ struct vcpu_maphash_entry {
+ unsigned long pfn;
+ uint16_t idx;
+ uint16_t refcnt;
+ } hash[MAPHASH_ENTRIES];
+} __cacheline_aligned;
+
+#define MAPCACHE_ORDER 10
+#define MAPCACHE_ENTRIES (1 << MAPCACHE_ORDER)
struct mapcache {
+ /* The PTEs that provide the mappings, and a cursor into the array. */
l1_pgentry_t *l1tab;
unsigned int cursor;
+
+ /* Protects map_domain_page(). */
+ spinlock_t lock;
+
+ /* Garbage mappings are flushed from TLBs in batches called 'epochs'. */
unsigned int epoch, shadow_epoch[MAX_VIRT_CPUS];
u32 tlbflush_timestamp;
- spinlock_t lock;
+
+ /* Which mappings are in use, and which are garbage to reap next epoch? */
+ unsigned long inuse[BITS_TO_LONGS(MAPCACHE_ENTRIES)];
+ unsigned long garbage[BITS_TO_LONGS(MAPCACHE_ENTRIES)];
+
+ /* Lock-free per-VCPU hash of recently-used mappings. */
+ struct vcpu_maphash vcpu_maphash[MAX_VIRT_CPUS];
};
+
+extern void mapcache_init(struct domain *d);
struct arch_domain
{
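
As a quick sanity check on the constants above: MAPCACHE_ORDER 10 gives 1024
mapping slots per domain, i.e. 4 MiB of virtual address space at the usual
4 KiB x86 page size, plus two 1024-bit bitmaps to track them. A trivial
sketch of that arithmetic (PAGE_SHIFT of 12 assumed):

/* Size check for the mapcache constants (assumes 4 KiB pages). */
#include <stdio.h>

#define PAGE_SHIFT       12
#define MAPCACHE_ORDER   10
#define MAPCACHE_ENTRIES (1 << MAPCACHE_ORDER)

int main(void)
{
    unsigned long va_span = (unsigned long)MAPCACHE_ENTRIES << PAGE_SHIFT;
    unsigned long bitmap_bytes = MAPCACHE_ENTRIES / 8;

    printf("%d slots, %lu KiB of VA, %lu-byte inuse/garbage bitmaps\n",
           MAPCACHE_ENTRIES, va_span >> 10, bitmap_bytes);
    /* Prints: 1024 slots, 4096 KiB of VA, 128-byte inuse/garbage bitmaps */
    return 0;
}
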
diff -r dbe5427f3863 -r 228c96d95c80 xen/include/xen/domain_page.h
--- a/xen/include/xen/domain_page.h Wed Jan 11 18:22:24 2006
+++ b/xen/include/xen/domain_page.h Wed Jan 11 18:23:34 2006
@@ -10,24 +10,19 @@
#include <xen/config.h>
#include <xen/mm.h>
-#define map_domain_page(pfn) map_domain_pages(pfn,0)
-#define unmap_domain_page(va) unmap_domain_pages(va,0)
-
#ifdef CONFIG_DOMAIN_PAGE
/*
- * Maps a given range of page frames, returning the mapped virtual address. The
- * pages are now accessible within the current VCPU until a corresponding
- * call to unmap_domain_page().
+ * Map a given page frame, returning the mapped virtual address. The page is
+ * then accessible within the current VCPU until a corresponding unmap call.
*/
-extern void *map_domain_pages(unsigned long pfn, unsigned int order);
+extern void *map_domain_page(unsigned long pfn);
/*
- * Pass a VA within the first page of a range previously mapped in the context
- * of the currently-executing VCPU via a call to map_domain_pages(). Those
- * pages will then be removed from the mapping lists.
+ * Pass a VA within a page previously mapped in the context of the
+ * currently-executing VCPU via a call to map_domain_page().
*/
-extern void unmap_domain_pages(void *va, unsigned int order);
+extern void unmap_domain_page(void *va);
/*
* Similar to the above calls, except the mapping is accessible in all
@@ -97,8 +92,8 @@
#else /* !CONFIG_DOMAIN_PAGE */
-#define map_domain_pages(pfn,order) phys_to_virt((pfn)<<PAGE_SHIFT)
-#define unmap_domain_pages(va,order) ((void)((void)(va),(void)(order)))
+#define map_domain_page(pfn) phys_to_virt((pfn)<<PAGE_SHIFT)
+#define unmap_domain_page(va) ((void)(va))
#define map_domain_page_global(pfn) phys_to_virt((pfn)<<PAGE_SHIFT)
#define unmap_domain_page_global(va) ((void)(va))
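
A typical (hypothetical) caller of the renamed single-page interface maps one
frame, touches it, and unmaps it on the same VCPU. The helper below is only an
illustrative fragment, not code from this changeset; PAGE_SIZE is assumed to
come in via the usual Xen page headers.

#include <xen/domain_page.h>      /* the interface declared above */

/* Hypothetical helper: zero one frame via a transient per-VCPU mapping. */
static void zero_frame(unsigned long pfn)
{
    unsigned long *p = map_domain_page(pfn);  /* valid on this VCPU only */
    unsigned int   i;

    for ( i = 0; i < PAGE_SIZE / sizeof(*p); i++ )
        p[i] = 0;

    unmap_domain_page(p);                     /* release the slot promptly */
}
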