+static inline
+int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
+                              struct p2m_domain *ap2m, p2m_access_t a,
+                              gfn_t gfn)
+{
+    p2m_type_t p2mt;
+    xenmem_access_t xma_old;
+    paddr_t gpa = pfn_to_paddr(gfn_x(gfn));
+    paddr_t maddr, mask = 0;
+    unsigned int level;
+    unsigned long mattr;
+    int rc;
+
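+    /*
+     * Conversion table: translate the host's xenmem_access_t (as returned
+     * by __p2m_get_mem_access()) into the corresponding p2m_access_t.
+     */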
+    static const p2m_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
+        ACCESS(n),
+        ACCESS(r),
+        ACCESS(w),
+        ACCESS(rw),
+        ACCESS(x),
+        ACCESS(rx),
+        ACCESS(wx),
+        ACCESS(rwx),
+        ACCESS(rx2rw),
+        ACCESS(n2rwx),
+#undef ACCESS
+    };
+
+    /* Check if entry is part of the altp2m view. */
+    spin_lock(&ap2m->lock);
+    maddr = __p2m_lookup(ap2m, gpa, &p2mt);
+    spin_unlock(&ap2m->lock);
+
+    /* Check host p2m if no valid entry in ap2m. */
+    if ( maddr == INVALID_PADDR )
+    {
+        /* Check if entry is part of the host p2m view. */
+        spin_lock(&hp2m->lock);
+        maddr = __p2m_lookup(hp2m, gpa, &p2mt);
+        if ( maddr == INVALID_PADDR || p2mt != p2m_ram_rw )
+        {
+            /* The gfn is not backed by plain RAM in the host p2m either. */
+            rc = -ESRCH;
+            goto out_unlock;
+        }
+
+        rc = __p2m_get_mem_access(hp2m, gfn, &xma_old);
+        if ( rc )
+            goto out_unlock;
+
+        rc = p2m_get_gfn_level_and_attr(hp2m, gpa, &level, &mattr);
+        if ( rc )
+            goto out_unlock;
+        spin_unlock(&hp2m->lock);
+
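+        /*
+         * If the host maps this gfn as part of a superpage, the whole
+         * superpage is copied into the altp2m view with its original
+         * access rights first, so that everything apart from the single
+         * 4K page changed below keeps the host's permissions.
+         */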
+        mask = level_masks[level];
+
+        /* If this is a superpage, copy that first. */
+        if ( level != 3 )
+        {
+            rc = apply_p2m_changes(d, ap2m, INSERT,
+                                   gpa & mask,
+                                   (gpa + level_sizes[level]) & mask,
+                                   maddr & mask, mattr, 0, p2mt,
+                                   memaccess[xma_old]);
+            if ( rc < 0 )
+                goto out;
+        }
+    }
+    else
+    {
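+        /*
+         * The gfn already has an entry in the altp2m view: reuse its
+         * level and memory attributes for the re-insertion below.
+         */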
+        spin_lock(&ap2m->lock);
+        rc = p2m_get_gfn_level_and_attr(ap2m, gpa, &level, &mattr);
+        spin_unlock(&ap2m->lock);
+        if ( rc )
+            goto out;
+    }
+
+    /* Set mem access attributes - currently supporting only one (4K) page. */
+    mask = level_masks[3];
+    return apply_p2m_changes(d, ap2m, INSERT,
+                             gpa & mask,
+                             (gpa + level_sizes[3]) & mask,
+                             maddr & mask, mattr, 0, p2mt, a);
+
+    /* Error paths still holding hp2m->lock enter through out_unlock. */
+out_unlock:
+    spin_unlock(&hp2m->lock);
+
+out:
+    return rc;
+}
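+
+/*
+ * Usage sketch (hypothetical caller, not part of this hunk): the altp2m
+ * set-mem-access handler is expected to resolve the host p2m and the
+ * target altp2m view, then invoke this helper once per gfn, e.g.
+ *
+ *     rc = p2m_set_altp2m_mem_access(d, hp2m, ap2m, a, gfn);
+ */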