xen-devel

RE: [Xen-devel] [patch] (resend) mask out nx bits when calculating pfn/mfn

To: "Scott Parish" <srparish@xxxxxxxxxx>, <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: RE: [Xen-devel] [patch] (resend) mask out nx bits when calculating pfn/mfn
From: "Nakajima, Jun" <jun.nakajima@xxxxxxxxx>
Date: Tue, 7 Jun 2005 09:58:20 -0700
Delivery-date: Tue, 07 Jun 2005 16:57:36 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
Thread-index: AcVrf/pWTi3w5DFCRxCUaMx8scx14QAAER1Q
Thread-topic: [Xen-devel] [patch] (resend) mask out nx bits when calculating pfn/mfn
But did you really see the NX bit set? If so, that would be a good
catch, and it would explain several device driver failures. We should
fix the creator of the pte (via __supported_pte_mask), not the
consumer of it.
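
(A minimal sketch of the "fix the creator" idea, assuming the usual Linux
x86_64 names __supported_pte_mask and _PAGE_NX; mk_masked_kernel_pte() is a
hypothetical helper, not code from the xen-sparse tree:)

/* __supported_pte_mask has _PAGE_NX cleared on CPUs without NX support,
 * so building ptes through it guarantees no unsupported bit is ever set. */
extern unsigned long __supported_pte_mask;

static inline pte_t mk_masked_kernel_pte(unsigned long mfn,
                                         unsigned long prot)
{
        /* compose frame number and protection bits, then strip anything
         * the processor does not actually support (e.g. NX) */
        return __pte(((mfn << PAGE_SHIFT) | prot) & __supported_pte_mask);
}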

We also need to fix Xen: it should not reject the request just because
the NX bit is set.
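
(A rough sketch of that check, purely for illustration; this is not Xen's
actual validation code and all identifiers below are made up:)

#define NX_BIT  (1ULL << 63)

/* Accept a guest L1 pte update unless it carries flags the hypervisor
 * disallows; NX must not be in that set on NX-capable processors. */
static int l1e_flags_acceptable(unsigned long long l1e,
                                unsigned long long disallowed,
                                int cpu_has_nx)
{
        if (cpu_has_nx)
                disallowed &= ~NX_BIT;   /* NX is a legitimate flag here */
        return (l1e & disallowed) == 0;  /* any other stray bit: reject  */
}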

Jun
---
Intel Open Source Technology Center 

-----Original Message-----
From: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
[mailto:xen-devel-bounces@xxxxxxxxxxxxxxxxxxx] On Behalf Of Scott Parish
Sent: Tuesday, June 07, 2005 9:00 AM
To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: Re: [Xen-devel] [patch] (resend) mask out nx bits when calculating pfn/mfn

Please ignore the first patch; it had a messed-up macro.

sRp

On Tue, Jun 07, 2005 at 03:52:50PM +0000, Scott Parish wrote:

> This patch fixes a problem where it was possible (and seen) for the
> NX bit to not be masked out when calculating a pfn or mfn.
> 
> 
> sRp
> 
> -- 
> Scott Parish
> Signed-off-by: srparish@xxxxxxxxxx
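
(For readers following the patch below: a small standalone illustration of
the failure mode Scott describes, using a made-up pte value and an assumed
46-bit physical address width; the constants here are for demonstration only
and are not taken from the patch.)

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define _PAGE_NX    (1ULL << 63)
/* keep only the frame-number bits: clear the flag bits below bit 12 and
 * everything above the (assumed) 46 implemented physical address bits */
#define PTE_MASK    (((1ULL << 46) - 1) & ~((1ULL << PAGE_SHIFT) - 1))

int main(void)
{
        uint64_t mfn = 0x12345;                                /* machine frame   */
        uint64_t pte = (mfn << PAGE_SHIFT) | 0x063 | _PAGE_NX; /* present, NX set */

        /* shifting without masking lets bit 63 land in bit 51 of the "mfn" */
        printf("unmasked: %#llx\n",
               (unsigned long long)(pte >> PAGE_SHIFT));              /* 0x8000000012345 */
        printf("masked:   %#llx\n",
               (unsigned long long)((pte & PTE_MASK) >> PAGE_SHIFT)); /* 0x12345 */
        return 0;
}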

> diff -rN -u -p old-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c new-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c
> --- old-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c      2005-05-24 17:34:54.000000000 +0000
> +++ new-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c      2005-06-07 15:35:43.000000000 +0000
> @@ -78,7 +78,7 @@ xen_contig_memory(unsigned long vstart, 
>       pud_t         *pud; 
>       pmd_t         *pmd;
>       pte_t         *pte;
> -     unsigned long  pfn, i, flags;
> +     unsigned long  mfn, i, flags;
>  
>       scrub_pages(vstart, 1 << order);
>  
> @@ -90,16 +90,16 @@ xen_contig_memory(unsigned long vstart, 
>               pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
>               pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
>               pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
> -             pfn = pte->pte >> PAGE_SHIFT;
> +             mfn = pte_mfn(*pte);
>               xen_l1_entry_update(pte, 0);
>               phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
>                       (u32)INVALID_P2M_ENTRY;
>               if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
> -                                       &pfn, 1, 0) != 1) BUG();
> +                                       &mfn, 1, 0) != 1) BUG();
>       }
>       /* 2. Get a new contiguous memory extent. */
>       if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
> -                               &pfn, 1, order) != 1) BUG();
> +                               &mfn, 1, order) != 1) BUG();
>       /* 3. Map the new extent in place of old pages. */
>       for (i = 0; i < (1<<order); i++) {
>               pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
> @@ -107,12 +107,12 @@ xen_contig_memory(unsigned long vstart, 
>               pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
>               pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
>               xen_l1_entry_update(
> -                     pte, ((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL);
> +                     pte, ((mfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL);
>               xen_machphys_update(
> -                     pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
> +                     mfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
>               phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
> -                     pfn+i;
> +                     mfn+i;
>       }
>       /* Flush updates through and flush the TLB. */
>       xen_tlb_flush();
> diff -rN -u -p old-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/hypervisor.c new-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/hypervisor.c
> --- old-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/hypervisor.c       2005-06-06 16:39:54.000000000 +0000
> +++ new-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/hypervisor.c       2005-06-07 15:39:31.000000000 +0000
> @@ -256,7 +256,7 @@ unsigned long allocate_empty_lowmem_regi
>          pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
>          pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
>          pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE))); 
> -        pfn_array[i] = pte->pte >> PAGE_SHIFT;
> +        pfn_array[i] = pte_mfn(*pte);
>          xen_l1_entry_update(pte, 0);
>          phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
>              (u32)INVALID_P2M_ENTRY;
> diff -rN -u -p old-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/init.c new-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/init.c
> --- old-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/init.c     2005-06-02 23:09:31.000000000 +0000
> +++ new-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/init.c     2005-06-07 15:39:50.000000000 +0000
> @@ -395,7 +395,7 @@ unsigned long get_machine_pfn(unsigned l
>          pmd_t* pmd = pmd_offset(pud, addr);
>          pte_t *pte = pte_offset_kernel(pmd, addr);
>          
> -        return (pte->pte >> PAGE_SHIFT);
> +        return pte_mfn(*pte);
>  } 
>  
>  #define ALIGN_TO_4K __attribute__((section(".data.page_aligned")))
> diff -rN -u -p old-xen-64-4/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h new-xen-64-4/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h
> --- old-xen-64-4/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h  2005-05-28 09:20:36.000000000 +0000
> +++ new-xen-64-4/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h  2005-06-07 15:32:47.000000000 +0000
> @@ -277,9 +277,10 @@ static inline unsigned long pud_bad(pud_
>   */
>  #define INVALID_P2M_ENTRY (~0UL)
>  #define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned
long)*8)-1)))
> +#define pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
>  #define pte_pfn(_pte)                                                \
>  ({                                                                   \
> -     unsigned long mfn = (_pte).pte >> PAGE_SHIFT;                   \
> +     unsigned long mfn = pte_mfn(_pte);                              \
>       unsigned pfn = mfn_to_pfn(mfn);                                 \
>       if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn))             \
>               pfn = max_mapnr; /* special: force !pfn_valid() */      \



-- 
Scott Parish
Signed-off-by: srparish@xxxxxxxxxx

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel