[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v4 7/8] pdx: introduce a new compression algorithm based on region offsets


  • To: Roger Pau Monne <roger.pau@xxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Tue, 5 Aug 2025 14:28:22 +0200
  • Autocrypt: addr=jbeulich@xxxxxxxx; keydata= xsDiBFk3nEQRBADAEaSw6zC/EJkiwGPXbWtPxl2xCdSoeepS07jW8UgcHNurfHvUzogEq5xk hu507c3BarVjyWCJOylMNR98Yd8VqD9UfmX0Hb8/BrA+Hl6/DB/eqGptrf4BSRwcZQM32aZK 7Pj2XbGWIUrZrd70x1eAP9QE3P79Y2oLrsCgbZJfEwCgvz9JjGmQqQkRiTVzlZVCJYcyGGsD /0tbFCzD2h20ahe8rC1gbb3K3qk+LpBtvjBu1RY9drYk0NymiGbJWZgab6t1jM7sk2vuf0Py O9Hf9XBmK0uE9IgMaiCpc32XV9oASz6UJebwkX+zF2jG5I1BfnO9g7KlotcA/v5ClMjgo6Gl MDY4HxoSRu3i1cqqSDtVlt+AOVBJBACrZcnHAUSuCXBPy0jOlBhxPqRWv6ND4c9PH1xjQ3NP nxJuMBS8rnNg22uyfAgmBKNLpLgAGVRMZGaGoJObGf72s6TeIqKJo/LtggAS9qAUiuKVnygo 3wjfkS9A3DRO+SpU7JqWdsveeIQyeyEJ/8PTowmSQLakF+3fote9ybzd880fSmFuIEJldWxp Y2ggPGpiZXVsaWNoQHN1c2UuY29tPsJgBBMRAgAgBQJZN5xEAhsDBgsJCAcDAgQVAggDBBYC AwECHgECF4AACgkQoDSui/t3IH4J+wCfQ5jHdEjCRHj23O/5ttg9r9OIruwAn3103WUITZee e7Sbg12UgcQ5lv7SzsFNBFk3nEQQCACCuTjCjFOUdi5Nm244F+78kLghRcin/awv+IrTcIWF hUpSs1Y91iQQ7KItirz5uwCPlwejSJDQJLIS+QtJHaXDXeV6NI0Uef1hP20+y8qydDiVkv6l IreXjTb7DvksRgJNvCkWtYnlS3mYvQ9NzS9PhyALWbXnH6sIJd2O9lKS1Mrfq+y0IXCP10eS FFGg+Av3IQeFatkJAyju0PPthyTqxSI4lZYuJVPknzgaeuJv/2NccrPvmeDg6Coe7ZIeQ8Yj t0ARxu2xytAkkLCel1Lz1WLmwLstV30g80nkgZf/wr+/BXJW/oIvRlonUkxv+IbBM3dX2OV8 AmRv1ySWPTP7AAMFB/9PQK/VtlNUJvg8GXj9ootzrteGfVZVVT4XBJkfwBcpC/XcPzldjv+3 HYudvpdNK3lLujXeA5fLOH+Z/G9WBc5pFVSMocI71I8bT8lIAzreg0WvkWg5V2WZsUMlnDL9 mpwIGFhlbM3gfDMs7MPMu8YQRFVdUvtSpaAs8OFfGQ0ia3LGZcjA6Ik2+xcqscEJzNH+qh8V m5jjp28yZgaqTaRbg3M/+MTbMpicpZuqF4rnB0AQD12/3BNWDR6bmh+EkYSMcEIpQmBM51qM EKYTQGybRCjpnKHGOxG0rfFY1085mBDZCH5Kx0cl0HVJuQKC+dV2ZY5AqjcKwAxpE75MLFkr wkkEGBECAAkFAlk3nEQCGwwACgkQoDSui/t3IH7nnwCfcJWUDUFKdCsBH/E5d+0ZnMQi+G0A nAuWpQkjM1ASeQwSHEeAWPgskBQL
  • Cc: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>, Community Manager <community.manager@xxxxxxxxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Anthony PERARD <anthony.perard@xxxxxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>, Julien Grall <julien@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, xen-devel@xxxxxxxxxxxxxxxxxxxx
  • Delivery-date: Tue, 05 Aug 2025 12:28:39 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

On 05.08.2025 11:52, Roger Pau Monne wrote:
> --- a/xen/common/pdx.c
> +++ b/xen/common/pdx.c
> @@ -24,6 +24,7 @@
>  #include <xen/param.h>
>  #include <xen/pfn.h>
>  #include <xen/sections.h>
> +#include <xen/sort.h>
>  
>  /**
>   * Maximum (non-inclusive) usable pdx. Must be
> @@ -40,6 +41,12 @@ bool __mfn_valid(unsigned long mfn)
>  
>  #ifdef CONFIG_PDX_MASK_COMPRESSION
>      invalid |= mfn & pfn_hole_mask;
> +#elif defined(CONFIG_PDX_OFFSET_COMPRESSION)
> +{
> +    unsigned long base = pfn_bases[PFN_TBL_IDX(mfn)];
> +
> +    invalid |= mfn < base || mfn >= base + pdx_region_size;
> +}
>  #endif

Hmm, didn't notice this earlier on: Brace placement looks odd here. I think
they want to be indented by one level, as they aren't starting a function
body.

> @@ -294,7 +308,245 @@ void __init pfn_pdx_compression_reset(void)
>      nr_ranges = 0;
>  }
>  
> -#endif /* CONFIG_PDX_COMPRESSION */
> +#elif defined(CONFIG_PDX_OFFSET_COMPRESSION) /* CONFIG_PDX_MASK_COMPRESSION */
> +
> +unsigned int __ro_after_init pfn_index_shift;
> +unsigned int __ro_after_init pdx_index_shift;
> +
> +unsigned long __ro_after_init pfn_pdx_lookup[CONFIG_PDX_NR_LOOKUP];
> +unsigned long __ro_after_init pdx_pfn_lookup[CONFIG_PDX_NR_LOOKUP];
> +unsigned long __ro_after_init pfn_bases[CONFIG_PDX_NR_LOOKUP];
> +unsigned long __ro_after_init pdx_region_size = ~0UL;

For cache locality, might this last one better also move ahead of the arrays?

> +bool pdx_is_region_compressible(paddr_t base, unsigned long npages)
> +{
> +    unsigned long pfn = PFN_DOWN(base);
> +    unsigned long pfn_base = pfn_bases[PFN_TBL_IDX(pfn)];
> +
> +    return pfn >= pfn_base &&
> +           pfn + npages <= pfn_base + pdx_region_size;
> +}
> +
> +static int __init cf_check cmp_node(const void *a, const void *b)
> +{
> +    const struct pfn_range *l = a;
> +    const struct pfn_range *r = b;
> +
> +    if ( l->base_pfn > r->base_pfn )
> +        return 1;
> +    if ( l->base_pfn < r->base_pfn )
> +        return -1;
> +
> +    return 0;
> +}
> +
> +static void __init cf_check swp_node(void *a, void *b)
> +{
> +    SWAP(a, b);
> +}

This hasn't changed from v3, and still looks wrong to me: SWAP(a, b) here
exchanges the local pointer variables a and b, not the array elements they
point at, so the swap callback is effectively a no-op and sort() can't
actually reorder the entries. The pointed-to struct pfn_range objects need
swapping instead.

> +bool __init pfn_pdx_compression_setup(paddr_t base)
> +{
> +    unsigned long mask = PFN_DOWN(pdx_init_mask(base)), idx_mask = 0;
> +    unsigned long pages = 0;
> +    unsigned int i;
> +
> +    if ( !nr_ranges )
> +    {
> +        printk(XENLOG_DEBUG "PFN compression disabled%s\n",
> +               pdx_compress ? ": no ranges provided" : "");
> +        return false;
> +    }
> +
> +    if ( nr_ranges > ARRAY_SIZE(ranges) )
> +    {
> +        printk(XENLOG_WARNING
> +               "Too many PFN ranges (%u > %zu), not attempting PFN compression\n",
> +               nr_ranges, ARRAY_SIZE(ranges));
> +        return false;
> +    }
> +
> +    /* Sort ranges by start address. */
> +    sort(ranges, nr_ranges, sizeof(*ranges), cmp_node, swp_node);
> +
> +    for ( i = 0; i < nr_ranges; i++ )
> +    {
> +        unsigned long start = ranges[i].base_pfn;
> +
> +        /*
> +         * Align range base to MAX_ORDER.  This is required so the PDX offset
> +         * for the bits below MAX_ORDER matches the MFN offset, and pages
> +         * greater than the minimal order can be used to populate the
> +         * directmap.
> +         */
> +        ranges[i].base_pfn = start & ~((1UL << MAX_ORDER) - 1);
> +        ranges[i].pages = start + ranges[i].pages - ranges[i].base_pfn;
> +
> +        /*
> +         * Only merge overlapped regions now, leave adjacent regions separated.
> +         * They would be merged later if both use the same index into the
> +         * lookup table.
> +         */
> +        if ( !i ||
> +             ranges[i].base_pfn >=
> +             (ranges[i - 1].base_pfn + ranges[i - 1].pages) )
> +        {
> +            mask |= pdx_region_mask(ranges[i].base_pfn, ranges[i].pages);
> +            continue;
> +        }
> +
> +        ranges[i - 1].pages = ranges[i].base_pfn + ranges[i].pages -
> +                              ranges[i - 1].base_pfn;
> +
> +        if ( i + 1 < nr_ranges )
> +            memmove(&ranges[i], &ranges[i + 1],
> +                    (nr_ranges - (i + 1)) * sizeof(ranges[0]));
> +        else /* last range */
> +            mask |= pdx_region_mask(ranges[i].base_pfn, ranges[i].pages);
> +        nr_ranges--;
> +        i--;
> +    }
> +
> +    /*
> +     * Populate a mask with the non-equal bits of the different ranges, do this
> +     * to calculate the maximum PFN shift to use as the lookup table index.
> +     */
> +    for ( i = 0; i < nr_ranges; i++ )
> +        for ( unsigned int j = 0; j < nr_ranges; j++ )
> +            idx_mask |= (ranges[i].base_pfn & ~mask) ^
> +                        (ranges[j].base_pfn & ~mask);

"mask" is loop invariant - can't the AND-ing be pulled out, after the loop?
Further, isn't it sufficient for the inner loop to start from i + 1?

Jan



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.