
Re: [Xen-devel] [PATCH 05/18] arm/altp2m: Add HVMOP_altp2m_set_domain_state.



Hello Julien,


On 07/04/2016 05:39 PM, Julien Grall wrote:
> Hello Sergej,
>
> On 04/07/16 12:45, Sergej Proskurin wrote:
>> diff --git a/xen/arch/arm/hvm.c b/xen/arch/arm/hvm.c
>> index 8e8e0f7..cb90a55 100644
>> --- a/xen/arch/arm/hvm.c
>> +++ b/xen/arch/arm/hvm.c
>> @@ -104,8 +104,36 @@ static int do_altp2m_op(XEN_GUEST_HANDLE_PARAM(void) arg)
>>           break;
>>
>>       case HVMOP_altp2m_set_domain_state:
>> -        rc = -EOPNOTSUPP;
>> +    {
>
> I cannot find anything in the code which prevents this sub-op from being
> called concurrently. Did I miss something?
>

Please correct me if I am wrong, but isn't the rcu lock, which is taken
earlier in the same function, already sufficient? If it is not, one way
to serialize this sub-op is sketched after the snippet below.

[...]

d = (a.cmd != HVMOP_altp2m_vcpu_enable_notify) ?
    rcu_lock_domain_by_any_id(a.domain) : rcu_lock_current_domain();

[...]
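
If the rcu lock should turn out not to be enough, one option (only a
rough sketch on my side, not something this patch currently does) would
be to pause the domain around the toggle, so that two concurrent
HVMOP_altp2m_set_domain_state calls cannot race each other. Whether
domain_pause() is acceptable here, e.g. when a guest toggles its own
altp2m state, would still need to be checked:

    case HVMOP_altp2m_set_domain_state:
    {
        struct vcpu *v;
        bool_t ostate;

        if ( !d->arch.hvm_domain.params[HVM_PARAM_ALTP2M] )
        {
            rc = -EINVAL;
            break;
        }

        /* Keep all vcpus of d paused while the state is switched. */
        domain_pause(d);

        ostate = d->arch.altp2m_active;
        d->arch.altp2m_active = !!a.u.domain_state.state;

        /* ... enable/disable handling exactly as in the hunk below ... */

        domain_unpause(d);
        break;
    }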

>> +        struct vcpu *v;
>> +        bool_t ostate;
>> +
>> +        if ( !d->arch.hvm_domain.params[HVM_PARAM_ALTP2M] )
>> +        {
>> +            rc = -EINVAL;
>> +            break;
>> +        }
>> +
>> +        ostate = d->arch.altp2m_active;
>> +        d->arch.altp2m_active = !!a.u.domain_state.state;
>> +
>> +        /* If the alternate p2m state has changed, handle appropriately */
>> +        if ( d->arch.altp2m_active != ostate &&
>> +             (ostate || !(rc = p2m_init_altp2m_by_id(d, 0))) )
>> +        {
>> +            for_each_vcpu( d, v )
>> +            {
>> +                if ( !ostate )
>> +                    altp2m_vcpu_initialise(v);
>> +                else
>> +                    altp2m_vcpu_destroy(v);
>> +            }
>> +
>> +            if ( ostate )
>> +                p2m_flush_altp2m(d);
>> +        }
>>           break;
>> +    }
>>
>>       case HVMOP_altp2m_vcpu_enable_notify:
>>           rc = -EOPNOTSUPP;
>> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
>> index e72ca7a..4a745fd 100644
>> --- a/xen/arch/arm/p2m.c
>> +++ b/xen/arch/arm/p2m.c
>> @@ -2064,6 +2064,52 @@ int p2m_get_mem_access(struct domain *d, gfn_t gfn,
>>       return ret;
>>   }
>>
>
> The 3 helpers below are altp2m specific so I would move them in altp2m.c
>

I will do that, thank you.

>> +struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
>> +{
>> +    unsigned int index = vcpu_altp2m(v).p2midx;
>> +
>> +    if ( index == INVALID_ALTP2M )
>> +        return NULL;
>> +
>> +    BUG_ON(index >= MAX_ALTP2M);
>> +
>> +    return v->domain->arch.altp2m_p2m[index];
>> +}
>> +
>> +static void p2m_init_altp2m_helper(struct domain *d, unsigned int i)
>> +{
>> +    struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
>> +    struct vttbr_data *vttbr = &p2m->vttbr;
>> +
>> +    p2m->lowest_mapped_gfn = INVALID_GFN;
>> +    p2m->max_mapped_gfn = 0;
>
> Would it not be easier to reallocate the p2m from scratch every time you
> enable it?
>

Do you mean allocating each view on demand, instead of setting up all
altp2m_p2m entries at once in p2m_init_altp2m by means of p2m_init_one?
If yes, then I agree; a rough sketch of what that could look like is
below. Thank you.
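
Only a sketch on my side: it assumes the altp2m_p2m[] array starts out
NULL instead of being pre-allocated, and that p2m_init_one(d, p2m) (exact
signature still to be worked out) sets up p2m->root, p2m->vmid and
p2m->vttbr the way p2m_init_altp2m_helper() does today:

int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
{
    struct p2m_domain *p2m;
    int rc = -EINVAL;

    if ( idx >= MAX_ALTP2M )
        return rc;

    altp2m_lock(d);

    /* Allocate the view from scratch only when it gets enabled. */
    if ( d->arch.altp2m_p2m[idx] == NULL )
    {
        p2m = xzalloc(struct p2m_domain);
        if ( p2m == NULL )
            rc = -ENOMEM;
        else if ( (rc = p2m_init_one(d, p2m)) )  /* root tables, VMID, VTTBR */
            xfree(p2m);
        else
        {
            d->arch.altp2m_p2m[idx] = p2m;
            d->arch.altp2m_vttbr[idx] = p2m->vttbr.vttbr;
        }
    }

    altp2m_unlock(d);

    return rc;
}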

>> +
>> +    vttbr->vttbr_baddr = page_to_maddr(p2m->root);
>> +    vttbr->vttbr_vmid = p2m->vmid;
>> +
>> +    d->arch.altp2m_vttbr[i] = vttbr->vttbr;
>> +}
>> +
>> +int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
>> +{
>> +    int rc = -EINVAL;
>> +
>> +    if ( idx >= MAX_ALTP2M )
>> +        return rc;
>> +
>> +    altp2m_lock(d);
>> +
>> +    if ( d->arch.altp2m_vttbr[idx] == INVALID_MFN )
>> +    {
>> +        p2m_init_altp2m_helper(d, idx);
>> +        rc = 0;
>> +    }
>> +
>> +    altp2m_unlock(d);
>> +
>> +    return rc;
>> +}
>> +
>>   /*
>>    * Local variables:
>>    * mode: C
>
> [...]
>
>> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
>> index 6b9770f..8bcd618 100644
>> --- a/xen/include/asm-arm/domain.h
>> +++ b/xen/include/asm-arm/domain.h
>> @@ -138,6 +138,12 @@ struct arch_domain
>>       uint64_t altp2m_vttbr[MAX_ALTP2M];
>>   }  __cacheline_aligned;
>>
>> +struct altp2mvcpu {
>> +    uint16_t p2midx; /* alternate p2m index */
>> +};
>> +
>> +#define vcpu_altp2m(v) ((v)->arch.avcpu)
>> +
>>   struct arch_vcpu
>>   {
>>       struct {
>> @@ -267,6 +273,9 @@ struct arch_vcpu
>>       struct vtimer phys_timer;
>>       struct vtimer virt_timer;
>>       bool_t vtimer_initialized;
>> +
>> +    /* Alternate p2m context */
>> +    struct altp2mvcpu avcpu;
>>   }  __cacheline_aligned;
>>
>>   void vcpu_show_execution_state(struct vcpu *);
>> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
>> index a78d547..8ee78e0 100644
>> --- a/xen/include/asm-arm/p2m.h
>> +++ b/xen/include/asm-arm/p2m.h
>> @@ -121,6 +121,25 @@ void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
>>       /* Not supported on ARM. */
>>   }
>>
>> +/*
>> + * Alternate p2m: shadow p2m tables used for alternate memory views.
>> + */
>> +
>> +#define altp2m_lock(d)      spin_lock(&(d)->arch.altp2m_lock)
>> +#define altp2m_unlock(d)    spin_unlock(&(d)->arch.altp2m_lock)
>> +
>> +/* Get current alternate p2m table */
>> +struct p2m_domain *p2m_get_altp2m(struct vcpu *v);
>> +
>> +/* Flush all the alternate p2m's for a domain */
>> +static inline void p2m_flush_altp2m(struct domain *d)
>> +{
>> +    /* Not supported on ARM. */
>> +}
>> +
>> +/* Make a specific alternate p2m valid */
>> +int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx);
>> +
>
> Please move anything related to altp2m in altp2m.h
>

I will do that, thank you, Julien.
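
For reference, the relocated declarations could end up looking roughly
like this (the header path, guard and include are just my assumption of
where things would land):

/* xen/include/asm-arm/altp2m.h (sketch) */
#ifndef __ASM_ARM_ALTP2M_H
#define __ASM_ARM_ALTP2M_H

#include <xen/sched.h>

#define altp2m_lock(d)      spin_lock(&(d)->arch.altp2m_lock)
#define altp2m_unlock(d)    spin_unlock(&(d)->arch.altp2m_lock)

/* Get current alternate p2m table. */
struct p2m_domain *p2m_get_altp2m(struct vcpu *v);

/* Make a specific alternate p2m valid. */
int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx);

/* Flush all the alternate p2m's for a domain. */
static inline void p2m_flush_altp2m(struct domain *d)
{
    /* Not supported on ARM. */
}

#endif /* __ASM_ARM_ALTP2M_H */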

>>   #define p2m_is_foreign(_t)  ((_t) == p2m_map_foreign)
>>   #define p2m_is_ram(_t)      ((_t) == p2m_ram_rw || (_t) == p2m_ram_ro)
>>
>>
>
> Regards,
>

Best regards,
Sergej



 

