Re: [Xen-devel] [PATCH] credit1: Use atomic bit operations for the flags structure
Ping?
On Wed, Feb 27, 2013 at 2:29 PM, George Dunlap
<george.dunlap@xxxxxxxxxxxxx> wrote:
> The flags structure is not protected by locks (or more precisely,
> it is protected using an inconsistent set of locks); we therefore need
> to make sure that all accesses are atomic-safe. This is particularly
> important in the case of the PARKED flag, which, if clobbered while
> the YIELD bit is being changed, will leave a vcpu wedged in an
> offline state.
>
> Using the atomic bitops also requires us to change the size of the "flags"
> element.
>
> Spotted-by: Igor Pavlikevich <ipavlikevich@xxxxxxxxx>
> Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> ---
> xen/common/sched_credit.c | 23 ++++++++++-------------
> 1 file changed, 10 insertions(+), 13 deletions(-)
>
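To make the race concrete, here is a minimal standalone sketch (not part of the patch; the pthreads setup, thread names, and iteration counts are illustrative) of how two unlocked read-modify-write sequences on the same flags word can lose a bit, just as a YIELD update can clobber PARKED:

/*
 * Hypothetical demo of the lost-update race.  The flag masks mirror
 * the old-style definitions; everything else is made up for the
 * sketch.  Build: gcc -O2 -pthread race.c
 */
#include <pthread.h>
#include <stdio.h>

#define FLAG_PARKED 0x0001      /* old-style bit masks */
#define FLAG_YIELD  0x0002

static volatile unsigned int flags;

/* Models the YIELD path: non-atomic read-modify-write on flags. */
static void *yield_thread(void *arg)
{
    for (long i = 0; i < 10000000; i++) {
        flags |= FLAG_YIELD;    /* read, OR, write back: not atomic */
        flags &= ~FLAG_YIELD;   /* can overwrite a concurrent PARKED set */
    }
    return NULL;
}

int main(void)
{
    pthread_t t;
    long lost = 0;

    pthread_create(&t, NULL, yield_thread, NULL);

    /* Models the accounting path: set PARKED, then expect to see it. */
    for (long i = 0; i < 10000000; i++) {
        flags |= FLAG_PARKED;
        if (!(flags & FLAG_PARKED))
            lost++;             /* another CPU's RMW clobbered our bit */
        flags &= ~FLAG_PARKED;
    }

    pthread_join(t, NULL);
    printf("PARKED bit lost %ld times\n", lost);
    return 0;
}

On multi-core hardware this typically reports a non-zero count, though the exact interleaving is timing-dependent. In the scheduler, a lost PARKED bit means the unpark path never runs vcpu_unpause(), which is exactly the "wedged offline" failure described above.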
> diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
> index df2d076..ecdbd76 100644
> --- a/xen/common/sched_credit.c
> +++ b/xen/common/sched_credit.c
> @@ -46,8 +46,8 @@
> /*
> * Flags
> */
> -#define CSCHED_FLAG_VCPU_PARKED 0x0001 /* VCPU over capped credits */
> -#define CSCHED_FLAG_VCPU_YIELD 0x0002 /* VCPU yielding */
> +#define CSCHED_FLAG_VCPU_PARKED 0x0 /* VCPU over capped credits */
> +#define CSCHED_FLAG_VCPU_YIELD 0x1 /* VCPU yielding */
>
>
> /*
> @@ -137,7 +137,7 @@ struct csched_vcpu {
> struct vcpu *vcpu;
> atomic_t credit;
> s_time_t start_time; /* When we were scheduled (used for credit) */
> - uint16_t flags;
> + unsigned flags;
> int16_t pri;
> #ifdef CSCHED_STATS
> struct {
> @@ -220,7 +220,7 @@ __runq_insert(unsigned int cpu, struct csched_vcpu *svc)
> /* If the vcpu yielded, try to put it behind one lower-priority
> * runnable vcpu if we can. The next runq_sort will bring it forward
> * within 30ms if the queue is too long. */
> - if ( svc->flags & CSCHED_FLAG_VCPU_YIELD
> + if ( test_bit(CSCHED_FLAG_VCPU_YIELD, &svc->flags)
> && __runq_elem(iter)->pri > CSCHED_PRI_IDLE )
> {
> iter=iter->next;
> @@ -811,7 +811,7 @@ csched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
> * those.
> */
> if ( svc->pri == CSCHED_PRI_TS_UNDER &&
> - !(svc->flags & CSCHED_FLAG_VCPU_PARKED) )
> + !test_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
> {
> svc->pri = CSCHED_PRI_TS_BOOST;
> }
> @@ -824,10 +824,10 @@ csched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
> static void
> csched_vcpu_yield(const struct scheduler *ops, struct vcpu *vc)
> {
> - struct csched_vcpu * const sv = CSCHED_VCPU(vc);
> + struct csched_vcpu * const svc = CSCHED_VCPU(vc);
>
> /* Let the scheduler know that this vcpu is trying to yield */
> - sv->flags |= CSCHED_FLAG_VCPU_YIELD;
> + set_bit(CSCHED_FLAG_VCPU_YIELD, &svc->flags);
> }
>
> static int
> @@ -1151,11 +1151,10 @@ csched_acct(void* dummy)
> /* Park running VCPUs of capped-out domains */
> if ( sdom->cap != 0U &&
> credit < -credit_cap &&
> - !(svc->flags & CSCHED_FLAG_VCPU_PARKED) )
> + !test_and_set_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
> {
> SCHED_STAT_CRANK(vcpu_park);
> vcpu_pause_nosync(svc->vcpu);
> - svc->flags |= CSCHED_FLAG_VCPU_PARKED;
> }
>
> /* Lower bound on credits */
> @@ -1171,7 +1170,7 @@ csched_acct(void* dummy)
> svc->pri = CSCHED_PRI_TS_UNDER;
>
> /* Unpark any capped domains whose credits go positive */
> - if ( svc->flags & CSCHED_FLAG_VCPU_PARKED)
> + if ( test_and_clear_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
> {
> /*
> * It's important to unset the flag AFTER the unpause()
> @@ -1180,7 +1179,6 @@ csched_acct(void* dummy)
> */
> SCHED_STAT_CRANK(vcpu_unpark);
> vcpu_unpause(svc->vcpu);
> - svc->flags &= ~CSCHED_FLAG_VCPU_PARKED;
> }
>
> /* Upper bound on credits means VCPU stops earning */
> @@ -1442,8 +1440,7 @@ csched_schedule(
> /*
> * Clear YIELD flag before scheduling out
> */
> - if ( scurr->flags & CSCHED_FLAG_VCPU_YIELD )
> - scurr->flags &= ~(CSCHED_FLAG_VCPU_YIELD);
> + clear_bit(CSCHED_FLAG_VCPU_YIELD, &scurr->flags);
>
> /*
> * SMP Load balance:
> --
> 1.7.9.5
>
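One subtlety worth calling out in the hunks above: with the atomic bitops, CSCHED_FLAG_VCPU_PARKED and CSCHED_FLAG_VCPU_YIELD change meaning from bit masks (0x0001, 0x0002) to bit numbers (0x0, 0x1), and "flags" widens from uint16_t to a full word, since the bitops operate on word-sized memory. A sketch of the equivalent semantics, using GCC atomic builtins as stand-ins for Xen's arch-specific primitives (the my_*() names are hypothetical, not Xen's API):

#define CSCHED_FLAG_VCPU_PARKED 0x0   /* bit number 0: was mask 0x0001 */
#define CSCHED_FLAG_VCPU_YIELD  0x1   /* bit number 1: was mask 0x0002 */

/* Stand-in for set_bit(): atomically OR in the bit. */
static inline void my_set_bit(int nr, volatile unsigned int *addr)
{
    __atomic_fetch_or(addr, 1u << nr, __ATOMIC_SEQ_CST);
}

/* Stand-in for clear_bit(): atomically mask the bit out. */
static inline void my_clear_bit(int nr, volatile unsigned int *addr)
{
    __atomic_fetch_and(addr, ~(1u << nr), __ATOMIC_SEQ_CST);
}

/* Stand-in for test_bit(): a plain aligned word read suffices. */
static inline int my_test_bit(int nr, const volatile unsigned int *addr)
{
    return (*addr >> nr) & 1;
}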
>
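The park/unpark hunks also do slightly more than swap in atomic operations: the old "test the flag, then set or clear it" pairs collapse into single test_and_set_bit()/test_and_clear_bit() calls, so the decision and the update happen in one atomic step and exactly one CPU can win. Again a sketch with hypothetical my_*() stand-ins for the Xen primitives, built on the same GCC builtins:

/* Atomically set the bit and return its previous value. */
static inline int my_test_and_set_bit(int nr, volatile unsigned int *addr)
{
    unsigned int old = __atomic_fetch_or(addr, 1u << nr, __ATOMIC_SEQ_CST);
    return (old >> nr) & 1;
}

/* Atomically clear the bit and return its previous value. */
static inline int my_test_and_clear_bit(int nr, volatile unsigned int *addr)
{
    unsigned int old = __atomic_fetch_and(addr, ~(1u << nr), __ATOMIC_SEQ_CST);
    return (old >> nr) & 1;
}

/*
 * Usage mirroring the patch: only the caller that actually flipped
 * PARKED from 0 to 1 pauses the vcpu, and only the one that flipped
 * it back to 0 unpauses it, leaving no window for a second CPU to
 * repeat either action:
 *
 *   if ( !my_test_and_set_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
 *       vcpu_pause_nosync(svc->vcpu);
 *   ...
 *   if ( my_test_and_clear_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
 *       vcpu_unpause(svc->vcpu);
 */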