
Re: [Xen-devel] [PATCH 2/3] arinc: Add cpu-pool support to scheduler.



On 18/11/2013 20:16, Nathan Studer wrote:
> From: Nathan Studer <nate.studer@xxxxxxxxxxxxxxx>
>
> 1.  Remove the restriction that dom0 must be in the schedule, since dom-0 may
> not belong to the scheduler's pool.
> 2.  Add a schedule entry for each of dom-0's vcpus as they are created.
> 3.  Add code to deal with empty schedules in the do_schedule function.
> 4.  Call the correct idle task for the pcpu on which the scheduling decision
> is being made in do_schedule.
> 5.  Add code to prevent migration of a vcpu.
> 6.  Implement a proper cpu_pick function, which prefers the current processor.
>
> These changes do not implement arinc653 multicore.  Since the schedule only
> supports 1 vcpu entry per slot, even if the vcpus of a domain are run on
> multiple pcpus, the scheduler will essentially serialize their execution.
>
> Signed-off-by: Nathan Studer <nate.studer@xxxxxxxxxxxxxxx>
> ---
>  xen/common/sched_arinc653.c |   76 +++++++++++++++++++++++++++++--------------
>  1 file changed, 52 insertions(+), 24 deletions(-)
>
> diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
> index f4eb943..139177e 100644
> --- a/xen/common/sched_arinc653.c
> +++ b/xen/common/sched_arinc653.c
> @@ -41,6 +41,11 @@
>   **************************************************************************/
>  
>  /**
> + * Default timeslice for domain 0.
> + */
> +#define DEFAULT_TIMESLICE MILLISECS(10)
> +
> +/**
>   * Retrieve the idle VCPU for a given physical CPU
>   */
>  #define IDLETASK(cpu)  (idle_vcpu[cpu])
> @@ -224,8 +229,6 @@ arinc653_sched_set(
>  {
>      a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
>      s_time_t total_runtime = 0;
> -    bool_t found_dom0 = 0;
> -    const static xen_domain_handle_t dom0_handle = {0};
>      unsigned int i;
>  
>      /* Check for valid major frame and number of schedule entries. */
> @@ -236,10 +239,6 @@ arinc653_sched_set(
>  
>      for ( i = 0; i < schedule->num_sched_entries; i++ )
>      {
> -        if ( dom_handle_cmp(schedule->sched_entries[i].dom_handle,
> -                            dom0_handle) == 0 )
> -            found_dom0 = 1;
> -
>          /* Check for a valid VCPU ID and run time. */
>          if ( (schedule->sched_entries[i].vcpu_id >= MAX_VIRT_CPUS)
>               || (schedule->sched_entries[i].runtime <= 0) )
> @@ -249,10 +248,6 @@ arinc653_sched_set(
>          total_runtime += schedule->sched_entries[i].runtime;
>      }
>  
> -    /* Error if the schedule doesn't contain a slot for domain 0. */
> -    if ( !found_dom0 )
> -        goto fail;
> -
>      /*
>       * Error if the major frame is not large enough to run all entries as
>       * indicated by comparing the total run time to the major frame length.
> @@ -347,12 +342,6 @@ a653sched_init(struct scheduler *ops)
>  
>      ops->sched_data = prv;
>  
> -    prv->schedule[0].dom_handle[0] = '\0';
> -    prv->schedule[0].vcpu_id = 0;
> -    prv->schedule[0].runtime = MILLISECS(10);
> -    prv->schedule[0].vc = NULL;
> -    prv->num_schedule_entries = 1;
> -    prv->major_frame = MILLISECS(10);
>      prv->next_major_frame = 0;
>      INIT_LIST_HEAD(&prv->vcpu_list);
>  
> @@ -380,7 +369,9 @@ a653sched_deinit(const struct scheduler *ops)
>  static void *
>  a653sched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
>  {
> +    a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
>      arinc653_vcpu_t *svc;
> +    int entry;

sched_priv->num_schedule_entries is inconsistently used as signed and
unsigned.  It should be an unsigned value, and updated to be so
everywhere, including in the a653sched_priv_t structure.
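
i.e. something along these lines (just a sketch):

    /* In a653sched_priv_t: */
    unsigned int num_schedule_entries;  /* number of valid schedule entries */

    /* And for the new local below: */
    unsigned int entry;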

>  
>      /*
>       * Allocate memory for the ARINC 653-specific scheduler data information
> @@ -390,6 +381,19 @@ a653sched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
>      if ( svc == NULL )
>          return NULL;
>  
> +    /* add every one of dom0's vcpus to the schedule */
> +    if (vc->domain->domain_id == 0)

Xen style would include spaces immediately inside the brackets.

Also, it looks like you could do with a bounds check against
ARINC653_MAX_DOMAINS_PER_SCHEDULE before trying to put another dom0 vcpu
entry into the mix; otherwise a fully-populated schedule will overflow the
array.
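
Something along these lines would cover both points (only a sketch; whether
to fail the allocation or silently skip the extra entry when the schedule is
already full is your call):

    /* Add each of dom0's vcpus to the schedule, if there is room for it. */
    if ( vc->domain->domain_id == 0 &&
         sched_priv->num_schedule_entries < ARINC653_MAX_DOMAINS_PER_SCHEDULE )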

> +    {
> +        entry = sched_priv->num_schedule_entries;
> +        sched_priv->schedule[entry].dom_handle[0] = '\0';
> +        sched_priv->schedule[entry].vcpu_id = vc->vcpu_id;
> +        sched_priv->schedule[entry].runtime = DEFAULT_TIMESLICE;
> +        sched_priv->schedule[entry].vc = vc;
> +
> +        sched_priv->major_frame += DEFAULT_TIMESLICE;
> +        ++sched_priv->num_schedule_entries;
> +    }
> +
>      /*
>       * Initialize our ARINC 653 scheduler-specific information for the VCPU.
>       * The VCPU starts "asleep." When Xen is ready for the VCPU to run, it
> @@ -450,7 +454,7 @@ a653sched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
>  static void
>  a653sched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
>  {
> -    /* nop */
> +    /* NOP */

This change seems a little pointless.

>  }
>  
>  /**
> @@ -538,8 +542,13 @@ a653sched_do_schedule(
>      static int sched_index = 0;
>      static s_time_t next_switch_time;
>      a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
> +    const int cpu = smp_processor_id();

This should be an unsigned int.

>  
> -    if ( now >= sched_priv->next_major_frame )
> +    if ( sched_priv->num_schedule_entries < 1 )
> +    {
> +        sched_priv->next_major_frame = now + DEFAULT_TIMESLICE;
> +    }

Xen style would require these braces to be omitted.
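
i.e.:

    if ( sched_priv->num_schedule_entries < 1 )
        sched_priv->next_major_frame = now + DEFAULT_TIMESLICE;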

> +    else if ( now >= sched_priv->next_major_frame )
>      {
>          /* time to enter a new major frame
>           * the first time this function is called, this will be true */
> @@ -574,14 +583,14 @@ a653sched_do_schedule(
>       */
>      new_task = (sched_index < sched_priv->num_schedule_entries)
>          ? sched_priv->schedule[sched_index].vc
> -        : IDLETASK(0);
> +        : IDLETASK(cpu);
>  
>      /* Check to see if the new task can be run (awake & runnable). */
>      if ( !((new_task != NULL)
>             && (AVCPU(new_task) != NULL)
>             && AVCPU(new_task)->awake
>             && vcpu_runnable(new_task)) )
> -        new_task = IDLETASK(0);
> +        new_task = IDLETASK(cpu);
>      BUG_ON(new_task == NULL);
>  
>      /*
> @@ -592,7 +601,12 @@ a653sched_do_schedule(
>  
>      /* Tasklet work (which runs in idle VCPU context) overrides all else. */
>      if ( tasklet_work_scheduled )
> -        new_task = IDLETASK(0);
> +        new_task = IDLETASK(cpu);
> +
> +    /* Running this task would result in a migration */
> +    if ( (!is_idle_vcpu(new_task))

The brackets around !is_idle_vcpu() are useless.

> +         && (new_task->processor != cpu) )
> +        new_task = IDLETASK(cpu);
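
i.e. the condition can simply read:

    if ( !is_idle_vcpu(new_task) && new_task->processor != cpu )
        new_task = IDLETASK(cpu);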
>  
>      /*
>       * Return the amount of time the next domain has to run and the address
> @@ -600,7 +614,7 @@ a653sched_do_schedule(
>       */
>      ret.time = next_switch_time - now;
>      ret.task = new_task;
> -    ret.migrated = 0;               /* we do not support migration */
> +    ret.migrated = 0;
>  
>      BUG_ON(ret.time <= 0);
>  
> @@ -618,8 +632,22 @@ a653sched_do_schedule(
>  static int
>  a653sched_pick_cpu(const struct scheduler *ops, struct vcpu *vc)
>  {
> -    /* this implementation only supports one physical CPU */
> -    return 0;
> +    cpumask_t *online;
> +    int cpu;

unsigned int.

> +
> +    /* If present, prefer vc's current processor, else
> +       just find the first valid vcpu */

Xen style would be:

/*
 * If present....
 */

> +    online = cpupool_scheduler_cpumask(vc->domain->cpupool);
> +
> +    cpu = cpumask_first(online);
> +
> +    if ( (cpumask_test_cpu(vc->processor, online)) ||
> +      (cpu >= nr_cpu_ids) )
> +    {
> +        cpu = vc->processor;
> +    }

superfluous brackets and braces.
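
Putting the points for this function together, something like this (untested
sketch, same logic as above):

    cpumask_t *online;
    unsigned int cpu;

    /*
     * If present, prefer vc's current processor, otherwise
     * just pick the first valid pcpu in the pool.
     */
    online = cpupool_scheduler_cpumask(vc->domain->cpupool);

    cpu = cpumask_first(online);

    if ( cpumask_test_cpu(vc->processor, online) || cpu >= nr_cpu_ids )
        cpu = vc->processor;

    return cpu;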

~Andrew

> +
> +    return cpu;
>  }
>  
>  /**


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel