From b8313d7060e9499fa7580265bda703cce05fe46a Mon Sep 17 00:00:00 2001
From: Juergen Gross
Date: Mon, 4 Dec 2023 14:15:45 +0100
Subject: [PATCH] xen/sched: fix sched_move_domain()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Do cleanup in sched_move_domain() in a dedicated service function,
which is called either in error case with newly allocated data, or in
success case with the old data to be freed.

This will at once fix some subtle bugs which sneaked in due to
forgetting to overwrite some pointers in the error case.

Fixes: 70fadc41635b ("xen/cpupool: support moving domain between cpupools with different granularity")
Reported-by: René Winther Højgaard
Initial-fix-by: Jan Beulich
Initial-fix-by: George Dunlap
Signed-off-by: Juergen Gross
---
 xen/common/sched/core.c | 47 +++++++++++++++++++++++------------------
 1 file changed, 27 insertions(+), 20 deletions(-)

diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index eba0cea4bb..fd92e9b13a 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -647,6 +647,24 @@ static void sched_move_irqs(const struct sched_unit *unit)
         vcpu_move_irqs(v);
 }
 
+static void sched_move_domain_cleanup(struct scheduler *ops,
+                                      struct sched_unit *units,
+                                      void *domdata)
+{
+    struct sched_unit *unit, *old_unit;
+
+    for ( unit = units; unit; )
+    {
+        if ( unit->priv )
+            sched_free_udata(ops, unit->priv);
+        old_unit = unit;
+        unit = unit->next_in_list;
+        xfree(old_unit);
+    }
+
+    sched_free_domdata(ops, domdata);
+}
+
 /*
  * Move a domain from one cpupool to another.
  *
@@ -686,7 +704,6 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
     void *old_domdata;
     unsigned int gran = cpupool_get_granularity(c);
     unsigned int n_units = d->vcpu[0] ? DIV_ROUND_UP(d->max_vcpus, gran) : 0;
-    int ret = 0;
 
     for_each_vcpu ( d, v )
     {
@@ -699,8 +716,9 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
     domdata = sched_alloc_domdata(c->sched, d);
     if ( IS_ERR(domdata) )
     {
-        ret = PTR_ERR(domdata);
-        goto out;
+        rcu_read_unlock(&sched_res_rculock);
+
+        return PTR_ERR(domdata);
     }
 
     for ( unit_idx = 0; unit_idx < n_units; unit_idx++ )
@@ -718,10 +736,10 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
 
         if ( !unit || !unit->priv )
         {
-            old_units = new_units;
-            old_domdata = domdata;
-            ret = -ENOMEM;
-            goto out_free;
+            sched_move_domain_cleanup(c->sched, new_units, domdata);
+            rcu_read_unlock(&sched_res_rculock);
+
+            return -ENOMEM;
         }
 
         unit_ptr = &unit->next_in_list;
@@ -808,22 +826,11 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
 
     domain_unpause(d);
 
- out_free:
-    for ( unit = old_units; unit; )
-    {
-        if ( unit->priv )
-            sched_free_udata(c->sched, unit->priv);
-        old_unit = unit;
-        unit = unit->next_in_list;
-        xfree(old_unit);
-    }
-
-    sched_free_domdata(old_ops, old_domdata);
+    sched_move_domain_cleanup(old_ops, old_units, old_domdata);
 
- out:
     rcu_read_unlock(&sched_res_rculock);
 
-    return ret;
+    return 0;
 }
 
 void sched_destroy_vcpu(struct vcpu *v)
-- 
2.35.3
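
As an aside, the shape of the fix generalizes beyond the scheduler: link each
element into the list before checking its allocations, so that a single
cleanup helper can reclaim both complete and partially constructed state, and
the error path and the success path share the same code. Below is a minimal
standalone sketch of that pattern, not Xen code: all names (unit, cleanup,
build_units) are hypothetical, and plain malloc()/free() stand in for
xfree(), sched_free_udata() and sched_free_domdata().

/*
 * Sketch of the "one cleanup function" pattern from the patch above.
 * cleanup() frees a possibly partially built unit list plus its
 * per-domain data; build_units() calls it on allocation failure, and
 * the caller reuses it on success to free data it no longer needs.
 */
#include <stdio.h>
#include <stdlib.h>

struct unit {
    void *priv;                  /* per-unit data, may still be NULL */
    struct unit *next_in_list;
};

static void cleanup(struct unit *units, void *domdata)
{
    struct unit *unit = units, *old_unit;

    while ( unit )
    {
        free(unit->priv);        /* free(NULL) is a no-op */
        old_unit = unit;
        unit = unit->next_in_list;
        free(old_unit);
    }

    free(domdata);
}

/* Build n units; on partial failure one call frees everything so far. */
static int build_units(unsigned int n, struct unit **out, void *domdata)
{
    struct unit *head = NULL, **tail = &head;
    unsigned int i;

    for ( i = 0; i < n; i++ )
    {
        struct unit *unit = calloc(1, sizeof(*unit));

        if ( unit )
        {
            unit->priv = malloc(16);
            *tail = unit;        /* link before checking, as the patch does */
            tail = &unit->next_in_list;
        }

        if ( !unit || !unit->priv )
        {
            cleanup(head, domdata);   /* single, shared error path */
            return -1;
        }
    }

    *out = head;
    return 0;
}

int main(void)
{
    struct unit *units = NULL;
    void *domdata = malloc(32);

    if ( domdata && !build_units(4, &units, domdata) )
    {
        puts("built 4 units");
        cleanup(units, domdata);  /* success path reuses the same helper */
    }
    return 0;
}

Because the partially initialized unit is already linked into the list when
the `!unit || !unit->priv` check runs, there is no window in which a pointer
must be manually copied into "old" variables for a later goto label to free,
which is exactly the bookkeeping the original error path got wrong.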