[PATCH v2 14/70] xen: CFI hardening for init_timer()
Control Flow Integrity schemes use toolchain and optionally hardware support
to help protect against call/jump/return oriented programming attacks.

Use cf_check to annotate function pointer targets for the toolchain.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
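[Not part of the patch itself: a minimal, hypothetical sketch of the pattern
being applied throughout.  The callback handed to init_timer() is only ever
reached through the stored function pointer, so it gains the cf_check
annotation to mark it as a legitimate indirect call target for the toolchain.
The heartbeat_* names below are made up purely for illustration; init_timer(),
set_timer(), NOW() and SECONDS() are the existing Xen timer/time APIs.]

#include <xen/time.h>
#include <xen/timer.h>

static struct timer heartbeat_timer;   /* hypothetical example timer */

/*
 * Reached only indirectly, via the function pointer stored by init_timer(),
 * hence the cf_check annotation on its definition.
 */
static void cf_check heartbeat_timer_fn(void *data)
{
    /* ... periodic work would go here ... */

    /* Re-arm for one second from now. */
    set_timer(&heartbeat_timer, NOW() + SECONDS(1));
}

static void heartbeat_setup(unsigned int cpu)
{
    /* Register the cf_check-annotated callback and arm the timer. */
    init_timer(&heartbeat_timer, heartbeat_timer_fn, NULL, cpu);
    set_timer(&heartbeat_timer, NOW() + SECONDS(1));
}

[Direct calls are unaffected; only functions whose address is taken and
called indirectly, as the timer infrastructure does here, need the
annotation, which is why every init_timer() callback in the diff below
picks it up.]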
xen/arch/x86/cpu/mcheck/amd_nonfatal.c | 2 +-
xen/arch/x86/cpu/mcheck/non-fatal.c | 2 +-
xen/arch/x86/hvm/pmtimer.c | 2 +-
xen/arch/x86/hvm/rtc.c | 6 +++---
xen/arch/x86/hvm/viridian/time.c | 2 +-
xen/arch/x86/hvm/vpt.c | 2 +-
xen/arch/x86/irq.c | 4 ++--
xen/arch/x86/nmi.c | 2 +-
xen/arch/x86/time.c | 4 ++--
xen/common/rcupdate.c | 2 +-
xen/common/sched/core.c | 18 +++++++++---------
xen/common/sched/credit.c | 10 ++++------
xen/common/sched/credit2.c | 2 +-
xen/common/sched/rt.c | 5 +++--
xen/drivers/char/ehci-dbgp.c | 2 +-
xen/drivers/char/ns16550.c | 6 +++---
xen/drivers/cpufreq/cpufreq_ondemand.c | 2 +-
17 files changed, 36 insertions(+), 37 deletions(-)
diff --git a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
index 6e8901530a69..da0bf85f0223 100644
--- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
+++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
@@ -127,7 +127,7 @@ static void mce_amd_checkregs(void *info)
* multiple correctable errors between two polls. In that case,
* increase polling frequency higher than normal.
*/
-static void mce_amd_work_fn(void *data)
+static void cf_check mce_amd_work_fn(void *data)
{
on_each_cpu(mce_amd_checkregs, data, 1);
diff --git a/xen/arch/x86/cpu/mcheck/non-fatal.c b/xen/arch/x86/cpu/mcheck/non-fatal.c
index 2679c220a8a2..f7e411c0870e 100644
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c
@@ -67,7 +67,7 @@ static void mce_checkregs (void *info)
}
}
-static void mce_work_fn(void *data)
+static void cf_check mce_work_fn(void *data)
{
on_each_cpu(mce_checkregs, NULL, 1);
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index 97b9e41712fa..808819d1de91 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -124,7 +124,7 @@ static void pmt_update_time(PMTState *s)
/* This function should be called soon after each time the MSB of the
* pmtimer register rolls over, to make sure we update the status
* registers and SCI at least once per rollover */
-static void pmt_timer_callback(void *opaque)
+static void cf_check pmt_timer_callback(void *opaque)
{
PMTState *s = opaque;
uint32_t pmt_cycles_until_flip;
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index 3150f5f1479b..09d3501276bc 100644
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -217,7 +217,7 @@ static void check_update_timer(RTCState *s)
s->use_timer = 0;
}
-static void rtc_update_timer(void *opaque)
+static void cf_check rtc_update_timer(void *opaque)
{
RTCState *s = opaque;
@@ -230,7 +230,7 @@ static void rtc_update_timer(void *opaque)
spin_unlock(&s->lock);
}
-static void rtc_update_timer2(void *opaque)
+static void cf_check rtc_update_timer2(void *opaque)
{
RTCState *s = opaque;
@@ -421,7 +421,7 @@ static void alarm_timer_update(RTCState *s)
}
}
-static void rtc_alarm_cb(void *opaque)
+static void cf_check rtc_alarm_cb(void *opaque)
{
RTCState *s = opaque;
diff --git a/xen/arch/x86/hvm/viridian/time.c b/xen/arch/x86/hvm/viridian/time.c
index 24ff117edb20..b56fd6766292 100644
--- a/xen/arch/x86/hvm/viridian/time.c
+++ b/xen/arch/x86/hvm/viridian/time.c
@@ -126,7 +126,7 @@ static void stop_stimer(struct viridian_stimer *vs)
vs->started = false;
}
-static void stimer_expire(void *data)
+static void cf_check stimer_expire(void *data)
{
struct viridian_stimer *vs = data;
struct vcpu *v = vs->v;
diff --git a/xen/arch/x86/hvm/vpt.c b/xen/arch/x86/hvm/vpt.c
index 6fdc3e19fe8c..cb1d81bf9e82 100644
--- a/xen/arch/x86/hvm/vpt.c
+++ b/xen/arch/x86/hvm/vpt.c
@@ -271,7 +271,7 @@ void pt_restore_timer(struct vcpu *v)
pt_vcpu_unlock(v);
}
-static void pt_timer_fn(void *data)
+static void cf_check pt_timer_fn(void *data)
{
struct periodic_time *pt = data;
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index bcf46cd54d16..f9c808455535 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -935,7 +935,7 @@ void alloc_direct_apic_vector(
spin_unlock(&lock);
}
-static void irq_ratelimit_timer_fn(void *data)
+static void cf_check irq_ratelimit_timer_fn(void *data)
{
struct irq_desc *desc, *tmp;
unsigned long flags;
@@ -1129,7 +1129,7 @@ static inline void clear_pirq_eoi(struct domain *d, unsigned int irq)
static void set_eoi_ready(void *data);
-static void irq_guest_eoi_timer_fn(void *data)
+static void cf_check irq_guest_eoi_timer_fn(void *data)
{
struct irq_desc *desc = data;
unsigned int i, irq = desc - irq_desc;
diff --git a/xen/arch/x86/nmi.c b/xen/arch/x86/nmi.c
index 5c101a9f97b3..c8ae4a5d7460 100644
--- a/xen/arch/x86/nmi.c
+++ b/xen/arch/x86/nmi.c
@@ -211,7 +211,7 @@ void __init check_nmi_watchdog(void)
return;
}
-static void nmi_timer_fn(void *unused)
+static void cf_check nmi_timer_fn(void *unused)
{
this_cpu(nmi_timer_ticks)++;
set_timer(&this_cpu(nmi_timer), NOW() + MILLISECS(1000));
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index 14f7d3fd635e..fac97023bf10 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -815,7 +815,7 @@ static s_time_t __read_platform_stime(u64 platform_time)
return (stime_platform_stamp + scale_delta(diff, &plt_scale));
}
-static void plt_overflow(void *unused)
+static void cf_check plt_overflow(void *unused)
{
int i;
u64 count;
@@ -1855,7 +1855,7 @@ static void time_calibration_nop_rendezvous(void *rv)
static void (*time_calibration_rendezvous_fn)(void *) =
time_calibration_std_rendezvous;
-static void time_calibration(void *unused)
+static void cf_check time_calibration(void *unused)
{
struct calibration_rendezvous r = {
.semaphore = ATOMIC_INIT(0)
diff --git a/xen/common/rcupdate.c b/xen/common/rcupdate.c
index 2ec5606de5dd..f9dd2584a8b7 100644
--- a/xen/common/rcupdate.c
+++ b/xen/common/rcupdate.c
@@ -575,7 +575,7 @@ static void rcu_idle_timer_stop(void)
stop_timer(&rdp->idle_timer);
}
-static void rcu_idle_timer_handler(void* data)
+static void cf_check rcu_idle_timer_handler(void* data)
{
perfc_incr(rcu_idle_timer);
diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index 4a79971a1d45..cf1ba01b4d87 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -71,10 +71,10 @@ cpumask_t sched_res_mask;
static DEFINE_SPINLOCK(sched_free_cpu_lock);
/* Various timer handlers. */
-static void s_timer_fn(void *unused);
-static void vcpu_periodic_timer_fn(void *data);
-static void vcpu_singleshot_timer_fn(void *data);
-static void poll_timer_fn(void *data);
+static void cf_check s_timer_fn(void *unused);
+static void cf_check vcpu_periodic_timer_fn(void *data);
+static void cf_check vcpu_singleshot_timer_fn(void *data);
+static void cf_check poll_timer_fn(void *data);
/* This is global for now so that private implementations can reach it */
DEFINE_PER_CPU_READ_MOSTLY(struct sched_resource *, sched_res);
@@ -1535,7 +1535,7 @@ long vcpu_yield(void)
return 0;
}
-static void domain_watchdog_timeout(void *data)
+static void cf_check domain_watchdog_timeout(void *data)
{
struct domain *d = data;
@@ -2697,28 +2697,28 @@ static void schedule(void)
}
/* The scheduler timer: force a run through the scheduler */
-static void s_timer_fn(void *unused)
+static void cf_check s_timer_fn(void *unused)
{
raise_softirq(SCHEDULE_SOFTIRQ);
SCHED_STAT_CRANK(sched_irq);
}
/* Per-VCPU periodic timer function: sends a virtual timer interrupt. */
-static void vcpu_periodic_timer_fn(void *data)
+static void cf_check vcpu_periodic_timer_fn(void *data)
{
struct vcpu *v = data;
vcpu_periodic_timer_work(v);
}
/* Per-VCPU single-shot timer function: sends a virtual timer interrupt. */
-static void vcpu_singleshot_timer_fn(void *data)
+static void cf_check vcpu_singleshot_timer_fn(void *data)
{
struct vcpu *v = data;
send_timer_event(v);
}
/* SCHEDOP_poll timeout callback. */
-static void poll_timer_fn(void *data)
+static void cf_check poll_timer_fn(void *data)
{
struct vcpu *v = data;
diff --git a/xen/common/sched/credit.c b/xen/common/sched/credit.c
index d0aa017c643e..5635271f6fea 100644
--- a/xen/common/sched/credit.c
+++ b/xen/common/sched/credit.c
@@ -230,8 +230,8 @@ struct csched_private {
struct timer master_ticker;
};
-static void csched_tick(void *_cpu);
-static void csched_acct(void *dummy);
+static void cf_check csched_tick(void *_cpu);
+static void cf_check csched_acct(void *dummy);
static inline int
__unit_on_runq(const struct csched_unit *svc)
@@ -1356,8 +1356,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
}
-static void
-csched_acct(void* dummy)
+static void cf_check csched_acct(void* dummy)
{
struct csched_private *prv = dummy;
unsigned long flags;
@@ -1563,8 +1562,7 @@ csched_acct(void* dummy)
set_timer( &prv->master_ticker, NOW() + prv->tslice);
}
-static void
-csched_tick(void *_cpu)
+static void cf_check csched_tick(void *_cpu)
{
unsigned int cpu = (unsigned long)_cpu;
const struct sched_resource *sr = get_sched_res(cpu);
diff --git a/xen/common/sched/credit2.c b/xen/common/sched/credit2.c
index a5f073cda51e..d96e2749ddfb 100644
--- a/xen/common/sched/credit2.c
+++ b/xen/common/sched/credit2.c
@@ -2072,7 +2072,7 @@ static inline void do_replenish(struct csched2_dom *sdom)
sdom->budget += sdom->tot_budget;
}
-static void replenish_domain_budget(void* data)
+static void cf_check replenish_domain_budget(void *data)
{
struct csched2_dom *sdom = data;
unsigned long flags;
diff --git a/xen/common/sched/rt.c b/xen/common/sched/rt.c
index c24cd2ac3200..5ea6f01f263c 100644
--- a/xen/common/sched/rt.c
+++ b/xen/common/sched/rt.c
@@ -173,7 +173,7 @@
#define TRC_RTDS_SCHED_TASKLET TRC_SCHED_CLASS_EVT(RTDS, 5)
#define TRC_RTDS_SCHEDULE TRC_SCHED_CLASS_EVT(RTDS, 6)
-static void repl_timer_handler(void *data);
+static void cf_check repl_timer_handler(void *data);
/*
* System-wide private data, include global RunQueue/DepletedQ
@@ -1452,7 +1452,8 @@ rt_dom_cntl(
* The replenishment timer handler picks units
* from the replq and does the actual replenishment.
*/
-static void repl_timer_handler(void *data){
+static void cf_check repl_timer_handler(void *data)
+{
s_time_t now;
const struct scheduler *ops = data;
struct rt_private *prv = rt_priv(ops);
diff --git a/xen/drivers/char/ehci-dbgp.c b/xen/drivers/char/ehci-dbgp.c
index c893d246defa..a6b57fdf2d19 100644
--- a/xen/drivers/char/ehci-dbgp.c
+++ b/xen/drivers/char/ehci-dbgp.c
@@ -1289,7 +1289,7 @@ static void _ehci_dbgp_poll(struct cpu_user_regs *regs)
set_timer(&dbgp->timer, NOW() + timeout);
}
-static void ehci_dbgp_poll(void *data)
+static void cf_check ehci_dbgp_poll(void *data)
{
poll_port = data;
#ifdef run_in_exception_handler
diff --git a/xen/drivers/char/ns16550.c b/xen/drivers/char/ns16550.c
index 30596d60d4ed..990cad39fe85 100644
--- a/xen/drivers/char/ns16550.c
+++ b/xen/drivers/char/ns16550.c
@@ -111,7 +111,7 @@ struct ns16550_config_param {
static void enable_exar_enhanced_bits(const struct ns16550 *uart);
#endif
-static void ns16550_delayed_resume(void *data);
+static void cf_check ns16550_delayed_resume(void *data);
static u8 ns_read_reg(const struct ns16550 *uart, unsigned int reg)
{
@@ -229,7 +229,7 @@ static void __ns16550_poll(struct cpu_user_regs *regs)
set_timer(&uart->timer, NOW() + MILLISECS(uart->timeout_ms));
}
-static void ns16550_poll(void *data)
+static void cf_check ns16550_poll(void *data)
{
this_cpu(poll_port) = data;
#ifdef run_in_exception_handler
@@ -532,7 +532,7 @@ static void _ns16550_resume(struct serial_port *port)
}
static int delayed_resume_tries;
-static void ns16550_delayed_resume(void *data)
+static void cf_check ns16550_delayed_resume(void *data)
{
struct serial_port *port = data;
struct ns16550 *uart = port->uart;
diff --git a/xen/drivers/cpufreq/cpufreq_ondemand.c b/xen/drivers/cpufreq/cpufreq_ondemand.c
index cabd9ffa8886..ba03eaa2336d 100644
--- a/xen/drivers/cpufreq/cpufreq_ondemand.c
+++ b/xen/drivers/cpufreq/cpufreq_ondemand.c
@@ -172,7 +172,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
}
}
-static void do_dbs_timer(void *dbs)
+static void cf_check do_dbs_timer(void *dbs)
{
struct cpu_dbs_info_s *dbs_info = (struct cpu_dbs_info_s *)dbs;
--
2.11.0