
[Xen-devel] [PATCH] qemu-xen: remove buffered_io_timer



qemu-xen: remove buffered_io_timer

There is no need for an additional timer to handle buffered_io requests:
they are always handled before any other ioreq anyway.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
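
For reference, the reasoning is that the event channel handler already drains
the buffered iopage before servicing the ordinary ioreq, so a periodic timer
adds nothing. Below is a rough, self-contained C sketch of that dispatch order;
the struct and function names are illustrative stand-ins, not the actual
qemu-xen code:

/*
 * Simplified model (not the real qemu-xen code) of why the periodic
 * buffered-io timer is redundant: the handler that services ordinary
 * ioreqs drains the buffered ring first on every notification, so
 * buffered requests never have to wait for a timer tick.
 */
#include <stdio.h>

/* Hypothetical stand-ins for the shared rings. */
struct buffered_ring { int pending; };
struct ioreq         { int valid;   };

static void drain_buffered(struct buffered_ring *b)
{
    while (b->pending > 0) {
        b->pending--;
        printf("handled one buffered ioreq\n");
    }
}

/* Models the event handler: buffered requests are processed before
 * the synchronous ioreq, on every event channel notification. */
static void handle_event(struct buffered_ring *b, struct ioreq *req)
{
    drain_buffered(b);              /* always runs first ...       */
    if (req->valid) {               /* ... then the ordinary ioreq */
        req->valid = 0;
        printf("handled the synchronous ioreq\n");
    }
}

int main(void)
{
    struct buffered_ring b = { .pending = 3 };
    struct ioreq req = { .valid = 1 };

    handle_event(&b, &req);         /* no timer needed to flush b  */
    return 0;
}

The main_loop() hunk below likewise keeps a direct __handle_buffered_iopage()
call in the shutdown path, so nothing there depends on the timer either.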

diff --git a/i386-dm/helper2.c b/i386-dm/helper2.c
index 94237b6..96256aa 100644
--- a/i386-dm/helper2.c
+++ b/i386-dm/helper2.c
@@ -99,9 +99,7 @@ long time_offset = 0;
 
 shared_iopage_t *shared_page = NULL;
 
-#define BUFFER_IO_MAX_DELAY  100
 buffered_iopage_t *buffered_io_page = NULL;
-QEMUTimer *buffered_io_timer;
 
 /* the evtchn fd for polling */
 int xce_handle = -1;
@@ -495,15 +493,6 @@ static void __handle_buffered_iopage(CPUState *env)
     }
 }
 
-static void handle_buffered_io(void *opaque)
-{
-    CPUState *env = opaque;
-
-    __handle_buffered_iopage(env);
-    qemu_mod_timer(buffered_io_timer, BUFFER_IO_MAX_DELAY +
-                  qemu_get_clock(rt_clock));
-}
-
 static void cpu_handle_ioreq(void *opaque)
 {
     extern int shutdown_requested;
@@ -559,10 +548,6 @@ int main_loop(void)
 
     main_loop_prepare();
 
-    buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
-                                      cpu_single_env);
-    qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock));
-
     if (evtchn_fd != -1)
         qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
 
@@ -584,7 +569,7 @@ int main_loop(void)
 
         /* Pull all outstanding ioreqs through the system */
         handle_buffered_pio();
-        handle_buffered_io(env);
+        __handle_buffered_iopage(env);
         main_loop_wait(1); /* For the select() on events */
 
         /* Save the device state */

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

