# HG changeset patch
# User dietmar.hahn@xxxxxxxxxxxxxxxxxxx
# Node ID 19da775d3a9eb2f977a06722ea83677a314d6fed
# Parent  1a3df0faa4a609ca582538248c40c3ec3ff93cce
Fixes to the common mini-os sources to get a big-endian mini-os running on ia64.
This is not for check-in! It is meant only for interested developers!
Signed-off-by: Dietmar Hahn
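Editor's note: the patch relies throughout on a SWAP() macro that is not part of this
changeset; it presumably comes from the ia64 architecture headers. As a rough,
hypothetical sketch only (the helper names and the __BIG_ENDIAN__ guard are assumptions,
not the author's code), such a macro would byte-swap values exchanged with the
little-endian shared Xen structures on a big-endian build and collapse to a no-op
otherwise:

/* Illustrative sketch only -- not part of the patch. */
#include <stdint.h>

static inline uint16_t swap16(uint16_t v) { return (v >> 8) | (v << 8); }
static inline uint32_t swap32(uint32_t v)
{
    return ((uint32_t)swap16(v) << 16) | swap16(v >> 16);
}
static inline uint64_t swap64(uint64_t v)
{
    return ((uint64_t)swap32(v) << 32) | swap32(v >> 32);
}

#ifdef __BIG_ENDIAN__                 /* hypothetical configuration switch */
/* Note: evaluates its argument more than once; a real implementation
 * would avoid that. */
#define SWAP(x)                                   \
    (sizeof(x) == 1 ? (x) :                       \
     sizeof(x) == 2 ? swap16(x) :                 \
     sizeof(x) == 4 ? swap32(x) : swap64(x))
#else
#define SWAP(x) (x)                   /* little-endian guests: no conversion */
#endif

With a definition of this shape, the get_*/set_* wrappers introduced below read and
write the shared fields in Xen's byte order while the rest of mini-os keeps working on
host-order values.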
diff -r 1a3df0faa4a6 -r 19da775d3a9e extras/mini-os/console/xencons_ring.c
--- a/extras/mini-os/console/xencons_ring.c    Tue Mar 20 14:43:48 2007 +0100
+++ b/extras/mini-os/console/xencons_ring.c    Tue Mar 20 15:38:42 2007 +0100
@@ -7,6 +7,14 @@
 #include
 #include
 #include
+
+
+
+/* Start WRAPPER */
+#define get_xencons_interface(p, item)      SWAP((p)->item)
+#define set_xencons_interface(p, item, val) (p)->item = SWAP(val)
+/* End WRAPPER */
+
 /* TODO - need to define BUG_ON for whole mini-os, need crash-dump as well */
@@ -28,8 +36,8 @@ int xencons_ring_send_no_notify(const ch
     int sent = 0;
     struct xencons_interface *intf = xencons_interface();
     XENCONS_RING_IDX cons, prod;
-    cons = intf->out_cons;
-    prod = intf->out_prod;
+    cons = get_xencons_interface(intf, out_cons);
+    prod = get_xencons_interface(intf, out_prod);
     mb();
     BUG_ON((prod - cons) > sizeof(intf->out));
@@ -37,7 +45,7 @@ int xencons_ring_send_no_notify(const ch
         intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
     wmb();
-    intf->out_prod = prod;
+    set_xencons_interface(intf, out_prod, prod);
     return sent;
 }
@@ -58,8 +66,8 @@ static void handle_input(evtchn_port_t p
     struct xencons_interface *intf = xencons_interface();
     XENCONS_RING_IDX cons, prod;
-    cons = intf->in_cons;
-    prod = intf->in_prod;
+    cons = get_xencons_interface(intf, in_cons);
+    prod = get_xencons_interface(intf, in_prod);
     mb();
     BUG_ON((prod - cons) > sizeof(intf->in));
@@ -69,7 +77,7 @@ static void handle_input(evtchn_port_t p
     }
     mb();
-    intf->in_cons = cons;
+    set_xencons_interface(intf, in_cons, cons);
     notify_daemon();
diff -r 1a3df0faa4a6 -r 19da775d3a9e extras/mini-os/events.c
--- a/extras/mini-os/events.c    Tue Mar 20 14:43:48 2007 +0100
+++ b/extras/mini-os/events.c    Tue Mar 20 15:38:42 2007 +0100
@@ -24,6 +24,48 @@
 #define NR_EVS 1024
+
+/* START WRAPPER */
+static inline int
+HPV_evtchn_bind_virq(uint32_t virq, uint32_t vcpu, evtchn_port_t* port)
+{
+    evtchn_bind_virq_t op = { .virq = SWAP(virq), .vcpu = SWAP(vcpu) };
+    int err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &op);
+    *port = SWAP(op.port);
+    return err;
+}
+
+static inline int
+HPV_evtchn_close(int port)
+{
+    struct evtchn_close close = { .port = SWAP(port) };
+    return HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
+}
+static inline int
+HPV_evtchn_alloc_unbound(domid_t domid, evtchn_port_t* port)
+{
+    evtchn_alloc_unbound_t op;
+    op.dom = SWAP((domid_t)DOMID_SELF);
+    op.remote_dom = SWAP(domid);
+    int err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);
+    *port = SWAP(op.port);
+    return err;
+}
+
+static inline int
+HPV_evtchn_bind_interdomain(domid_t domid, evtchn_port_t remote_port,
+                            evtchn_port_t* local_port)
+{
+    evtchn_bind_interdomain_t op;
+    op.remote_dom = SWAP(domid);
+    op.remote_port = SWAP(remote_port);
+    int err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &op);
+    *local_port = SWAP(op.local_port);
+    return err;
+}
+/* END WRAPPER */
+
+
 /* this represents a event handler. Chaining or sharing is not allowed */
 typedef struct _ev_action_t {
     evtchn_handler_t handler;
@@ -36,6 +78,7 @@ void default_handler(evtchn_port_t port,
 static unsigned long bound_ports[NR_EVS/(8*sizeof(unsigned long))];
+
 void unbind_all_ports(void)
 {
     int i;
@@ -44,10 +87,8 @@ void unbind_all_ports(void)
     {
         if (test_and_clear_bit(i, bound_ports))
         {
-            struct evtchn_close close;
             mask_evtchn(i);
-            close.port = i;
-            HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
+            HPV_evtchn_close(i);
         }
     }
 }
@@ -86,7 +127,6 @@ evtchn_port_t bind_evtchn(evtchn_port_t
     ev_actions[port].data = data;
     wmb();
     ev_actions[port].handler = handler;
-
     /* Finally unmask the port */
     unmask_evtchn(port);
@@ -104,19 +144,16 @@ void unbind_evtchn(evtchn_port_t port )
 int bind_virq(uint32_t virq, evtchn_handler_t handler, void *data)
 {
-    evtchn_bind_virq_t op;
-
-    /* Try to bind the virq to a port */
-    op.virq = virq;
-    op.vcpu = smp_processor_id();
-
-    if ( HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &op) != 0 )
+
+    evtchn_port_t port;
+
+    if ( HPV_evtchn_bind_virq(virq, smp_processor_id(), &port ) != 0)
     {
         printk("Failed to bind virtual IRQ %d\n", virq);
         return 1;
-    }
-    set_bit(op.port,bound_ports);
-    bind_evtchn(op.port, handler, data);
+    }
+    set_bit(port,bound_ports);
+    bind_evtchn(port, handler, data);
     return 0;
 }
@@ -167,13 +204,10 @@ int evtchn_alloc_unbound(domid_t pal, ev
 int evtchn_alloc_unbound(domid_t pal, evtchn_handler_t handler,
                          void *data, evtchn_port_t *port)
 {
-    evtchn_alloc_unbound_t op;
-    op.dom = DOMID_SELF;
-    op.remote_dom = pal;
-    int err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);
+    int err = HPV_evtchn_alloc_unbound(pal, port);
     if (err)
         return err;
-    *port = bind_evtchn(op.port, handler, data);
+    bind_evtchn(*port, handler, data);
     return err;
 }
@@ -184,15 +218,21 @@ int evtchn_bind_interdomain(domid_t pal,
                             evtchn_handler_t handler, void *data,
                             evtchn_port_t *local_port)
 {
-    evtchn_bind_interdomain_t op;
-    op.remote_dom = pal;
-    op.remote_port = remote_port;
-    int err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &op);
+    evtchn_port_t port;
+    int err = HPV_evtchn_bind_interdomain(pal, remote_port, &port);
     if (err)
         return err;
-    set_bit(op.local_port,bound_ports);
-    evtchn_port_t port = op.local_port;
+    set_bit(port,bound_ports);
     clear_evtchn(port);        /* Without, handler gets invoked now! */
     *local_port = bind_evtchn(port, handler, data);
     return err;
 }
+
+ /* NEW HAHN */
+int evtchn_release(evtchn_port_t port)
+{
+    int err = HPV_evtchn_close(port);
+    unbind_evtchn(port);
+    return err;
+}
+
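Editor's note: the new evtchn_release() helper above (marked /* NEW HAHN */) closes the
port at the hypervisor and drops the local binding in one call. A minimal usage sketch,
not part of the patch -- my_handler, my_port, my_setup and my_shutdown are hypothetical
names, and the handler signature is the one assumed by this tree's events.h:

/* Illustrative sketch only -- not part of the patch. */
static evtchn_port_t my_port;

static void my_handler(evtchn_port_t port, struct pt_regs *regs, void *data)
{
    /* acknowledge/handle the event here */
}

static void my_setup(void)
{
    /* Allocate an unbound port for dom0 and bind my_handler to it; the
     * SWAP()-aware HPV_evtchn_alloc_unbound() wrapper is used internally. */
    if (evtchn_alloc_unbound(0, my_handler, NULL, &my_port))
        printk("could not allocate event channel\n");
}

static void my_shutdown(void)
{
    /* Close the port at the hypervisor and remove the local handler. */
    evtchn_release(my_port);
}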
diff -r 1a3df0faa4a6 -r 19da775d3a9e extras/mini-os/gnttab.c
--- a/extras/mini-os/gnttab.c    Tue Mar 20 14:43:48 2007 +0100
+++ b/extras/mini-os/gnttab.c    Tue Mar 20 15:38:42 2007 +0100
@@ -19,14 +19,56 @@
 #include
 #include
+
+/* Start WRAPPER
+ * Maybe the wrappers have to be moved to a header file.
+ */
+inline void
+grant_entry_setup(grant_entry_t* gr, domid_t domid, uint32_t frame, uint16_t flags)
+{
+    gr->frame = SWAP(frame);
+    gr->domid = SWAP(domid);
+    wmb();
+    gr->flags = SWAP(flags);
+}
+inline uint16_t
+grant_entry_get_flags(grant_entry_t* gr)
+{
+    return SWAP(gr->flags);
+}
+inline uint32_t
+grant_entry_get_frame(grant_entry_t* gr)
+{
+    return SWAP(gr->frame);
+}
+inline uint16_t
+grant_entry_cmpxchg_flags(grant_entry_t* gr, uint16_t flags)
+{
+    uint16_t nflags;
+    nflags = synch_cmpxchg(&gr->flags, SWAP(flags), 0);
+    return SWAP(nflags);
+}
+inline void
+gnttab_setup_table_setup(struct gnttab_setup_table* t,
+                         unsigned long* frames, uint32_t nr_frames)
+{
+    t->dom = SWAP((domid_t)DOMID_SELF);
+    t->nr_frames = SWAP(nr_frames);
+    set_xen_guest_handle(t->frame_list, (void*)SWAP((unsigned long)frames));
+}
+/* End WRAPPER */
+
+
+
+
+
 #define NR_RESERVED_ENTRIES 8
-/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
 #ifdef __ia64__
 #define NR_GRANT_FRAMES 1
 #else
 #define NR_GRANT_FRAMES 4
 #endif
+
 #define NR_GRANT_ENTRIES (NR_GRANT_FRAMES * PAGE_SIZE / sizeof(grant_entry_t))
 static grant_entry_t *gnttab_table;
@@ -54,14 +96,12 @@ gnttab_grant_access(domid_t domid, unsig
     grant_ref_t ref;
     ref = get_free_entry();
-    gnttab_table[ref].frame = frame;
-    gnttab_table[ref].domid = domid;
-    wmb();
-    readonly *= GTF_readonly;
-    gnttab_table[ref].flags = GTF_permit_access | readonly;
-
-    return ref;
-}
+    uint16_t flags = (readonly * GTF_readonly) | (uint16_t) GTF_permit_access;
+    grant_entry_setup(&(gnttab_table[ref]), domid, frame, flags);
+
+    return ref;
+}
+
 grant_ref_t
 gnttab_grant_transfer(domid_t domid, unsigned long pfn)
@@ -69,10 +109,7 @@ gnttab_grant_transfer(domid_t domid, uns
     grant_ref_t ref;
     ref = get_free_entry();
-    gnttab_table[ref].frame = pfn;
-    gnttab_table[ref].domid = domid;
-    wmb();
-    gnttab_table[ref].flags = GTF_accept_transfer;
+    grant_entry_setup(&(gnttab_table[ref]), domid, pfn, GTF_accept_transfer);
     return ref;
 }
@@ -82,15 +119,14 @@ gnttab_end_access(grant_ref_t ref)
 {
     u16 flags, nflags;
-    nflags = gnttab_table[ref].flags;
+    nflags = grant_entry_get_flags(&(gnttab_table[ref]));
     do {
         if ((flags = nflags) & (GTF_reading|GTF_writing)) {
             printk("WARNING: g.e. still in use!\n");
             return 0;
         }
-    } while ((nflags = synch_cmpxchg(&gnttab_table[ref].flags, flags, 0)) !=
-            flags);
-
+    } while ((nflags = grant_entry_cmpxchg_flags(&(gnttab_table[ref]), flags))
+             != flags);
     put_free_entry(ref);
     return 1;
 }
@@ -101,8 +137,10 @@ gnttab_end_transfer(grant_ref_t ref)
     unsigned long frame;
     u16 flags;
-    while (!((flags = gnttab_table[ref].flags) & GTF_transfer_committed)) {
-        if (synch_cmpxchg(&gnttab_table[ref].flags, flags, 0) == flags) {
+    while (!((flags = grant_entry_get_flags(&(gnttab_table[ref]))) &
+             GTF_transfer_committed)) {
+        if (grant_entry_cmpxchg_flags(&(gnttab_table[ref]), flags)
+            == flags) {
             printk("Release unused transfer grant.\n");
             put_free_entry(ref);
             return 0;
@@ -111,12 +149,12 @@ gnttab_end_transfer(grant_ref_t ref)
     /* If a transfer is in progress then wait until it is completed. */
     while (!(flags & GTF_transfer_completed)) {
-        flags = gnttab_table[ref].flags;
+        flags = grant_entry_get_flags(&(gnttab_table[ref]));
     }
     /* Read the frame number /after/ reading completion status. */
     rmb();
-    frame = gnttab_table[ref].frame;
+    frame = grant_entry_get_frame(&(gnttab_table[ref]));
     put_free_entry(ref);
@@ -157,11 +195,11 @@ init_gnttab(void)
     for (i = NR_RESERVED_ENTRIES; i < NR_GRANT_ENTRIES; i++)
         put_free_entry(i);
-    setup.dom = DOMID_SELF;
-    setup.nr_frames = NR_GRANT_FRAMES;
-    set_xen_guest_handle(setup.frame_list, frames);
+    gnttab_setup_table_setup(&setup, frames, NR_GRANT_FRAMES);
     HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
     gnttab_table = map_frames(frames, NR_GRANT_FRAMES);
     printk("gnttab_table mapped at %p.\n", gnttab_table);
 }
+
+
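Editor's note: the grant_entry_* wrappers above keep every access to the shared
grant_entry_t in Xen byte order, while callers continue to use the unchanged public
API. A minimal round-trip sketch, not part of the patch -- grant_example, peer and
page are hypothetical names:

/* Illustrative sketch only -- not part of the patch. */
static void grant_example(domid_t peer, void *page)
{
    /* Grant the peer read-only access; frame, domid and flags are stored
     * through grant_entry_setup() and therefore SWAP()ed as needed. */
    grant_ref_t ref = gnttab_grant_access(peer, virt_to_mfn(page), 1);

    /* ... advertise 'ref' to the peer, e.g. via xenstore ... */

    /* Revoke the grant; returns 0 while the peer still has it mapped. */
    if (!gnttab_end_access(ref))
        printk("grant still in use\n");
}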
diff -r 1a3df0faa4a6 -r 19da775d3a9e extras/mini-os/hypervisor.c
--- a/extras/mini-os/hypervisor.c    Tue Mar 20 14:43:48 2007 +0100
+++ b/extras/mini-os/hypervisor.c    Tue Mar 20 15:38:42 2007 +0100
@@ -29,14 +29,24 @@
 #include
 #include
-#define active_evtchns(cpu,sh,idx)              \
-    ((sh)->evtchn_pending[idx] &                \
-     ~(sh)->evtchn_mask[idx])
+/* START WRAPPER */
+#define get_shared_info_vec(s, item, idx)  SWAP((s)->item[(idx)])
+#define xchg_vcpu_info_evtchn_pending_sel(v, new) \
+    SWAP((unsigned long) xchg(&(v)->evtchn_pending_sel, new))
+#define set_vcpu_info_evtchn_upcall_pending(v, val) \
+    (v)->evtchn_upcall_pending = SWAP((uint8_t)1)
+
+#define active_evtchns(cpu, sh, idx)            \
+    (SWAP((sh)->evtchn_pending[idx]) &          \
+     SWAP(~(sh)->evtchn_mask[idx]))
+
+/* END WRAPPER */
+
 void do_hypervisor_callback(struct pt_regs *regs)
 {
-    u32 l1, l2;
-    unsigned int l1i, l2i, port;
+    unsigned long l1, l2, l1i, l2i;
+    unsigned int port;
     int cpu = 0;
     shared_info_t *s = HYPERVISOR_shared_info;
     vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
@@ -44,7 +54,8 @@ void do_hypervisor_callback(struct pt_re
     vcpu_info->evtchn_upcall_pending = 0;
     /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
-    l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
+    l1 = xchg_vcpu_info_evtchn_pending_sel(vcpu_info, 0);
+
     while ( l1 != 0 )
     {
         l1i = __ffs(l1);
@@ -82,7 +93,7 @@ inline void unmask_evtchn(u32 port)
     if (  synch_test_bit        (port,    &s->evtchn_pending[0]) &&
         !synch_test_and_set_bit(port>>5, &vcpu_info->evtchn_pending_sel) )
     {
-        vcpu_info->evtchn_upcall_pending = 1;
+        set_vcpu_info_evtchn_upcall_pending(vcpu_info, 1);
         if ( !vcpu_info->evtchn_upcall_mask )
             force_evtchn_callback();
     }
diff -r 1a3df0faa4a6 -r 19da775d3a9e extras/mini-os/include/events.h
--- a/extras/mini-os/include/events.h    Tue Mar 20 14:43:48 2007 +0100
+++ b/extras/mini-os/include/events.h    Tue Mar 20 15:38:42 2007 +0100
@@ -36,12 +36,15 @@ int evtchn_bind_interdomain(domid_t pal,
 int evtchn_bind_interdomain(domid_t pal, evtchn_port_t remote_port,
                             evtchn_handler_t handler, void *data,
                             evtchn_port_t *local_port);
+ /* HAHN */
+int evtchn_release(evtchn_port_t port);
+
 void unbind_all_ports(void);
 static inline int notify_remote_via_evtchn(evtchn_port_t port)
 {
     evtchn_send_t op;
-    op.port = port;
+    op.port = SWAP(port);
     return HYPERVISOR_event_channel_op(EVTCHNOP_send, &op);
 }
diff -r 1a3df0faa4a6 -r 19da775d3a9e extras/mini-os/include/wait.h
--- a/extras/mini-os/include/wait.h    Tue Mar 20 14:43:48 2007 +0100
+++ b/extras/mini-os/include/wait.h    Tue Mar 20 15:38:42 2007 +0100
@@ -74,6 +74,14 @@ static inline void wake_up(struct wait_q
     local_irq_restore(flags);   \
 } while (0)
+#define remove_waiter(w) do {   \
+    unsigned long flags;        \
+    local_irq_save(flags);      \
+    remove_wait_queue(&w);      \
+    local_irq_restore(flags);   \
+} while (0)
+
+
 #define wait_event(wq, condition) do{       \
     unsigned long flags;                    \
     if(condition)                           \
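Editor's note: remove_waiter() is the missing counterpart of the existing add_waiter()
macro; without it a woken thread leaves a stale entry on the wait queue, which is what
wait_for_watch() in the xenbus.c part of this patch now cleans up. The intended pattern,
sketched with hypothetical names (some_queue, wait_for_something) and assuming the usual
DECLARE_WAIT_QUEUE_HEAD()/DEFINE_WAIT() helpers from wait.h and sched.h:

/* Illustrative sketch only -- not part of the patch. */
static DECLARE_WAIT_QUEUE_HEAD(some_queue);

static void wait_for_something(void)
{
    DEFINE_WAIT(w);
    add_waiter(w, some_queue);   /* enqueue and block this thread        */
    schedule();                  /* run other threads until we are woken */
    remove_waiter(w);            /* new: drop the now-stale queue entry  */
    wake(current);               /* mark this thread runnable again      */
}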
diff -r 1a3df0faa4a6 -r 19da775d3a9e extras/mini-os/minios.mk
--- a/extras/mini-os/minios.mk    Tue Mar 20 14:43:48 2007 +0100
+++ b/extras/mini-os/minios.mk    Tue Mar 20 15:38:42 2007 +0100
@@ -11,6 +11,7 @@ DEF_CFLAGS += -D__XEN_INTERFACE_VERSION_
 DEF_CFLAGS += -D__XEN_INTERFACE_VERSION__=$(XEN_INTERFACE_VERSION)
 DEF_ASFLAGS = -D__ASSEMBLY__
+DEF_ASFLAGS += -D__XEN_INTERFACE_VERSION__=$(XEN_INTERFACE_VERSION)
 DEF_LDFLAGS =
 ifeq ($(debug),y)
diff -r 1a3df0faa4a6 -r 19da775d3a9e extras/mini-os/netfront.c
--- a/extras/mini-os/netfront.c    Tue Mar 20 14:43:48 2007 +0100
+++ b/extras/mini-os/netfront.c    Tue Mar 20 15:38:42 2007 +0100
@@ -264,13 +264,18 @@ void init_netfront(void* si)
     info->tx_ring_ref = gnttab_grant_access(0,virt_to_mfn(txs),0);
     info->rx_ring_ref = gnttab_grant_access(0,virt_to_mfn(rxs),0);
-    evtchn_alloc_unbound_t op;
-    op.dom = DOMID_SELF;
-    op.remote_dom = 0;
-    HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);
-    clear_evtchn(op.port);        /* Without, handler gets invoked now! */
-    info->local_port = bind_evtchn(op.port, netfront_handler, NULL);
-    info->evtchn=op.port;
+    //evtchn_alloc_unbound_t op;
+    //op.dom = DOMID_SELF;
+    //op.remote_dom = 0;
+    //HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);
+    //clear_evtchn(op.port);        /* Without, handler gets invoked now! */
+    //info->local_port = bind_evtchn(op.port, netfront_handler, NULL);
+    //info->evtchn=op.port;
+    if(evtchn_alloc_unbound(0, netfront_handler, NULL, &info->local_port))
+    {
+        printk("Can not allocate eventchannel\n");
+    }
+    info->evtchn=info->local_port;
 again:
     err = xenbus_transaction_start(&xbt);
diff -r 1a3df0faa4a6 -r 19da775d3a9e extras/mini-os/xenbus/xenbus.c
--- a/extras/mini-os/xenbus/xenbus.c    Tue Mar 20 14:43:48 2007 +0100
+++ b/extras/mini-os/xenbus/xenbus.c    Tue Mar 20 15:38:42 2007 +0100
@@ -38,6 +38,7 @@
     tmpx < tmpy ? tmpx : tmpy;          \
 })
+//#define XENBUS_DEBUG
 #ifdef XENBUS_DEBUG
 #define DEBUG(_f, _a...) \
     printk("MINI_OS(file=xenbus.c, line=%d) " _f , __LINE__, ## _a)
@@ -72,11 +73,12 @@ static void memcpy_from_ring(const void
     memcpy(dest + c1, ring, c2);
 }
-static inline void wait_for_watch(void)
+void wait_for_watch(void)
 {
     DEFINE_WAIT(w);
     add_waiter(w,watch_queue);
     schedule();
+    remove_waiter(w);
     wake(current);
 }
@@ -99,33 +101,68 @@ char* xenbus_wait_for_value(const char*
     return NULL;
 }
+/* START WRAPPER */
+#define get_xenstore_interface(p, item)      SWAP((p)->item)
+#define set_xenstore_interface(p, item, val) ((p)->item) = SWAP(val)
+
+static inline void xsd_sockmsg_copy_from_ring(struct xsd_sockmsg* m,
+                                              void* Ring,
+                                              int off)
+{
+
+    memcpy_from_ring(Ring, m, off, sizeof(struct xsd_sockmsg));
+    m->type   = SWAP(m->type);
+    m->req_id = SWAP(m->req_id);
+    m->tx_id  = SWAP(m->tx_id);
+    m->len    = SWAP(m->len);
+}
+
+static inline struct xsd_sockmsg* xsd_sockmsg_set(struct xsd_sockmsg* m,
+                                                  uint32_t type,
+                                                  uint32_t req_id,
+                                                  uint32_t tx_id,
+                                                  uint32_t len)
+{
+    m->type   = SWAP(type);
+    m->req_id = SWAP(req_id);
+    m->tx_id  = SWAP(tx_id);
+    m->len    = SWAP(len);
+    return m;
+}
+
+static inline struct xsd_sockmsg* xsd_sockmsg_upd(struct xsd_sockmsg* m)
+{
+    return xsd_sockmsg_set(m, m->type, m->req_id, m->tx_id, m->len);
+}
+/* END WRAPPER */
 static void xenbus_thread_func(void *ign)
 {
     struct xsd_sockmsg msg;
-    unsigned prod = 0;
+    XENSTORE_RING_IDX prod=0, cons;
     for (;;)
     {
-        wait_event(xb_waitq, prod != xenstore_buf->rsp_prod);
+        wait_event(xb_waitq, prod !=
+                   get_xenstore_interface(xenstore_buf, rsp_prod));
         while (1)
         {
-            prod = xenstore_buf->rsp_prod;
-            DEBUG("Rsp_cons %d, rsp_prod %d.\n", xenstore_buf->rsp_cons,
-                    xenstore_buf->rsp_prod);
-            if (xenstore_buf->rsp_prod - xenstore_buf->rsp_cons < sizeof(msg))
+            prod = get_xenstore_interface(xenstore_buf, rsp_prod);
+            cons = get_xenstore_interface(xenstore_buf, rsp_cons);
+            DEBUG("Rsp_cons %d, rsp_prod %d.\n", cons, prod);
+            if ((prod - cons) < sizeof(msg))
                 break;
             rmb();
-            memcpy_from_ring(xenstore_buf->rsp,
-                    &msg,
-                    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons),
-                    sizeof(msg));
+
+            xsd_sockmsg_copy_from_ring(&msg,
+                                       xenstore_buf->rsp,
+                                       MASK_XENSTORE_IDX(cons));
+
             DEBUG("Msg len %d, %d avail, id %d.\n",
                     msg.len + sizeof(msg),
-                    xenstore_buf->rsp_prod - xenstore_buf->rsp_cons,
+                    prod - cons,
                    msg.req_id);
-            if (xenstore_buf->rsp_prod - xenstore_buf->rsp_cons <
-                    sizeof(msg) + msg.len)
+            if (prod - cons < sizeof(msg) + msg.len)
                 break;
             DEBUG("Message is good.\n");
@@ -137,13 +174,15 @@ static void xenbus_thread_func(void *ign
                 memcpy_from_ring(xenstore_buf->rsp,
                     payload,
-                    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons),
+                    MASK_XENSTORE_IDX(cons),
                     msg.len + sizeof(msg));
                 path = payload + sizeof(msg);
+
                 token = path + strlen(path) + 1;
-                xenstore_buf->rsp_cons += msg.len + sizeof(msg);
+                cons = cons + msg.len + sizeof(msg);
+                set_xenstore_interface(xenstore_buf, rsp_cons, cons);
                 free(payload);
                 wake_up(&watch_queue);
             }
@@ -153,9 +192,10 @@ static void xenbus_thread_func(void *ign
                 req_info[msg.req_id].reply = malloc(sizeof(msg) + msg.len);
                 memcpy_from_ring(xenstore_buf->rsp,
                     req_info[msg.req_id].reply,
-                    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons),
+                    MASK_XENSTORE_IDX(cons),
                     msg.len + sizeof(msg));
-                xenstore_buf->rsp_cons += msg.len + sizeof(msg);
+                cons = cons + msg.len+sizeof(msg);
+                set_xenstore_interface(xenstore_buf, rsp_cons, cons);
                 wake_up(&req_info[msg.req_id].waitq);
             }
     }
 }
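Editor's note: the xsd_sockmsg header crosses the shared ring in xenstored's
little-endian layout, which is why xsd_sockmsg_copy_from_ring() converts the fields
immediately after copying them out and xsd_sockmsg_set() converts them immediately
before they are written. A small standalone illustration of what would otherwise go
wrong (assuming SWAP() is a plain byte swap on a big-endian build):

/* Illustrative sketch only -- not part of the patch.  A little-endian
 * length of 16 arrives as the bytes 10 00 00 00; read unconverted on a
 * big-endian CPU that is 0x10000000, so the availability check
 * "prod - cons < sizeof(msg) + msg.len" in xenbus_thread_func() could
 * never pass. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t raw = 0x10000000;   /* header field as seen without SWAP()  */
    uint32_t len = (raw >> 24) | ((raw >> 8) & 0xff00)
                 | ((raw << 8) & 0xff0000) | (raw << 24);  /* byte swap  */
    printf("unconverted %#x, converted %u\n", (unsigned)raw, (unsigned)len);
    return 0;                    /* prints: unconverted 0x10000000, converted 16 */
}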
@@ -241,23 +281,23 @@ struct write_req {
    by xenbus as if sent atomically.  The header is added
    automatically, using type %type, req_id %req_id, and trans_id
    %trans_id. */
-static void xb_write(int type, int req_id, xenbus_transaction_t trans_id,
+void xb_write(int type, int req_id, xenbus_transaction_t trans_id,
               const struct write_req *req, int nr_reqs)
 {
-    XENSTORE_RING_IDX prod;
+    XENSTORE_RING_IDX prod, cons;
     int r;
     int len = 0;
     const struct write_req *cur_req;
     int req_off;
     int total_off;
     int this_chunk;
-    struct xsd_sockmsg m = {.type = type, .req_id = req_id,
-        .tx_id = trans_id };
+    struct xsd_sockmsg m;
     struct write_req header_req = { &m, sizeof(m) };
     for (r = 0; r < nr_reqs; r++)
         len += req[r].len;
-    m.len = len;
+    xsd_sockmsg_set(&m, type, req_id, trans_id, len);
+
     len += sizeof(m);
     cur_req = &header_req;
@@ -265,18 +305,18 @@ static void xb_write(int type, int req_i
     BUG_ON(len > XENSTORE_RING_SIZE);
     /* Wait for the ring to drain to the point where we can send the
        message. */
-    prod = xenstore_buf->req_prod;
-    if (prod + len - xenstore_buf->req_cons > XENSTORE_RING_SIZE)
+    prod = get_xenstore_interface(xenstore_buf, req_prod);
+    cons = get_xenstore_interface(xenstore_buf, req_cons);
+    if (prod + len - cons > XENSTORE_RING_SIZE)
     {
         /* Wait for there to be space on the ring */
         DEBUG("prod %d, len %d, cons %d, size %d; waiting.\n",
-                prod, len, xenstore_buf->req_cons, XENSTORE_RING_SIZE);
-        wait_event(xb_waitq,
-                xenstore_buf->req_prod + len - xenstore_buf->req_cons <=
-                XENSTORE_RING_SIZE);
+                prod, len, cons, XENSTORE_RING_SIZE);
+        wait_event(xb_waitq, prod + len - cons <= XENSTORE_RING_SIZE);
         DEBUG("Back from wait.\n");
-        prod = xenstore_buf->req_prod;
-    }
+        set_xenstore_interface(xenstore_buf, req_prod, prod);
+    }
+
     /* We're now guaranteed to be able to send the message without
        overflowing the ring.  Do so. */
@@ -304,12 +344,13 @@ static void xb_write(int type, int req_i
     DEBUG("Complete main loop of xb_write.\n");
     BUG_ON(req_off != 0);
     BUG_ON(total_off != len);
-    BUG_ON(prod > xenstore_buf->req_cons + XENSTORE_RING_SIZE);
+    BUG_ON(prod > cons + XENSTORE_RING_SIZE);
     /* Remote must see entire message before updating indexes */
     wmb();
-    xenstore_buf->req_prod += len;
+    prod = get_xenstore_interface(xenstore_buf, req_prod) + len;
+    set_xenstore_interface(xenstore_buf, req_prod, prod);
     /* Send evtchn to notify remote */
     notify_remote_via_evtchn(start_info.store_evtchn);
@@ -326,9 +367,10 @@ xenbus_msg_reply(int type,
 {
     int id;
     DEFINE_WAIT(w);
-    struct xsd_sockmsg *rep;
+    struct xsd_sockmsg *rep = NULL;
     id = allocate_xenbus_id();
+
     add_waiter(w, req_info[id].waitq);
     xb_write(type, id, trans, io, nr_reqs);
@@ -336,7 +378,8 @@ xenbus_msg_reply(int type,
     schedule();
     wake(current);
-    rep = req_info[id].reply;
+    rep = xsd_sockmsg_upd(req_info[id].reply);
+
     BUG_ON(rep->req_id != id);
     release_xenbus_id(id);
     return rep;
@@ -368,6 +411,7 @@ static void xenbus_debug_msg(const char
         { "", 1 }};
     struct xsd_sockmsg *reply;
+    DEBUG("Start xenbus_debug_msg\n");
     reply = xenbus_msg_reply(XS_DEBUG, 0, req, ARRAY_SIZE(req));
     DEBUG("Got a reply, type %d, id %d, len %d.\n",
             reply->type, reply->req_id, reply->len);
@@ -421,6 +465,20 @@ char *xenbus_read(xenbus_transaction_t x
     res[rep->len] = 0;
     free(rep);
     *value = res;
+    return NULL;
+}
+
+/* HAHN */
+char *xenbus_mkdir(xenbus_transaction_t xbt, const char *path)
+{
+    struct write_req req[] = {
+        {path, strlen(path) + 1},
+    };
+    struct xsd_sockmsg *rep;
+    rep = xenbus_msg_reply(XS_MKDIR, xbt, req, ARRAY_SIZE(req));
+    char *msg = errmsg(rep);
+    if (msg) return msg;
+    free(rep);
     return NULL;
 }
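Editor's note: xenbus_mkdir() follows the convention of the other xenbus helpers --
NULL on success, otherwise a malloc()ed error string the caller must free. A minimal
usage sketch, not part of the patch; create_node and the paths are hypothetical, and
the follow-up write assumes the existing xenbus_write(xbt, path, value) helper:

/* Illustrative sketch only -- not part of the patch. */
static void create_node(void)
{
    char *err = xenbus_mkdir(XBT_NIL, "device/example");
    if (err) {
        printk("xenbus_mkdir failed: %s\n", err);
        free(err);
        return;
    }
    /* The directory exists; values can now be written below it. */
    err = xenbus_write(XBT_NIL, "device/example/state", "1");
    if (err) {
        printk("xenbus_write failed: %s\n", err);
        free(err);
    }
}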
DEBUG("Mkdir %s...\n", path); + char *msg = xenbus_mkdir(XBT_NIL, path); + if (msg) { + DEBUG("Result %s\n", msg); + free(msg); + } else { + DEBUG("Success.\n"); + } +} + /* Simple testing thing */ void test_xenbus(void) { DEBUG("Doing xenbus test.\n"); xenbus_debug_msg("Testing xenbus...\n"); + + DEBUG("Doing mkdir test.\n"); + do_mkdir_test("device/TEST"); + do_write_test("device/TEST", "OK"); DEBUG("Doing ls test.\n"); do_ls_test("device"); @@ -650,6 +725,7 @@ void test_xenbus(void) do_rm_test("device/vif/0/flibble"); do_read_test("device/vif/0/flibble"); DEBUG("(Should have said ENOENT)\n"); + } /*