
Re: [Xen-devel] [PATCH 1/1] xen-hvm: Add trace to ioreq



On Thu, 30 Apr 2015, Don Slutz wrote:
> Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
> (cherry picked from commit b72adbe7510d0a30053d32334665ee887bec9e43)


Acked-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

I'll add it to my queue.
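For anyone following along who hasn't looked at QEMU's tracing glue: each line
added to trace-events below is picked up by scripts/tracetool at build time,
which emits a matching trace_<name>() helper whose body depends on the
configured trace backend (nop, log, simple, ...). As a rough sketch (not the
literal generated code), the handle_ioreq event ends up callable as:

    /* sketch of the generated helper; the real one lives in the generated
     * trace.h and dispatches to whichever backend was configured */
    static inline void trace_handle_ioreq(void *req, uint32_t type,
                                          uint32_t dir, uint32_t df,
                                          uint32_t data_is_ptr, uint64_t addr,
                                          uint64_t data, uint32_t count,
                                          uint32_t size)
    {
        /* e.g. the "log" backend formats the arguments with the format
         * string from trace-events and prints them to stderr */
    }

which is why the patch can simply drop trace_handle_ioreq(...) calls into
xen-hvm.c without any further plumbing.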

>  trace-events |  7 +++++++
>  xen-hvm.c    | 21 +++++++++++++++++++++
>  2 files changed, 28 insertions(+)
> 
> diff --git a/trace-events b/trace-events
> index 30eba92..4666dad 100644
> --- a/trace-events
> +++ b/trace-events
> @@ -932,6 +932,13 @@ xen_map_portio_range(uint32_t id, uint64_t start_addr, uint64_t end_addr) "id: %
>  xen_unmap_portio_range(uint32_t id, uint64_t start_addr, uint64_t end_addr) "id: %u start: %#"PRIx64" end: %#"PRIx64
>  xen_map_pcidev(uint32_t id, uint8_t bus, uint8_t dev, uint8_t func) "id: %u bdf: %02x.%02x.%02x"
>  xen_unmap_pcidev(uint32_t id, uint8_t bus, uint8_t dev, uint8_t func) "id: %u bdf: %02x.%02x.%02x"
> +handle_ioreq(void *req, uint32_t type, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p type=%d dir=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d"
> +handle_ioreq_read(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p read type=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d"
> +handle_ioreq_write(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p write type=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d"
> +cpu_ioreq_pio(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p pio dir=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d"
> +cpu_ioreq_pio_read_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio read reg data=%#"PRIx64" port=%#"PRIx64" size=%d"
> +cpu_ioreq_pio_write_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio write reg data=%#"PRIx64" port=%#"PRIx64" size=%d"
> +cpu_ioreq_move(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p copy dir=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d"
>  
>  # xen-mapcache.c
>  xen_map_cache(uint64_t phys_addr) "want %#"PRIx64
> diff --git a/xen-hvm.c b/xen-hvm.c
> index 315864c..6fdba98 100644
> --- a/xen-hvm.c
> +++ b/xen-hvm.c
> @@ -832,9 +832,14 @@ static void cpu_ioreq_pio(ioreq_t *req)
>  {
>      uint32_t i;
>  
> +    trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
> +                         req->data, req->count, req->size);
> +
>      if (req->dir == IOREQ_READ) {
>          if (!req->data_is_ptr) {
>              req->data = do_inp(req->addr, req->size);
> +            trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
> +                                         req->size);
>          } else {
>              uint32_t tmp;
>  
> @@ -845,6 +850,8 @@ static void cpu_ioreq_pio(ioreq_t *req)
>          }
>      } else if (req->dir == IOREQ_WRITE) {
>          if (!req->data_is_ptr) {
> +            trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
> +                                          req->size);
>              do_outp(req->addr, req->size, req->data);
>          } else {
>              for (i = 0; i < req->count; i++) {
> @@ -861,6 +868,9 @@ static void cpu_ioreq_move(ioreq_t *req)
>  {
>      uint32_t i;
>  
> +    trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
> +                         req->data, req->count, req->size);
> +
>      if (!req->data_is_ptr) {
>          if (req->dir == IOREQ_READ) {
>              for (i = 0; i < req->count; i++) {
> @@ -933,11 +943,18 @@ static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
>  
>  static void handle_ioreq(XenIOState *state, ioreq_t *req)
>  {
> +    trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
> +                       req->addr, req->data, req->count, req->size);
> +
>      if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
>              (req->size < sizeof (target_ulong))) {
>          req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
>      }
>  
> +    if (req->dir == IOREQ_WRITE)
> +        trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
> +                                 req->addr, req->data, req->count, req->size);
> +
>      switch (req->type) {
>          case IOREQ_TYPE_PIO:
>              cpu_ioreq_pio(req);
> @@ -977,6 +994,10 @@ static void handle_ioreq(XenIOState *state, ioreq_t *req)
>          default:
>              hw_error("Invalid ioreq type 0x%x\n", req->type);
>      }
> +    if (req->dir == IOREQ_READ) {
> +        trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
> +                                req->addr, req->data, req->count, req->size);
> +    }
>  }
>  
>  static int handle_buffered_iopage(XenIOState *state)
> -- 
> 1.8.4
> 
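One usage note, in case it helps anyone testing this: the new events are
disabled by default. Assuming QEMU was configured with a trace backend that
supports runtime control (e.g. --enable-trace-backends=log or simple), they
can be switched on by name, for example with a plain-text events file passed
on the command line:

    # events file passed as: qemu-system-x86_64 ... -trace events=/path/to/events
    handle_ioreq
    cpu_ioreq_pio
    cpu_ioreq_move

or interactively from the HMP monitor with "trace-event handle_ioreq on".
(Exact option spelling depends on the QEMU version and backend in use, so
treat the above as a sketch.)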

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

