
Re: [Xen-devel] [bug report] xen/privcmd: add IOCTL_PRIVCMD_MMAP_RESOURCE



> -----Original Message-----
> From: Dan Carpenter [mailto:dan.carpenter@xxxxxxxxxx]
> Sent: 07 June 2018 09:32
> To: Paul Durrant <Paul.Durrant@xxxxxxxxxx>
> Cc: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Subject: [bug report] xen/privcmd: add IOCTL_PRIVCMD_MMAP_RESOURCE
> 
> Hello Paul Durrant,
> 
> The patch 3ad0876554ca: "xen/privcmd: add
> IOCTL_PRIVCMD_MMAP_RESOURCE" from May 9, 2018, leads to the
> following static checker warning:
> 
>       drivers/xen/privcmd.c:827 privcmd_ioctl_mmap_resource()
>       warn: passing casted pointer 'pfns' to 'xen_remap_domain_mfn_array()' 64 vs 32.
> 
> drivers/xen/privcmd.c
>    746  static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
>    747  {
>    748          struct privcmd_data *data = file->private_data;
>    749          struct mm_struct *mm = current->mm;
>    750          struct vm_area_struct *vma;
>    751          struct privcmd_mmap_resource kdata;
>    752          xen_pfn_t *pfns = NULL;
>    753          struct xen_mem_acquire_resource xdata;
>    754          int rc;
>    755
>    756          if (copy_from_user(&kdata, udata, sizeof(kdata)))
>    757                  return -EFAULT;
>    758
>    759          /* If restriction is in place, check the domid matches */
>    760          if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
>    761                  return -EPERM;
>    762
>    763          down_write(&mm->mmap_sem);
>    764
>    765          vma = find_vma(mm, kdata.addr);
>    766          if (!vma || vma->vm_ops != &privcmd_vm_ops) {
>    767                  rc = -EINVAL;
>    768                  goto out;
>    769          }
>    770
>    771          pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
>    772          if (!pfns) {
>    773                  rc = -ENOMEM;
>    774                  goto out;
>    775          }
>    776
>    777          if (xen_feature(XENFEAT_auto_translated_physmap)) {
>    778                  unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
>    779                  struct page **pages;
>    780                  unsigned int i;
>    781
>    782                  rc = alloc_empty_pages(vma, nr);
>    783                  if (rc < 0)
>    784                          goto out;
>    785
>    786                  pages = vma->vm_private_data;
>    787                  for (i = 0; i < kdata.num; i++) {
>    788                          xen_pfn_t pfn =
>    789                                  page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
>    790
>    791                          pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
>    792                  }
>    793          } else
>    794                  vma->vm_private_data = PRIV_VMA_LOCKED;
>    795
>    796          memset(&xdata, 0, sizeof(xdata));
>    797          xdata.domid = kdata.dom;
>    798          xdata.type = kdata.type;
>    799          xdata.id = kdata.id;
>    800          xdata.frame = kdata.idx;
>    801          xdata.nr_frames = kdata.num;
>    802          set_xen_guest_handle(xdata.frame_list, pfns);
>    803
>    804          xen_preemptible_hcall_begin();
>    805          rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
>    806          xen_preemptible_hcall_end();
>    807
>    808          if (rc)
>    809                  goto out;
>    810
>    811          if (xen_feature(XENFEAT_auto_translated_physmap)) {
>    812                  struct remap_pfn r = {
>    813                          .mm = vma->vm_mm,
>    814                          .pages = vma->vm_private_data,
>    815                          .prot = vma->vm_page_prot,
>    816                  };
>    817
>    818                  rc = apply_to_page_range(r.mm, kdata.addr,
>    819                                           kdata.num << PAGE_SHIFT,
>    820                                           remap_pfn_fn, &r);
>    821          } else {
>    822                  unsigned int domid =
>    823                          (xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
>    824                          DOMID_SELF : kdata.dom;
>    825                  int num;
>    826
>    827                  num = xen_remap_domain_mfn_array(vma,
>    828                                                   kdata.addr & PAGE_MASK,
>    829                                                   pfns, kdata.num, (int *)pfns,
>                                                          ^^^^             ^^^^^^^^^^^
> I'm a newbie to this code, but I don't understand how it can be right...
> The (int *)pfns argument is used to store integer error codes, but that
> will only write to the first half of the buffer?  Unless this only works
> on 32 bit CPUs?

No, it's supposed to work on 64 bit too, and that is a very good point. Re-using 
the buffer is still fine (an int error code fits within a xen_pfn_t slot), but 
the values need to be written and read back through a correctly cast pointer...
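
To illustrate the width mismatch Dan describes, here is a minimal, hypothetical 
userspace sketch (not privcmd code), assuming a 64-bit little-endian build where 
xen_pfn_t is 8 bytes and int is 4:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t xen_pfn_t;     /* assumption: 64-bit PFNs, as on x86_64 */

int main(void)
{
        xen_pfn_t pfns[4] = { 0 };
        int *errs = (int *)pfns;        /* the cast in question */
        unsigned int i;

        /* A callee writing 4-byte error codes only touches the first
         * half of the 32-byte buffer... */
        for (i = 0; i < 4; i++)
                errs[i] = -(int)(i + 1);

        /* ...so reading the codes back as 8-byte pfns[i] sees two packed
         * values in entries 0 and 1 and untouched zeroes in 2 and 3. */
        for (i = 0; i < 4; i++)
                printf("pfns[%u] = %#llx\n", i, (unsigned long long)pfns[i]);

        return 0;
}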

> 
>    830                                                   vma->vm_page_prot,
>    831                                                   domid,
>    832                                                   vma->vm_private_data);
>    833                  if (num < 0)
>    834                          rc = num;
>    835                  else if (num != kdata.num) {
>    836                          unsigned int i;
>    837
>    838                          for (i = 0; i < num; i++) {
>    839                                  rc = pfns[i];

...here too.
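
(If the buffer re-use were kept, I guess the read-back would also have to go 
through the int view, i.e. something like

        rc = ((int *)pfns)[i];

but see below.)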

TBH though I think I'll just allocate a separate array. I never liked this 
scheme; I was just following the existing style.
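
Roughly along these lines, I think (an untested sketch of the separate-array 
idea, not an actual patch; the name 'errs' is just for illustration and the 
error-path kfree() of it is elided):

        int *errs;

        errs = kcalloc(kdata.num, sizeof(*errs), GFP_KERNEL);
        if (!errs) {
                rc = -ENOMEM;
                goto out;
        }

        num = xen_remap_domain_mfn_array(vma, kdata.addr & PAGE_MASK,
                                         pfns, kdata.num, errs,
                                         vma->vm_page_prot, domid,
                                         vma->vm_private_data);
        if (num < 0)
                rc = num;
        else if (num != kdata.num) {
                unsigned int i;

                for (i = 0; i < num; i++) {
                        rc = errs[i];
                        if (rc < 0)
                                break;
                }
        } else
                rc = 0;

        /* ...and kfree(errs) alongside kfree(pfns) on the way out. */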

Thanks for pointing this out,

  Paul

>    840                                  if (rc < 0)
>    841                                          break;
>    842                          }
>    843                  } else
>    844                          rc = 0;
>    845          }
>    846
>    847  out:
>    848          up_write(&mm->mmap_sem);
>    849          kfree(pfns);
>    850
>    851          return rc;
>    852  }
> 
> regards,
> dan carpenter

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

