+};
+
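+/*
+ * Bookkeeping for the devices using these DMA ops: per-device data lives
+ * on xen_virtio_devices, protected by xen_virtio_lock.
+ */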
+static LIST_HEAD(xen_virtio_devices);
+static DEFINE_SPINLOCK(xen_virtio_lock);
+
#define XEN_GRANT_ADDR_OFF 0x8000000000000000ULL
static inline dma_addr_t grant_to_dma(grant_ref_t grant)
@@ -25,6 +39,25 @@ static inline grant_ref_t dma_to_grant(dma_addr_t dma)
return (grant_ref_t)((dma & ~XEN_GRANT_ADDR_OFF) >> PAGE_SHIFT);
}
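+
+/* Look up the per-device data for @dev; NULL if @dev was never registered */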
+static struct xen_virtio_data *find_xen_virtio_data(struct device *dev)
+{
+ struct xen_virtio_data *data = NULL;
+ bool found = false;
+
+ spin_lock(&xen_virtio_lock);
+
+ list_for_each_entry(data, &xen_virtio_devices, list) {
+ if (data->dev == dev) {
+ found = true;
+ break;
+ }
+ }
+
+ spin_unlock(&xen_virtio_lock);
+
+ return found ? data : NULL;
+}
+
/*
* DMA ops for Xen virtio frontends.
*
@@ -43,48 +76,78 @@ static void *xen_virtio_dma_alloc(struct device *dev,
size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
unsigned long attrs)
{
- unsigned int n_pages = PFN_UP(size);
- unsigned int i;
+ struct xen_virtio_data *data;
+ unsigned int i, n_pages = PFN_UP(size);
unsigned long pfn;
grant_ref_t grant;
- void *ret;
+ void *ret = NULL;
- ret = (void *)__get_free_pages(gfp, get_order(size));
- if (!ret)
+ data = find_xen_virtio_data(dev);
+ if (!data)
return NULL;
+ spin_lock(&data->lock);
+
+ if (unlikely(data->broken))
+ goto out;
+
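+ /*
+ * alloc_pages_exact() allocates exactly n_pages pages, unlike
+ * __get_free_pages(..., get_order(size)) which rounds the allocation
+ * up to a power-of-two order, so no extra pages get granted below.
+ */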
+ ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
+ if (!ret)
+ goto out;
+
pfn = virt_to_pfn(ret);
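+ /* Reserve a contiguous run of grant references, one for each page */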
if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
- free_pages((unsigned long)ret, get_order(size));
- return NULL;
+ free_pages_exact(ret, n_pages * PAGE_SIZE);
+ ret = NULL;
+ goto out;
}
for (i = 0; i < n_pages; i++) {
- gnttab_grant_foreign_access_ref(grant + i, 0,
+ gnttab_grant_foreign_access_ref(grant + i, data->dev_domid,
pfn_to_gfn(pfn + i), 0);
}
*dma_handle = grant_to_dma(grant);
+out:
+ spin_unlock(&data->lock);
+
return ret;
}
static void xen_virtio_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
- unsigned int n_pages = PFN_UP(size);
- unsigned int i;
+ struct xen_virtio_data *data;
+ unsigned int i, n_pages = PFN_UP(size);
grant_ref_t grant;
+ data = find_xen_virtio_data(dev);
+ if (!data)
+ return;
+
+ spin_lock(&data->lock);
+
+ if (unlikely(data->broken))
+ goto out;
+
grant = dma_to_grant(dma_handle);
- for (i = 0; i < n_pages; i++)
- gnttab_end_foreign_access_ref(grant + i);
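+ /*
+ * If the backend still holds a grant, neither the grant nor its
+ * backing page can safely be reused: leak them and mark the device
+ * broken so that it hands out no further grants.
+ */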
+ for (i = 0; i < n_pages; i++) {
+ if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
+ dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
+ data->broken = true;
+ goto out;
+ }
+ }
gnttab_free_grant_reference_seq(grant, n_pages);
- free_pages((unsigned long)vaddr, get_order(size));
+ free_pages_exact(vaddr, n_pages * PAGE_SIZE);
+
+out:
+ spin_unlock(&data->lock);
}
static struct page *xen_virtio_dma_alloc_pages(struct device *dev, size_t size,
@@ -108,28 +171,71 @@ static dma_addr_t xen_virtio_dma_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
+ struct xen_virtio_data *data;
+ unsigned int i, n_pages = PFN_UP(size);
grant_ref_t grant;
+ dma_addr_t dma_handle = DMA_MAPPING_ERROR;
+
+ BUG_ON(dir == DMA_NONE);
+
+ data = find_xen_virtio_data(dev);
+ if (!data)
+ return DMA_MAPPING_ERROR;
+
+ spin_lock(&data->lock);
- if (gnttab_alloc_grant_references(1, &grant))
- return 0;
+ if (unlikely(data->broken))
+ goto out;
- gnttab_grant_foreign_access_ref(grant, 0, xen_page_to_gfn(page),
- dir == DMA_TO_DEVICE);
+ if (gnttab_alloc_grant_reference_seq(n_pages, &grant))
+ goto out;
- return grant_to_dma(grant) + offset;
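+ /*
+ * Grant the backend access to each page backing the buffer; the grant
+ * is read-only when data only flows towards the device (DMA_TO_DEVICE).
+ */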
+ for (i = 0; i < n_pages; i++) {
+ gnttab_grant_foreign_access_ref(grant + i, data->dev_domid,
+ xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE);
+ }
+
+ dma_handle = grant_to_dma(grant) + offset;
+
+out:
+ spin_unlock(&data->lock);
+
+ return dma_handle;
}
static void xen_virtio_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
+ struct xen_virtio_data *data;
+ unsigned int i, n_pages = PFN_UP(size);
grant_ref_t grant;
+ BUG_ON(dir == DMA_NONE);
+
+ data = find_xen_virtio_data(dev);
+ if (!data)
+ return;
+
+ spin_lock(&data->lock);
+
+ if (unlikely(data->broken))
+ goto out;
+
grant = dma_to_grant(dma_handle);
- gnttab_end_foreign_access_ref(grant);
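+ /*
+ * As in xen_virtio_dma_free(): a grant the backend still uses cannot
+ * be reclaimed, so flag the device as broken instead.
+ */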
+ for (i = 0; i < n_pages; i++) {
+ if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
+ dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
+ data->broken = true;
+ goto out;
+ }
+ }
+
+ gnttab_free_grant_reference_seq(grant, n_pages);
- gnttab_free_grant_reference(grant);
+out:
+ spin_unlock(&data->lock);
}
static int xen_virtio_dma_map_sg(struct device *dev, struct scatterlist *sg,
@@ -149,7 +255,7 @@ static void xen_virtio_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
static int xen_virtio_dma_dma_supported(struct device *dev, u64 mask)
{
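+ /*
+ * Grant-based DMA addresses have XEN_GRANT_ADDR_OFF (bit 63) set, so
+ * only devices capable of full 64-bit addressing can be supported.
+ */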
- return 1;
+ return mask == DMA_BIT_MASK(64);
}
static const struct dma_map_ops xen_virtio_dma_ops = {
@@ -166,9 +272,61 @@ static const struct dma_map_ops xen_virtio_dma_ops = {
.dma_supported = xen_virtio_dma_dma_supported,
};
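+
+/*
+ * Only DT "virtio,mmio" devices carrying a "xen,dev-domid" property are
+ * recognized here for now (see the XXX note below).
+ */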
+bool xen_is_virtio_device(struct device *dev)
+{
+ /* XXX Handle only DT devices for now */
+ if (!dev->of_node)
+ return false;
+
+ if (!of_device_is_compatible(dev->of_node, "virtio,mmio"))
+ return false;
+
+ return of_property_read_bool(dev->of_node, "xen,dev-domid");
+}
+EXPORT_SYMBOL_GPL(xen_is_virtio_device);
+
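+/*
+ * Resolve the backend domain ID for @dev: from the "xen,dev-domid" DT
+ * property, or hard wired to dom0 for PCI devices (see the XXX below).
+ */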
void xen_virtio_setup_dma_ops(struct device *dev)
{
+ struct xen_virtio_data *data;
+ uint32_t dev_domid;
+
+ data = find_xen_virtio_data(dev);
+ if (data) {
+ dev_err(dev, "xen_virtio data is already created\n");
+ return;
+ }
+
+ if (dev_is_pci(dev)) {
+ /* XXX Leave it hard wired to dom0 for now */
+ dev_domid = 0;
+ } else if (dev->of_node) {
+ if (of_property_read_u32(dev->of_node, "xen,dev-domid", &dev_domid)) {
+ dev_err(dev, "xen,dev-domid property is not present\n");
+ goto err;
+ }
+ } else {
+ /* The ACPI case is not supported */
+ goto err;
+ }