@@ -61,6 +61,10 @@ struct gntdev_grant_map {
bool *being_removed;
struct page **pages;
unsigned long pages_vm_start;
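+ /* True if the backing pages belong to an external buffer (dma-buf). */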
+ unsigned int preserve_pages;
+
+ /* Needed to avoid allocation in gnttab_dma_free_pages(). */
+ xen_pfn_t *frames;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/*
@@ -73,8 +77,6 @@ struct gntdev_grant_map {
int dma_flags;
void *dma_vaddr;
dma_addr_t dma_bus_addr;
- /* Needed to avoid allocation in gnttab_dma_free_pages(). */
- xen_pfn_t *frames;
#endif
/* Number of live grants */
@@ -85,6 +87,8 @@ struct gntdev_grant_map {
struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
int dma_flags);
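+/*
+ * Like gntdev_alloc_map(), but the backing pages are taken from the
+ * provided scatter-gather table instead of being allocated: the pages
+ * stay owned by the dma-buf exporter and their frames are handed back
+ * to Xen until the map is freed.
+ */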
+struct gntdev_grant_map *gntdev_get_alloc_from_fd(struct gntdev_priv *priv,
+ struct sg_table *sgt, int count, int dma_flags);
void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add);
@@ -10,14 +10,18 @@
#include <linux/kernel.h>
#include <linux/errno.h>
+#include <linux/delay.h>
#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/notifier.h>
#include <xen/xen.h>
#include <xen/grant_table.h>
+#include <xen/mem-reservation.h>
#include "gntdev-common.h"
#include "gntdev-dmabuf.h"
@@ -46,6 +50,18 @@ struct gntdev_dmabuf {
/* dma-buf attachment of the imported buffer. */
struct dma_buf_attachment *attach;
} imp;
+ struct {
+ /* Scatter-gather table of the mapped buffer. */
+ struct sg_table *sgt;
+ /* dma-buf attachment of the mapped buffer. */
+ struct dma_buf_attachment *attach;
+ /* Grant map describing the mapped buffer. */
+ struct gntdev_grant_map *map;
+ /* Frames table used for the memory reservation. */
+ xen_pfn_t *frames;
+
+ struct gntdev_priv *priv;
+ } map;
} u;
/* Number of pages this buffer has. */
@@ -57,6 +73,7 @@ struct gntdev_dmabuf {
struct gntdev_dmabuf_wait_obj {
struct list_head next;
struct gntdev_dmabuf *gntdev_dmabuf;
+ int fd;
struct completion completion;
};
@@ -72,6 +89,10 @@ struct gntdev_dmabuf_priv {
struct list_head exp_wait_list;
/* List of imported DMA buffers. */
struct list_head imp_list;
+ /* List of mapped DMA buffers. */
+ struct list_head map_list;
+ /* List of wait objects. */
+ struct list_head map_wait_list;
/* This is the lock which protects dma_buf_xxx lists. */
struct mutex lock;
/*
@@ -88,6 +109,64 @@ struct gntdev_dmabuf_priv {
static void dmabuf_exp_release(struct kref *kref);
+static struct gntdev_dmabuf_wait_obj *
+dmabuf_map_wait_obj_find(struct gntdev_dmabuf_priv *priv, int fd)
+{
+ struct gntdev_dmabuf_wait_obj *obj, *ret = ERR_PTR(-ENOENT);
+
+ mutex_lock(&priv->lock);
+ list_for_each_entry(obj, &priv->map_wait_list, next)
+ if (obj->fd == fd) {
+ pr_debug("Found gntdev_dmabuf in the wait list\n");
+ ret = obj;
+ break;
+ }
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
+ struct gntdev_dmabuf_wait_obj *obj);
+
+static int
+dmabuf_map_wait_obj_set(struct gntdev_dmabuf_priv *priv,
+ struct gntdev_dmabuf *gntdev_dmabuf, int fd)
+{
+ struct gntdev_dmabuf_wait_obj *obj;
+
+ obj = dmabuf_map_wait_obj_find(priv, fd);
+ if (IS_ERR(obj)) {
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ init_completion(&obj->completion);
+ obj->gntdev_dmabuf = gntdev_dmabuf;
+ obj->fd = fd;
+ mutex_lock(&priv->lock);
+ list_add(&obj->next, &priv->map_wait_list);
+ mutex_unlock(&priv->lock);
+ } else {
+ /* Reuse the wait object already on the list for this fd. */
+ reinit_completion(&obj->completion);
+ obj->gntdev_dmabuf = gntdev_dmabuf;
+ }
+ return 0;
+}
+
+static void dmabuf_map_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
+ struct gntdev_dmabuf *gntdev_dmabuf)
+{
+ struct gntdev_dmabuf_wait_obj *obj;
+
+ mutex_lock(&priv->lock);
+ list_for_each_entry(obj, &priv->map_wait_list, next)
+ if (obj->gntdev_dmabuf == gntdev_dmabuf) {
+ pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
+ complete_all(&obj->completion);
+ break;
+ }
+
+ mutex_unlock(&priv->lock);
+}
+
static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
struct gntdev_dmabuf *gntdev_dmabuf)
@@ -410,6 +489,18 @@ static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
return ret;
}
+static void dmabuf_map_free_gntdev_dmabuf(struct gntdev_dmabuf *gntdev_dmabuf)
+{
+ if (!gntdev_dmabuf)
+ return;
+
+ kfree(gntdev_dmabuf->pages);
+
+ kvfree(gntdev_dmabuf->u.map.frames);
+ kfree(gntdev_dmabuf);
+}
+
static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
int count)
@@ -432,6 +523,113 @@ dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
return map;
}
+static void dmabuf_map_remove(struct gntdev_priv *priv,
+ struct gntdev_dmabuf *gntdev_dmabuf)
+{
+ dmabuf_exp_remove_map(priv, gntdev_dmabuf->u.map.map);
+ dmabuf_map_free_gntdev_dmabuf(gntdev_dmabuf);
+}
+
+static struct gntdev_dmabuf *
+dmabuf_alloc_gntdev_from_buf(struct gntdev_priv *priv, int fd, int dmabuf_flags,
+ int count, unsigned int data_ofs)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf;
+ struct dma_buf_attachment *attach;
+ struct dma_buf *dma_buf;
+ struct sg_table *sgt;
+ int ret = 0;
+
+ gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
+ if (!gntdev_dmabuf)
+ return ERR_PTR(-ENOMEM);
+
+ gntdev_dmabuf->pages = kcalloc(count,
+ sizeof(gntdev_dmabuf->pages[0]), GFP_KERNEL);
+
+ if (!gntdev_dmabuf->pages) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ gntdev_dmabuf->u.map.frames = kvcalloc(count,
+ sizeof(gntdev_dmabuf->u.map.frames[0]), GFP_KERNEL);
+ if (!gntdev_dmabuf->u.map.frames) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ if (gntdev_test_page_count(count)) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ dma_buf = dma_buf_get(fd);
+ if (IS_ERR(dma_buf)) {
+ pr_debug("Unable to get dmabuf from fd\n");
+ ret = PTR_ERR(dma_buf);
+ goto free;
+ }
+
+ attach = dma_buf_attach(dma_buf, priv->dma_dev);
+ if (IS_ERR(attach)) {
+ ret = PTR_ERR(attach);
+ goto fail_put;
+ }
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ if (sgt->sgl->offset != data_ofs) {
+ pr_debug("DMA buffer offset %u, user-space expects %u\n",
+ sgt->sgl->offset, data_ofs);
+ ret = -EINVAL;
+ goto fail_unmap;
+ }
+
+ /* Check that the buffer is large enough for the expected page count. */
+ if (attach->dmabuf->size < (size_t)count << PAGE_SHIFT) {
+ pr_debug("DMA buffer is %zu bytes, user-space expects at least %zu\n",
+ attach->dmabuf->size, (size_t)count << PAGE_SHIFT);
+ ret = -EINVAL;
+ goto fail_unmap;
+ }
+
+ gntdev_dmabuf->u.map.map = gntdev_get_alloc_from_fd(priv, sgt, count,
+ dmabuf_flags);
+ if (!gntdev_dmabuf->u.map.map) {
+ ret = -ENOMEM;
+ goto fail_unmap;
+ }
+
+ gntdev_dmabuf->priv = priv->dmabuf_priv;
+ gntdev_dmabuf->fd = fd;
+ gntdev_dmabuf->u.map.attach = attach;
+ gntdev_dmabuf->u.map.sgt = sgt;
+ gntdev_dmabuf->dmabuf = dma_buf;
+ gntdev_dmabuf->nr_pages = count;
+ gntdev_dmabuf->u.map.priv = priv;
+
+ memcpy(gntdev_dmabuf->pages, gntdev_dmabuf->u.map.map->pages, count *
+ sizeof(gntdev_dmabuf->u.map.map->pages[0]));
+ memcpy(gntdev_dmabuf->u.map.frames, gntdev_dmabuf->u.map.map->frames, count *
+ sizeof(gntdev_dmabuf->u.map.map->frames[0]));
+
+ return gntdev_dmabuf;
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+fail_put:
+ dma_buf_put(dma_buf);
+free:
+ dmabuf_map_free_gntdev_dmabuf(gntdev_dmabuf);
+ return ERR_PTR(ret);
+}
+
static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
int count, u32 domid, u32 *refs, u32 *fd)
{
@@ -481,6 +679,117 @@ static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
return ret;
}
+static void dmabuf_release_notifier_cb(struct dma_buf *dmabuf, void *priv)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf = priv;
+
+ if (!gntdev_dmabuf)
+ return;
+
+ dmabuf_map_remove(gntdev_dmabuf->u.map.priv, gntdev_dmabuf);
+ dmabuf_map_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
+}
+
+static int dmabuf_detach_map(struct gntdev_dmabuf *gntdev_dmabuf)
+{
+ struct dma_buf *dma_buf = gntdev_dmabuf->dmabuf;
+ long lret;
+
+ /* Wait on any implicit fences */
+ lret = dma_resv_wait_timeout(dma_buf->resv,
+ dma_resv_usage_rw(true), true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (lret == 0)
+ return -ETIME;
+ else if (lret < 0)
+ return lret;
+
+ if (gntdev_dmabuf->u.map.sgt) {
+ dma_buf_unmap_attachment(gntdev_dmabuf->u.map.attach,
+ gntdev_dmabuf->u.map.sgt, DMA_BIDIRECTIONAL);
+ gntdev_dmabuf->u.map.sgt = NULL;
+ }
+
+ dma_buf_detach(dma_buf, gntdev_dmabuf->u.map.attach);
+ dma_buf_put(dma_buf);
+
+ return 0;
+}
+
+static int dmabuf_map_release(struct gntdev_dmabuf *gntdev_dmabuf, bool sync)
+{
+ int ret;
+
+ if (!sync) {
+ ret = dmabuf_map_wait_obj_set(gntdev_dmabuf->priv, gntdev_dmabuf,
+ gntdev_dmabuf->fd);
+ if (ret)
+ return ret;
+ }
+
+ ret = dmabuf_detach_map(gntdev_dmabuf);
+ if (ret)
+ return ret;
+
+ if (!sync) {
+ ret = dma_buf_register_release_notifier(gntdev_dmabuf->dmabuf,
+ &dmabuf_release_notifier_cb, gntdev_dmabuf);
+ if (ret)
+ return ret;
+ } else {
+ dmabuf_map_remove(gntdev_dmabuf->u.map.priv, gntdev_dmabuf);
+ }
+
+ return 0;
+}
+
+static int dmabuf_map_refs_to_fd(struct gntdev_priv *priv, int flags,
+ int count, u32 domid, u32 *refs, u32 fd,
+ unsigned int data_ofs)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf;
+ int i, ret;
+
+ gntdev_dmabuf = dmabuf_alloc_gntdev_from_buf(priv, fd, flags, count,
+ data_ofs);
+
+ if (IS_ERR(gntdev_dmabuf)) {
+ ret = PTR_ERR(gntdev_dmabuf);
+ goto fail_gntdev;
+ }
+
+ for (i = 0; i < count; i++) {
+ gntdev_dmabuf->u.map.map->grants[i].domid = domid;
+ gntdev_dmabuf->u.map.map->grants[i].ref = refs[i];
+ }
+
+ mutex_lock(&priv->lock);
+ gntdev_add_map(priv, gntdev_dmabuf->u.map.map);
+ mutex_unlock(&priv->lock);
+
+ gntdev_dmabuf->u.map.map->flags |= GNTMAP_host_map;
+#if defined(CONFIG_X86)
+ gntdev_dmabuf->u.map.map->flags |= GNTMAP_device_map;
+#endif
+
+ ret = gntdev_map_grant_pages(gntdev_dmabuf->u.map.map);
+ if (ret < 0)
+ goto fail;
+
+ mutex_lock(&priv->lock);
+ list_add(&gntdev_dmabuf->next, &priv->dmabuf_priv->map_list);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+fail:
+ dmabuf_exp_remove_map(priv, gntdev_dmabuf->u.map.map);
+ dmabuf_detach_map(gntdev_dmabuf);
+ dmabuf_map_free_gntdev_dmabuf(gntdev_dmabuf);
+fail_gntdev:
+ return ret;
+}
+
/* DMA buffer import support. */
static int
@@ -673,14 +982,15 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
* it from the buffer's list.
*/
static struct gntdev_dmabuf *
-dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
+dmabuf_list_find_unlink(struct gntdev_dmabuf_priv *priv, struct list_head *list,
+ int fd)
{
struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);
mutex_lock(&priv->lock);
- list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
+ list_for_each_entry_safe(gntdev_dmabuf, q, list, next) {
if (gntdev_dmabuf->fd == fd) {
- pr_debug("Found gntdev_dmabuf in the import list\n");
+ pr_debug("Found gntdev_dmabuf in the list\n");
ret = gntdev_dmabuf;
list_del(&gntdev_dmabuf->next);
break;
@@ -696,7 +1006,7 @@ static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
struct dma_buf_attachment *attach;
struct dma_buf *dma_buf;
- gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
+ gntdev_dmabuf = dmabuf_list_find_unlink(priv, &priv->imp_list, fd);
if (IS_ERR(gntdev_dmabuf))
return PTR_ERR(gntdev_dmabuf);
@@ -726,6 +1036,21 @@ static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}
+static void dmabuf_map_release_all(struct gntdev_dmabuf_priv *priv)
+{
+ struct gntdev_dmabuf *q, *gntdev_dmabuf;
+ struct gntdev_dmabuf_wait_obj *o, *obj;
+
+ list_for_each_entry_safe(obj, o, &priv->map_wait_list, next)
+ dmabuf_exp_wait_obj_free(priv, obj);
+
+ list_for_each_entry_safe(gntdev_dmabuf, q, &priv->map_list, next) {
+ list_del(&gntdev_dmabuf->next);
+ dmabuf_map_release(gntdev_dmabuf, true);
+ }
+}
+
/* DMA buffer IOCTL support. */
long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
@@ -769,6 +1094,47 @@ long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
return ret;
}
+long gntdev_ioctl_dmabuf_map_refs_to_buf(struct gntdev_priv *priv, int use_ptemod,
+ struct ioctl_gntdev_dmabuf_map_refs_to_buf __user *u)
+{
+ struct ioctl_gntdev_dmabuf_map_refs_to_buf op;
+ u32 *refs;
+ long ret;
+
+ if (use_ptemod) {
+ pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
+ use_ptemod);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&op, u, sizeof(op)) != 0)
+ return -EFAULT;
+
+ if (!op.count)
+ return -EINVAL;
+
+ refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
+ if (!refs)
+ return -ENOMEM;
+
+ if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = dmabuf_map_refs_to_fd(priv, op.flags, op.count,
+ op.domid, refs, op.fd, op.data_ofs);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(u, &op, sizeof(op)) != 0)
+ ret = -EFAULT;
+
+out:
+ kfree(refs);
+ return ret;
+}
+
long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
@@ -823,6 +1189,45 @@ long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}
+long gntdev_ioctl_dmabuf_map_release(struct gntdev_priv *priv,
+ struct ioctl_gntdev_dmabuf_map_release __user *u)
+{
+ struct ioctl_gntdev_dmabuf_map_release op;
+ struct gntdev_dmabuf *gntdev_dmabuf;
+ long ret;
+
+ if (copy_from_user(&op, u, sizeof(op)) != 0)
+ return -EFAULT;
+
+ gntdev_dmabuf = dmabuf_list_find_unlink(priv->dmabuf_priv,
+ &priv->dmabuf_priv->map_list, op.fd);
+ if (IS_ERR(gntdev_dmabuf))
+ return PTR_ERR(gntdev_dmabuf);
+
+ ret = dmabuf_map_release(gntdev_dmabuf, false);
+ if (ret) {
+ /* Keep the buffer on the list so that the call can be retried. */
+ mutex_lock(&priv->dmabuf_priv->lock);
+ list_add(&gntdev_dmabuf->next, &priv->dmabuf_priv->map_list);
+ mutex_unlock(&priv->dmabuf_priv->lock);
+ }
+
+ return ret;
+}
+
+long gntdev_ioctl_dmabuf_map_wait_released(struct gntdev_priv *priv,
+ struct ioctl_gntdev_dmabuf_map_wait_released __user *u)
+{
+ struct ioctl_gntdev_dmabuf_map_wait_released op;
+ struct gntdev_dmabuf_wait_obj *obj;
+ int ret = 0;
+
+ if (copy_from_user(&op, u, sizeof(op)) != 0)
+ return -EFAULT;
+
+ obj = dmabuf_map_wait_obj_find(priv->dmabuf_priv, op.fd);
+ if (IS_ERR(obj))
+ return (PTR_ERR(obj) == -ENOENT) ? 0 : PTR_ERR(obj);
+
+ if (!completion_done(&obj->completion))
+ ret = dmabuf_exp_wait_obj_wait(obj, op.wait_to_ms);
+
+ if (ret != -ETIMEDOUT)
+ dmabuf_exp_wait_obj_free(priv->dmabuf_priv, obj);
+ return ret;
+}
+
struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
struct gntdev_dmabuf_priv *priv;
@@ -835,6 +1240,8 @@ struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
INIT_LIST_HEAD(&priv->exp_list);
INIT_LIST_HEAD(&priv->exp_wait_list);
INIT_LIST_HEAD(&priv->imp_list);
+ INIT_LIST_HEAD(&priv->map_list);
+ INIT_LIST_HEAD(&priv->map_wait_list);
priv->filp = filp;
@@ -844,5 +1251,6 @@ struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
dmabuf_imp_release_all(priv);
+ dmabuf_map_release_all(priv);
kfree(priv);
}
@@ -21,6 +21,9 @@ void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv);
long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
struct ioctl_gntdev_dmabuf_exp_from_refs __user *u);
+long gntdev_ioctl_dmabuf_map_refs_to_buf(struct gntdev_priv *priv, int use_ptemod,
+ struct ioctl_gntdev_dmabuf_map_refs_to_buf __user *u);
+
long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
struct ioctl_gntdev_dmabuf_exp_wait_released __user *u);
@@ -30,4 +33,8 @@ long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
struct ioctl_gntdev_dmabuf_imp_release __user *u);
+long gntdev_ioctl_dmabuf_map_release(struct gntdev_priv *priv,
+ struct ioctl_gntdev_dmabuf_map_release __user *u);
+long gntdev_ioctl_dmabuf_map_wait_released(struct gntdev_priv *priv,
+ struct ioctl_gntdev_dmabuf_map_wait_released __user *u);
#endif
@@ -22,6 +22,7 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -43,6 +44,7 @@
#include <xen/gntdev.h>
#include <xen/events.h>
#include <xen/page.h>
+#include <xen/mem-reservation.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
@@ -96,7 +98,11 @@ static void gntdev_free_map(struct gntdev_grant_map *map)
return;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
- if (map->dma_vaddr) {
+ if (map->pages && map->preserve_pages) {
+ gnttab_dma_clean_page_reservation(map->count, map->pages,
+ map->frames);
+ } else if (map->dma_vaddr) {
struct gnttab_dma_alloc_args args;
args.dev = map->dma_dev;
@@ -216,6 +222,82 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
return NULL;
}
+struct gntdev_grant_map *gntdev_get_alloc_from_fd(struct gntdev_priv *priv,
+ struct sg_table *sgt, int count, int dma_flags)
+{
+ struct gntdev_grant_map *add;
+ int i = 0;
+ struct sg_page_iter sg_iter;
+
+ add = kzalloc(sizeof(*add), GFP_KERNEL);
+ if (!add)
+ return NULL;
+
+ add->grants = kvcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
+ add->map_ops = kvcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
+ add->unmap_ops = kvcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
+ add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
+ add->frames = kvcalloc(count, sizeof(add->frames[0]), GFP_KERNEL);
+ add->being_removed =
+ kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL);
+ add->preserve_pages = 1;
+
+ if (add->grants == NULL ||
+ add->map_ops == NULL ||
+ add->unmap_ops == NULL ||
+ add->pages == NULL ||
+ add->frames == NULL ||
+ add->being_removed == NULL)
+ goto err;
+
+ if (use_ptemod) {
+ add->kmap_ops = kvmalloc_array(count, sizeof(add->kmap_ops[0]),
+ GFP_KERNEL);
+ add->kunmap_ops = kvmalloc_array(count, sizeof(add->kunmap_ops[0]),
+ GFP_KERNEL);
+ if (!add->kmap_ops || !add->kunmap_ops)
+ goto err;
+ }
+
+ for_each_sgtable_page(sgt, &sg_iter, 0) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+
+ add->pages[i] = page;
+ add->frames[i] = xen_page_to_gfn(page);
+ i++;
+ if (i >= count)
+ break;
+ }
+
+ if (i < count) {
+ pr_debug("Provided buffer is too small\n");
+ goto err;
+ }
+
+ if (gnttab_dma_reserve_pages(count, add->pages, add->frames))
+ goto err;
+
+ for (i = 0; i < count; i++) {
+ add->map_ops[i].handle = -1;
+ add->unmap_ops[i].handle = -1;
+ if (use_ptemod) {
+ add->kmap_ops[i].handle = -1;
+ add->kunmap_ops[i].handle = -1;
+ }
+ }
+
+ add->index = 0;
+ add->count = count;
+ refcount_set(&add->users, 1);
+
+ return add;
+
+err:
+ /*
+ * The pages are owned by the dma-buf exporter: clear them so that
+ * gntdev_free_map() neither frees them nor hands their frames back
+ * to Xen on this error path.
+ */
+ kvfree(add->pages);
+ add->pages = NULL;
+ gntdev_free_map(add);
+ return NULL;
+}
+
void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add)
{
struct gntdev_grant_map *map;
@@ -610,6 +692,9 @@ static int gntdev_release(struct inode *inode, struct file *flip)
struct gntdev_grant_map *map;
pr_debug("priv %p\n", priv);
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+ gntdev_dmabuf_fini(priv->dmabuf_priv);
+#endif
mutex_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
@@ -620,10 +705,6 @@ static int gntdev_release(struct inode *inode, struct file *flip)
}
mutex_unlock(&priv->lock);
-#ifdef CONFIG_XEN_GNTDEV_DMABUF
- gntdev_dmabuf_fini(priv->dmabuf_priv);
-#endif
-
kfree(priv);
return 0;
}
@@ -1020,6 +1101,16 @@ static long gntdev_ioctl(struct file *flip,
case IOCTL_GNTDEV_DMABUF_IMP_RELEASE:
return gntdev_ioctl_dmabuf_imp_release(priv, ptr);
+
+ case IOCTL_GNTDEV_DMABUF_MAP_REFS_TO_BUF:
+ return gntdev_ioctl_dmabuf_map_refs_to_buf(priv, use_ptemod, ptr);
+
+ case IOCTL_GNTDEV_DMABUF_MAP_RELEASE:
+ return gntdev_ioctl_dmabuf_map_release(priv, ptr);
+
+ case IOCTL_GNTDEV_DMABUF_MAP_WAIT_RELEASED:
+ return gntdev_ioctl_dmabuf_map_wait_released(priv, ptr);
+
#endif
default:
@@ -1036,6 +1036,40 @@ void gnttab_free_pages(int nr_pages, struct page **pages)
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);
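+/**
+ * gnttab_dma_reserve_pages - hand the frames backing @pages back to Xen
+ * @nr_pages: number of pages
+ * @pages: pages whose backing frames are given back
+ * @frames: GFNs of @pages, filled in by the caller
+ *
+ * Scrubs the pages, resets their VA mappings and decreases the domain's
+ * memory reservation by @nr_pages frames, so that the pages can be
+ * grant-mapped with foreign frames. Balanced by
+ * gnttab_dma_clean_page_reservation().
+ */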
+int gnttab_dma_reserve_pages(int nr_pages, struct page **pages,
+ xen_pfn_t *frames)
+{
+ int ret, i;
+
+ for (i = 0; i < nr_pages; i++)
+ xenmem_reservation_scrub_page(pages[i]);
+
+ xenmem_reservation_va_mapping_reset(nr_pages, pages);
+
+ ret = xenmem_reservation_decrease(nr_pages, frames);
+ if (ret != nr_pages) {
+ pr_debug("Failed to decrease reservation for DMA buffer\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gnttab_dma_reserve_pages);
+
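+/**
+ * gnttab_dma_clean_page_reservation - balance gnttab_dma_reserve_pages()
+ * @nr_pages: number of pages
+ * @pages: pages to be backed by @frames again
+ * @frames: GFNs to populate the domain's physmap with
+ *
+ * Increases the domain's memory reservation by @nr_pages frames and
+ * restores the VA mappings of @pages.
+ */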
+int gnttab_dma_clean_page_reservation(int nr_pages, struct page **pages,
+ xen_pfn_t *frames)
+{
+ int ret;
+
+ ret = xenmem_reservation_increase(nr_pages, frames);
+ if (ret != nr_pages) {
+ pr_debug("Failed to increase reservation for DMA buffer\n");
+ ret = -EFAULT;
+ } else {
+ ret = 0;
+ }
+
+ xenmem_reservation_va_mapping_update(nr_pages, pages, frames);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_dma_clean_page_reservation);
+
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
* gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
@@ -1071,17 +1105,11 @@ int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
args->pages[i] = page;
args->frames[i] = xen_page_to_gfn(page);
- xenmem_reservation_scrub_page(page);
}
- xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);
-
- ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
- if (ret != args->nr_pages) {
- pr_debug("Failed to decrease reservation for DMA buffer\n");
- ret = -EFAULT;
+ ret = gnttab_dma_reserve_pages(args->nr_pages, args->pages, args->frames);
+ if (ret)
goto fail;
- }
ret = gnttab_pages_set_private(args->nr_pages, args->pages);
if (ret < 0)
@@ -1109,17 +1137,8 @@ int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
for (i = 0; i < args->nr_pages; i++)
args->frames[i] = page_to_xen_pfn(args->pages[i]);
- ret = xenmem_reservation_increase(args->nr_pages, args->frames);
- if (ret != args->nr_pages) {
- pr_debug("Failed to increase reservation for DMA buffer\n");
- ret = -EFAULT;
- } else {
- ret = 0;
- }
-
- xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
- args->frames);
-
+ ret = gnttab_dma_clean_page_reservation(args->nr_pages, args->pages,
+ args->frames);
size = args->nr_pages << PAGE_SHIFT;
if (args->coherent)
dma_free_coherent(args->dev, size,
@@ -312,4 +312,66 @@ struct ioctl_gntdev_dmabuf_imp_release {
__u32 reserved;
};
+/*
+ * Fd mapping ioctls: map grant references @refs on top of dma-buf @fd.
+ *
+ * These allow gntdev to map the scatter-gather table of an existing
+ * dma-buf file descriptor. They provide the same functionality as the
+ * DMABUF_EXP_FROM_REFS_V2 ioctls, but map the grants on top of the
+ * existing buffer memory instead of allocating new memory. This is
+ * useful when the exporter has to work with an externally provided
+ * buffer.
+ */
+
+#define IOCTL_GNTDEV_DMABUF_MAP_REFS_TO_BUF \
+ _IOC(_IOC_NONE, 'G', 15, \
+ sizeof(struct ioctl_gntdev_dmabuf_map_refs_to_buf))
+struct ioctl_gntdev_dmabuf_map_refs_to_buf {
+ /* IN parameters. */
+ /* Specific options for this dma-buf: see GNTDEV_DMA_FLAG_XXX. */
+ __u32 flags;
+ /* Number of grant references in @refs array. */
+ __u32 count;
+ /* Offset of the data in the dma-buf. */
+ __u32 data_ofs;
+ /* File descriptor of the dma-buf. */
+ __u32 fd;
+ /* The domain ID of the grant references to be mapped. */
+ __u32 domid;
+ /* Variable IN parameter. */
+ /* Array of grant references of size @count. */
+ __u32 refs[1];
+};
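+
+/*
+ * Illustrative user-space sketch (not part of the ABI; identifiers other
+ * than the ioctl and struct names are hypothetical): map @count grant
+ * references from @domid on top of an existing dma-buf @buf_fd, with the
+ * gntdev device open as @gntdev_fd:
+ *
+ *	struct ioctl_gntdev_dmabuf_map_refs_to_buf *op;
+ *	size_t sz = sizeof(*op) + (count - 1) * sizeof(__u32);
+ *
+ *	op = calloc(1, sz);
+ *	op->flags = 0;
+ *	op->count = count;
+ *	op->data_ofs = 0;
+ *	op->fd = buf_fd;
+ *	op->domid = domid;
+ *	memcpy(op->refs, refs, count * sizeof(__u32));
+ *	ret = ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_MAP_REFS_TO_BUF, op);
+ *	free(op);
+ */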
+
+/*
+ * This releases the gntdev attachment to the buffer with file descriptor
+ * @fd, so that the buffer can be released by its owner. This is only
+ * valid for buffers created with IOCTL_GNTDEV_DMABUF_MAP_REFS_TO_BUF.
+ * Returns 0 on success and -ETIME if waiting for the dma-buf's fences
+ * timed out; in that case the call should be repeated once the fences
+ * have been released.
+ */
+#define IOCTL_GNTDEV_DMABUF_MAP_RELEASE \
+ _IOC(_IOC_NONE, 'G', 16, \
+ sizeof(struct ioctl_gntdev_dmabuf_map_release))
+struct ioctl_gntdev_dmabuf_map_release {
+ /* IN parameters */
+ __u32 fd;
+ __u32 reserved;
+};
+
+/*
+ * This waits until the gntdev release procedure has finished and the
+ * buffer has been completely released. This is only valid for buffers
+ * created with IOCTL_GNTDEV_DMABUF_MAP_REFS_TO_BUF.
+ */
+#define IOCTL_GNTDEV_DMABUF_MAP_WAIT_RELEASED \
+ _IOC(_IOC_NONE, 'G', 17, \
+ sizeof(struct ioctl_gntdev_dmabuf_map_wait_released))
+struct ioctl_gntdev_dmabuf_map_wait_released {
+ /* IN parameters */
+ __u32 fd;
+ __u32 wait_to_ms;
+};
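+
+/*
+ * Illustrative user-space sketch (not part of the ABI; identifiers other
+ * than the ioctl and struct names are hypothetical): release the mapping,
+ * retrying on -ETIME as documented above, then wait for completion:
+ *
+ *	struct ioctl_gntdev_dmabuf_map_release rel = { .fd = buf_fd };
+ *	struct ioctl_gntdev_dmabuf_map_wait_released w = {
+ *		.fd = buf_fd, .wait_to_ms = 1000,
+ *	};
+ *
+ *	while (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_MAP_RELEASE, &rel)) {
+ *		if (errno != ETIME)
+ *			return -1;
+ *		usleep(10000);
+ *	}
+ *	return ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_MAP_WAIT_RELEASED, &w);
+ */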
+
#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
@@ -250,6 +250,11 @@ int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
#endif
+int gnttab_dma_reserve_pages(int nr_pages, struct page **pages,
+ xen_pfn_t *frames);
+int gnttab_dma_clean_page_reservation(int nr_pages, struct page **pages,
+ xen_pfn_t *frames);
+
int gnttab_pages_set_private(int nr_pages, struct page **pages);
void gnttab_pages_clear_private(int nr_pages, struct page **pages);