@@ -333,6 +333,7 @@ struct fastrpc_user {
struct list_head user;
struct list_head maps;
struct list_head pending;
+ struct list_head interrupted;
struct list_head mmaps;
struct fastrpc_channel_ctx *cctx;
@@ -712,6 +713,40 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
return ERR_PTR(ret);
}
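+/*
+ * Look up a context that this thread parked on the interrupted list
+ * (matched by PID) and move it back to the pending list so the caller
+ * can resume waiting on it. Returns NULL when no saved context exists,
+ * or an ERR_PTR when the saved context does not match the retried
+ * invocation.
+ */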
+static struct fastrpc_invoke_ctx *fastrpc_context_restore_interrupted(
+ struct fastrpc_user *fl, struct fastrpc_invoke *inv)
+{
+ struct fastrpc_invoke_ctx *ctx = NULL, *ictx, *n;
+
+ spin_lock(&fl->lock);
+ list_for_each_entry_safe(ictx, n, &fl->interrupted, node) {
+ if (ictx->pid == current->pid) {
+ if (inv->sc != ictx->sc || ictx->fl != fl) {
+ dev_err(ictx->fl->sctx->dev,
+ "interrupted sc (0x%x) or fl (%pK) does not match with invoke sc (0x%x) or fl (%pK)\n",
+ ictx->sc, ictx->fl, inv->sc, fl);
+ spin_unlock(&fl->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ ctx = ictx;
+ list_del(&ctx->node);
+ list_add_tail(&ctx->node, &fl->pending);
+ break;
+ }
+ }
+ spin_unlock(&fl->lock);
+ return ctx;
+}
+
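+/*
+ * Park a context whose wait was interrupted by a signal on the
+ * interrupted list, so that a retry of the same call can resume it.
+ */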
+static void fastrpc_context_save_interrupted(
+ struct fastrpc_invoke_ctx *ctx)
+{
+ spin_lock(&ctx->fl->lock);
+ list_del(&ctx->node);
+ list_add_tail(&ctx->node, &ctx->fl->interrupted);
+ spin_unlock(&ctx->fl->lock);
+}
+
static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
@@ -1264,6 +1299,14 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
return -EPERM;
}
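+ /*
+ * A user invocation may be a retry of a call whose wait was
+ * interrupted by a signal; if a matching saved context exists,
+ * resume it instead of allocating a new one.
+ */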
+ if (!kernel) {
+ ctx = fastrpc_context_restore_interrupted(fl, inv);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+ if (ctx)
+ goto wait;
+ }
+
ctx = fastrpc_context_alloc(fl, kernel, sc, invoke);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -1287,6 +1330,7 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
goto bail;
PERF_END);
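+ /* Restored interrupted contexts re-enter here to finish the wait. */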
+wait:
if (kernel) {
if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
err = -ETIMEDOUT;
@@ -1323,6 +1367,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
}
if (err == -ERESTARTSYS) {
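+ /* Park the context so a retry of this call can pick it up. */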
+ if (ctx)
+ fastrpc_context_save_interrupted(ctx);
+
list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
list_del(&buf->node);
list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps);
@@ -1444,7 +1491,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_STATIC, 3, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
if (err)
@@ -1577,7 +1624,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
if (init.attrs)
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 4, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
if (err)
@@ -1628,6 +1675,25 @@ static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
spin_unlock_irqrestore(&cctx->lock, flags);
}
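+/*
+ * Free every context still queued on the interrupted or pending list.
+ * Called on device release, when no further invocations can arrive.
+ */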
+static void fastrpc_context_list_free(struct fastrpc_user *fl)
+{
+ struct fastrpc_invoke_ctx *ctx, *n;
+ LIST_HEAD(free_list);
+
+ /* Detach both lists under the lock, then free without holding it. */
+ spin_lock(&fl->lock);
+ list_splice_init(&fl->interrupted, &free_list);
+ list_splice_init(&fl->pending, &free_list);
+ spin_unlock(&fl->lock);
+
+ list_for_each_entry_safe(ctx, n, &free_list, node) {
+ list_del(&ctx->node);
+ fastrpc_context_put(ctx);
+ }
+}
+
static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
struct fastrpc_invoke_args args[1];
@@ -1641,7 +1707,7 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
return fastrpc_internal_invoke(fl, true, &ioctl);
}
@@ -1650,7 +1716,6 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
{
struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
struct fastrpc_channel_ctx *cctx = fl->cctx;
- struct fastrpc_invoke_ctx *ctx, *n;
struct fastrpc_map *map, *m;
struct fastrpc_buf *buf, *b;
unsigned long flags;
@@ -1664,10 +1729,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
if (fl->init_mem)
fastrpc_buf_free(fl->init_mem);
- list_for_each_entry_safe(ctx, n, &fl->pending, node) {
- list_del(&ctx->node);
- fastrpc_context_put(ctx);
- }
+ fastrpc_context_list_free(fl);
list_for_each_entry_safe(map, m, &fl->maps, node)
fastrpc_map_put(map);
@@ -1708,6 +1770,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
spin_lock_init(&fl->lock);
mutex_init(&fl->mutex);
INIT_LIST_HEAD(&fl->pending);
+ INIT_LIST_HEAD(&fl->interrupted);
INIT_LIST_HEAD(&fl->maps);
INIT_LIST_HEAD(&fl->mmaps);
INIT_LIST_HEAD(&fl->user);
@@ -1789,7 +1852,7 @@ static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
return fastrpc_internal_invoke(fl, true, &ioctl);
}
@@ -1820,7 +1883,7 @@ static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
}
ioctl.inv = inv;
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, false, &ioctl);
kfree(args);
@@ -1872,7 +1935,7 @@ static int fastrpc_multimode_invoke(struct fastrpc_user *fl, char __user *argp)
perf_kernel = (u64 *)(uintptr_t)einv.perf_kernel;
if (perf_kernel)
fl->profile = true;
- einv.inv.args = (__u64)args;
+ einv.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, false, &einv);
kfree(args);
break;
@@ -1902,7 +1965,7 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr
ioctl.inv.handle = FASTRPC_DSP_UTILITIES_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(0, 1, 1);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
return fastrpc_internal_invoke(fl, true, &ioctl);
}
@@ -2005,7 +2068,7 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
if (!err) {
@@ -2103,7 +2166,7 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
if (err) {
@@ -2184,7 +2247,7 @@ static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_me
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
fastrpc_map_put(map);
@@ -2253,7 +2316,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
if (err) {
@@ -2574,6 +2637,10 @@ static void fastrpc_notify_users(struct fastrpc_user *user)
ctx->retval = -EPIPE;
complete(&ctx->work);
}
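+ /* Also complete contexts parked on the interrupted list. */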
+ list_for_each_entry(ctx, &user->interrupted, node) {
+ ctx->retval = -EPIPE;
+ complete(&ctx->work);
+ }
spin_unlock(&user->lock);
}