@@ -332,6 +332,7 @@ struct fastrpc_user {
struct list_head user;
struct list_head maps;
struct list_head pending;
+ struct list_head interrupted;
struct list_head mmaps;
struct fastrpc_channel_ctx *cctx;
@@ -711,6 +712,40 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
return ERR_PTR(ret);
}
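+/*
+ * Look up an invoke context that this thread parked on the interrupted
+ * list when a signal cut its invocation short. On a match, sanity-check
+ * it against the incoming invoke and move it back to the pending list
+ * so the caller can resume waiting for its completion.
+ */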
+static struct fastrpc_invoke_ctx *fastrpc_context_restore_interrupted(
+ struct fastrpc_user *fl, struct fastrpc_invoke *inv)
+{
+ struct fastrpc_invoke_ctx *ctx = NULL, *ictx, *n;
+
+ spin_lock(&fl->lock);
+ list_for_each_entry_safe(ictx, n, &fl->interrupted, node) {
+ if (ictx->pid == current->pid) {
+ if (inv->sc != ictx->sc || ictx->fl != fl) {
+ dev_err(ictx->fl->sctx->dev,
+ "interrupted sc (0x%x) or fl (%pK) does not match invoke sc (0x%x) or fl (%pK)\n",
+ ictx->sc, ictx->fl, inv->sc, fl);
+ spin_unlock(&fl->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ ctx = ictx;
+ list_del(&ctx->node);
+ list_add_tail(&ctx->node, &fl->pending);
+ break;
+ }
+ }
+ spin_unlock(&fl->lock);
+ return ctx;
+}
+
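+/*
+ * Park a context on the interrupted list so that a restarted syscall
+ * from the same thread can pick it up and resume the wait.
+ */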
+static void fastrpc_context_save_interrupted(struct fastrpc_invoke_ctx *ctx)
+{
+ spin_lock(&ctx->fl->lock);
+ list_del(&ctx->node);
+ list_add_tail(&ctx->node, &ctx->fl->interrupted);
+ spin_unlock(&ctx->fl->lock);
+}
+
static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
@@ -1261,6 +1296,14 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
return -EPERM;
}
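+ /*
+ * A user invocation interrupted by a signal leaves its context parked;
+ * if this thread owns one, resume waiting on it instead of setting up
+ * and sending a fresh remote call.
+ */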
+ if (!kernel) {
+ ctx = fastrpc_context_restore_interrupted(fl, inv);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+ if (ctx)
+ goto wait;
+ }
+
ctx = fastrpc_context_alloc(fl, kernel, sc, invoke);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -1284,6 +1327,7 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
goto bail;
PERF_END);
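+ /* Restored contexts re-enter here; their call is already in flight */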
+wait:
if (kernel) {
if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
err = -ETIMEDOUT;
@@ -1320,6 +1364,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
}
if (err == -ERESTARTSYS) {
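+ /* Park the context so a restarted syscall can resume this invocation */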
+ if (ctx)
+ fastrpc_context_save_interrupted(ctx);
+
list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
list_del(&buf->node);
list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps);
@@ -1441,7 +1488,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_STATIC, 3, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
if (err)
@@ -1569,7 +1616,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
if (init.attrs)
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 4, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
if (err)
@@ -1620,6 +1667,25 @@ static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
spin_unlock_irqrestore(&cctx->lock, flags);
}
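+/*
+ * Drop every context still queued on the file, pending and interrupted
+ * alike, when the device file is released.
+ */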
+static void fastrpc_context_list_free(struct fastrpc_user *fl)
+{
+ struct fastrpc_invoke_ctx *ctx, *n;
+ LIST_HEAD(head);
+
+ /*
+ * Detach both lists under the lock, then drop the references outside
+ * it: dropping the last reference can free maps and DMA buffers,
+ * which may sleep, so it must not happen under fl->lock.
+ */
+ spin_lock(&fl->lock);
+ list_splice_init(&fl->interrupted, &head);
+ list_splice_init(&fl->pending, &head);
+ spin_unlock(&fl->lock);
+
+ list_for_each_entry_safe(ctx, n, &head, node) {
+ list_del(&ctx->node);
+ fastrpc_context_put(ctx);
+ }
+}
+
static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
struct fastrpc_invoke_args args[1];
@@ -1633,7 +1699,7 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
return fastrpc_internal_invoke(fl, true, &ioctl);
}
@@ -1642,7 +1708,6 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
{
struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
struct fastrpc_channel_ctx *cctx = fl->cctx;
- struct fastrpc_invoke_ctx *ctx, *n;
struct fastrpc_map *map, *m;
struct fastrpc_buf *buf, *b;
unsigned long flags;
@@ -1656,10 +1721,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
if (fl->init_mem)
fastrpc_buf_free(fl->init_mem);
- list_for_each_entry_safe(ctx, n, &fl->pending, node) {
- list_del(&ctx->node);
- fastrpc_context_put(ctx);
- }
+ fastrpc_context_list_free(fl);
list_for_each_entry_safe(map, m, &fl->maps, node)
fastrpc_map_put(map);
@@ -1700,6 +1762,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
spin_lock_init(&fl->lock);
mutex_init(&fl->mutex);
INIT_LIST_HEAD(&fl->pending);
+ INIT_LIST_HEAD(&fl->interrupted);
INIT_LIST_HEAD(&fl->maps);
INIT_LIST_HEAD(&fl->mmaps);
INIT_LIST_HEAD(&fl->user);
@@ -1781,7 +1844,7 @@ static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
return fastrpc_internal_invoke(fl, true, &ioctl);
}
@@ -1812,7 +1875,7 @@ static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
}
ioctl.inv = inv;
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, false, &ioctl);
kfree(args);
@@ -1853,7 +1916,7 @@ static int fastrpc_multimode_invoke(struct fastrpc_user *fl, char __user *argp)
perf_kernel = (u64 *)(uintptr_t)einv.perf_kernel;
if (perf_kernel)
fl->profile = true;
- einv.inv.args = (__u64)args;
+ einv.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, false, &einv);
kfree(args);
break;
@@ -1883,7 +1946,7 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr
ioctl.inv.handle = FASTRPC_DSP_UTILITIES_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(0, 1, 1);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
return fastrpc_internal_invoke(fl, true, &ioctl);
}
@@ -1986,7 +2049,7 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
if (!err) {
@@ -2080,7 +2143,7 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
if (err) {
@@ -2165,7 +2228,7 @@ static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_me
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
fastrpc_map_put(map);
@@ -2234,7 +2297,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
ioctl.inv.handle = FASTRPC_INIT_HANDLE;
ioctl.inv.sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1);
- ioctl.inv.args = (__u64)args;
+ ioctl.inv.args = (u64)args;
err = fastrpc_internal_invoke(fl, true, &ioctl);
if (err) {
@@ -2555,6 +2618,10 @@ static void fastrpc_notify_users(struct fastrpc_user *user)
ctx->retval = -EPIPE;
complete(&ctx->work);
}
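+ /* Interrupted contexts may still be re-waited on; fail those waiters too */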
+ list_for_each_entry(ctx, &user->interrupted, node) {
+ ctx->retval = -EPIPE;
+ complete(&ctx->work);
+ }
spin_unlock(&user->lock);
}