Diffstat (limited to 'drivers/misc/fastrpc.c')
-rw-r--r--  drivers/misc/fastrpc.c  556
1 file changed, 509 insertions, 47 deletions
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index d60b176ffa95..d80ada8cac09 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -17,6 +17,7 @@
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/qcom_scm.h>
#include <uapi/misc/fastrpc.h>
#define ADSP_DOMAIN_ID (0)
@@ -25,16 +26,22 @@
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, slpi, cdsp */
#define FASTRPC_MAX_SESSIONS 13 /* 12 compute, 1 cpz */
+#define FASTRPC_MAX_VMIDS 16
#define FASTRPC_ALIGN 128
#define FASTRPC_MAX_FDLIST 16
#define FASTRPC_MAX_CRCLIST 64
#define FASTRPC_PHYS(p) ((p) & 0xffffffff)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_INIT_HANDLE 1
+#define FASTRPC_DSP_UTILITIES_HANDLE 2
#define FASTRPC_CTXID_MASK (0xFF0)
#define INIT_FILELEN_MAX (2 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME "fastrpc"
#define ADSP_MMAP_ADD_PAGES 0x1000
+#define DSP_UNSUPPORTED_API (0x80000414)
+/* Maximum number of DSP attributes supported */
+#define FASTRPC_MAX_DSP_ATTRIBUTES (256)
+#define FASTRPC_MAX_DSP_ATTRIBUTES_LEN (sizeof(u32) * FASTRPC_MAX_DSP_ATTRIBUTES)
/* Retrieves number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)
@@ -72,13 +79,15 @@
#define FASTRPC_RMID_INIT_CREATE 6
#define FASTRPC_RMID_INIT_CREATE_ATTR 7
#define FASTRPC_RMID_INIT_CREATE_STATIC 8
+#define FASTRPC_RMID_INIT_MEM_MAP 10
+#define FASTRPC_RMID_INIT_MEM_UNMAP 11
/* Protection Domain(PD) ids */
#define AUDIO_PD (0) /* also GUEST_OS PD? */
#define USER_PD (1)
#define SENSORS_PD (2)
-#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)
+#define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)
static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
"sdsp", "cdsp"};
@@ -92,9 +101,20 @@ struct fastrpc_invoke_buf {
u32 pgidx; /* index to start of contiguous region */
};
-struct fastrpc_remote_arg {
- u64 pv;
- u64 len;
+struct fastrpc_remote_dmahandle {
+ s32 fd; /* dma handle fd */
+ u32 offset; /* dma handle offset */
+ u32 len; /* dma handle length */
+};
+
+struct fastrpc_remote_buf {
+ u64 pv; /* buffer pointer */
+ u64 len; /* length of buffer */
+};
+
+union fastrpc_remote_arg {
+ struct fastrpc_remote_buf buf;
+ struct fastrpc_remote_dmahandle dma;
};
struct fastrpc_mmap_rsp_msg {
@@ -108,12 +128,29 @@ struct fastrpc_mmap_req_msg {
s32 num;
};
+struct fastrpc_mem_map_req_msg {
+ s32 pgid;
+ s32 fd;
+ s32 offset;
+ u32 flags;
+ u64 vaddrin;
+ s32 num;
+ s32 data_len;
+};
+
struct fastrpc_munmap_req_msg {
s32 pgid;
u64 vaddr;
u64 size;
};
+struct fastrpc_mem_unmap_req_msg {
+ s32 pgid;
+ s32 fd;
+ u64 vaddrin;
+ u64 len;
+};
+
struct fastrpc_msg {
int pid; /* process group id */
int tid; /* thread id */
@@ -170,6 +207,8 @@ struct fastrpc_map {
u64 size;
void *va;
u64 len;
+ u64 raddr;
+ u32 attr;
struct kref refcount;
};
@@ -189,7 +228,7 @@ struct fastrpc_invoke_ctx {
struct work_struct put_work;
struct fastrpc_msg msg;
struct fastrpc_user *fl;
- struct fastrpc_remote_arg *rpra;
+ union fastrpc_remote_arg *rpra;
struct fastrpc_map **maps;
struct fastrpc_buf *buf;
struct fastrpc_invoke_args *args;
@@ -207,13 +246,28 @@ struct fastrpc_session_ctx {
struct fastrpc_channel_ctx {
int domain_id;
int sesscount;
+ int vmcount;
+ u32 perms;
+ struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
struct rpmsg_device *rpdev;
struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
spinlock_t lock;
struct idr ctx_idr;
struct list_head users;
- struct miscdevice miscdev;
struct kref refcount;
+ /* Flag if dsp attributes are cached */
+ bool valid_attributes;
+ u32 dsp_attributes[FASTRPC_MAX_DSP_ATTRIBUTES];
+ struct fastrpc_device *secure_fdevice;
+ struct fastrpc_device *fdevice;
+ bool secure;
+ bool unsigned_support;
+};
+
+struct fastrpc_device {
+ struct fastrpc_channel_ctx *cctx;
+ struct miscdevice miscdev;
+ bool secure;
};
struct fastrpc_user {
@@ -228,6 +282,7 @@ struct fastrpc_user {
int tgid;
int pd;
+ bool is_secure_dev;
/* Lock for lists */
spinlock_t lock;
/* lock for allocations */
@@ -241,6 +296,20 @@ static void fastrpc_free_map(struct kref *ref)
map = container_of(ref, struct fastrpc_map, refcount);
if (map->table) {
+ if (map->attr & FASTRPC_ATTR_SECUREMAP) {
+ struct qcom_scm_vmperm perm;
+ int err = 0;
+
+ perm.vmid = QCOM_SCM_VMID_HLOS;
+ perm.perm = QCOM_SCM_PERM_RWX;
+ err = qcom_scm_assign_mem(map->phys, map->size,
+ &(map->fl->cctx->vmperms[0].vmid), &perm, 1);
+ if (err) {
+ dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
+ map->phys, map->size, err);
+ return;
+ }
+ }
dma_buf_unmap_attachment(map->attach, map->table,
DMA_BIDIRECTIONAL);
dma_buf_detach(map->buf, map->attach);
@@ -262,7 +331,8 @@ static void fastrpc_map_get(struct fastrpc_map *map)
kref_get(&map->refcount);
}
-static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
+
+static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
struct fastrpc_map **ppmap)
{
struct fastrpc_map *map = NULL;
@@ -270,7 +340,6 @@ static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
mutex_lock(&fl->mutex);
list_for_each_entry(map, &fl->maps, node) {
if (map->fd == fd) {
- fastrpc_map_get(map);
*ppmap = map;
mutex_unlock(&fl->mutex);
return 0;
@@ -281,6 +350,17 @@ static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
return -ENOENT;
}
+static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
+ struct fastrpc_map **ppmap)
+{
+ int ret = fastrpc_map_lookup(fl, fd, ppmap);
+
+ if (!ret)
+ fastrpc_map_get(*ppmap);
+
+ return ret;
+}
+
static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
dma_free_coherent(buf->dev, buf->size, buf->virt,
@@ -353,7 +433,7 @@ static void fastrpc_context_free(struct kref *ref)
ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
cctx = ctx->cctx;
- for (i = 0; i < ctx->nscalars; i++)
+ for (i = 0; i < ctx->nbufs; i++)
fastrpc_map_put(ctx->maps[i]);
if (ctx->buf)
@@ -617,7 +697,7 @@ static const struct dma_buf_ops fastrpc_dma_buf_ops = {
};
static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
- u64 len, struct fastrpc_map **ppmap)
+ u64 len, u32 attr, struct fastrpc_map **ppmap)
{
struct fastrpc_session_ctx *sess = fl->sctx;
struct fastrpc_map *map = NULL;
@@ -659,6 +739,22 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
map->len = len;
kref_init(&map->refcount);
+ if (attr & FASTRPC_ATTR_SECUREMAP) {
+ /*
+ * If subsystem VMIDs are defined in DTSI, then do
+ * hyp_assign from HLOS to those VM(s)
+ */
+ unsigned int perms = BIT(QCOM_SCM_VMID_HLOS);
+
+ map->attr = attr;
+ err = qcom_scm_assign_mem(map->phys, (u64)map->size, &perms,
+ fl->cctx->vmperms, fl->cctx->vmcount);
+ if (err) {
+ dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
+ map->phys, map->size, err);
+ goto map_err;
+ }
+ }
spin_lock(&fl->lock);
list_add_tail(&map->node, &fl->maps);
spin_unlock(&fl->lock);
@@ -682,7 +778,7 @@ get_err:
* >>>>>> START of METADATA <<<<<<<<<
* +---------------------------------+
* | Arguments |
- * | type:(struct fastrpc_remote_arg)|
+ * | type:(union fastrpc_remote_arg) |
* | (0 - N) |
* +---------------------------------+
* | Invoke Buffer list |
@@ -707,7 +803,7 @@ static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
int size = 0;
- size = (sizeof(struct fastrpc_remote_arg) +
+ size = (sizeof(struct fastrpc_remote_buf) +
sizeof(struct fastrpc_invoke_buf) +
sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
sizeof(u64) * FASTRPC_MAX_FDLIST +
@@ -743,16 +839,13 @@ static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
int i, err;
for (i = 0; i < ctx->nscalars; ++i) {
- /* Make sure reserved field is set to 0 */
- if (ctx->args[i].reserved)
- return -EINVAL;
if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
ctx->args[i].length == 0)
continue;
err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
- ctx->args[i].length, &ctx->maps[i]);
+ ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
if (err) {
dev_err(dev, "Error Creating map %d\n", err);
return -EINVAL;
@@ -762,10 +855,20 @@ static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
return 0;
}
+static struct fastrpc_invoke_buf *fastrpc_invoke_buf_start(union fastrpc_remote_arg *pra, int len)
+{
+ return (struct fastrpc_invoke_buf *)(&pra[len]);
+}
+
+static struct fastrpc_phy_page *fastrpc_phy_page_start(struct fastrpc_invoke_buf *buf, int len)
+{
+ return (struct fastrpc_phy_page *)(&buf[len]);
+}
+
static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
struct device *dev = ctx->fl->sctx->dev;
- struct fastrpc_remote_arg *rpra;
+ union fastrpc_remote_arg *rpra;
struct fastrpc_invoke_buf *list;
struct fastrpc_phy_page *pages;
int inbufs, i, oix, err = 0;
@@ -789,9 +892,8 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
return err;
rpra = ctx->buf->virt;
- list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
- pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
- sizeof(*rpra));
+ list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
+ pages = fastrpc_phy_page_start(list, ctx->nscalars);
args = (uintptr_t)ctx->buf->virt + metalen;
rlen = pkt_size - metalen;
ctx->rpra = rpra;
@@ -802,8 +904,8 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
i = ctx->olaps[oix].raix;
len = ctx->args[i].length;
- rpra[i].pv = 0;
- rpra[i].len = len;
+ rpra[i].buf.pv = 0;
+ rpra[i].buf.len = len;
list[i].num = len ? 1 : 0;
list[i].pgidx = i;
@@ -813,7 +915,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
if (ctx->maps[i]) {
struct vm_area_struct *vma = NULL;
- rpra[i].pv = (u64) ctx->args[i].ptr;
+ rpra[i].buf.pv = (u64) ctx->args[i].ptr;
pages[i].addr = ctx->maps[i]->phys;
mmap_read_lock(current->mm);
@@ -840,7 +942,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
if (rlen < mlen)
goto bail;
- rpra[i].pv = args - ctx->olaps[oix].offset;
+ rpra[i].buf.pv = args - ctx->olaps[oix].offset;
pages[i].addr = ctx->buf->phys -
ctx->olaps[oix].offset +
(pkt_size - rlen);
@@ -854,7 +956,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
}
if (i < inbufs && !ctx->maps[i]) {
- void *dst = (void *)(uintptr_t)rpra[i].pv;
+ void *dst = (void *)(uintptr_t)rpra[i].buf.pv;
void *src = (void *)(uintptr_t)ctx->args[i].ptr;
if (!kernel) {
@@ -870,12 +972,15 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
}
for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
- rpra[i].pv = (u64) ctx->args[i].ptr;
- rpra[i].len = ctx->args[i].length;
list[i].num = ctx->args[i].length ? 1 : 0;
list[i].pgidx = i;
- pages[i].addr = ctx->maps[i]->phys;
- pages[i].size = ctx->maps[i]->size;
+ if (ctx->maps[i]) {
+ pages[i].addr = ctx->maps[i]->phys;
+ pages[i].size = ctx->maps[i]->size;
+ }
+ rpra[i].dma.fd = ctx->args[i].fd;
+ rpra[i].dma.len = ctx->args[i].length;
+ rpra[i].dma.offset = (u64) ctx->args[i].ptr;
}
bail:
@@ -888,16 +993,26 @@ bail:
static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
u32 kernel)
{
- struct fastrpc_remote_arg *rpra = ctx->rpra;
- int i, inbufs;
+ union fastrpc_remote_arg *rpra = ctx->rpra;
+ struct fastrpc_user *fl = ctx->fl;
+ struct fastrpc_map *mmap = NULL;
+ struct fastrpc_invoke_buf *list;
+ struct fastrpc_phy_page *pages;
+ u64 *fdlist;
+ int i, inbufs, outbufs, handles;
inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
+ outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
+ handles = REMOTE_SCALARS_INHANDLES(ctx->sc) + REMOTE_SCALARS_OUTHANDLES(ctx->sc);
+ list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
+ pages = fastrpc_phy_page_start(list, ctx->nscalars);
+ fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
for (i = inbufs; i < ctx->nbufs; ++i) {
if (!ctx->maps[i]) {
- void *src = (void *)(uintptr_t)rpra[i].pv;
+ void *src = (void *)(uintptr_t)rpra[i].buf.pv;
void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
- u64 len = rpra[i].len;
+ u64 len = rpra[i].buf.len;
if (!kernel) {
if (copy_to_user((void __user *)dst, src, len))
@@ -908,6 +1023,13 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
}
}
+ for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
+ if (!fdlist[i])
+ break;
+ if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
+ fastrpc_map_put(mmap);
+ }
+
return 0;
}
@@ -1016,6 +1138,24 @@ bail:
return err;
}
+static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_request)
+{
+ /* Check if the device node is non-secure and the channel is secure */
+ if (!fl->is_secure_dev && fl->cctx->secure) {
+ /*
+ * Allow untrusted applications to offload only to the unsigned PD when the
+ * channel is configured as secure, and block untrusted apps on a channel
+ * that does not support unsigned PD offload.
+ */
+ if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
+ dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD");
+ return true;
+ }
+ }
+
+ return false;
+}
+
static int fastrpc_init_create_process(struct fastrpc_user *fl,
char __user *argp)
{
@@ -1035,6 +1175,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
u32 siglen;
} inbuf;
u32 sc;
+ bool unsigned_module = false;
args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
if (!args)
@@ -1045,6 +1186,14 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
goto err;
}
+ if (init.attrs & FASTRPC_MODE_UNSIGNED_MODULE)
+ unsigned_module = true;
+
+ if (is_session_rejected(fl, unsigned_module)) {
+ err = -ECONNREFUSED;
+ goto err;
+ }
+
if (init.filelen > INIT_FILELEN_MAX) {
err = -EINVAL;
goto err;
@@ -1059,7 +1208,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
fl->pd = USER_PD;
if (init.filelen && init.filefd) {
- err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
+ err = fastrpc_map_create(fl, init.filefd, init.filelen, 0, &map);
if (err)
goto err;
}
@@ -1168,7 +1317,6 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
args[0].ptr = (u64)(uintptr_t) &tgid;
args[0].length = sizeof(tgid);
args[0].fd = -1;
- args[0].reserved = 0;
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
@@ -1220,10 +1368,14 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
- struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
+ struct fastrpc_channel_ctx *cctx;
+ struct fastrpc_device *fdevice;
struct fastrpc_user *fl = NULL;
unsigned long flags;
+ fdevice = miscdev_to_fdevice(filp->private_data);
+ cctx = fdevice->cctx;
+
fl = kzalloc(sizeof(*fl), GFP_KERNEL);
if (!fl)
return -ENOMEM;
@@ -1240,6 +1392,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
INIT_LIST_HEAD(&fl->user);
fl->tgid = current->tgid;
fl->cctx = cctx;
+ fl->is_secure_dev = fdevice->secure;
fl->sctx = fastrpc_session_alloc(cctx);
if (!fl->sctx) {
@@ -1311,7 +1464,6 @@ static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
args[0].ptr = (u64)(uintptr_t) &tgid;
args[0].length = sizeof(tgid);
args[0].fd = -1;
- args[0].reserved = 0;
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
fl->pd = pd;
@@ -1349,6 +1501,107 @@ static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
return err;
}
+static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr_buf,
+ uint32_t dsp_attr_buf_len)
+{
+ struct fastrpc_invoke_args args[2] = { 0 };
+
+ /* Capability filled in userspace */
+ dsp_attr_buf[0] = 0;
+
+ args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len;
+ args[0].length = sizeof(dsp_attr_buf_len);
+ args[0].fd = -1;
+ args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
+ args[1].length = dsp_attr_buf_len;
+ args[1].fd = -1;
+ fl->pd = 1;
+
+ return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
+ FASTRPC_SCALARS(0, 1, 1), args);
+}
+
+static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
+ struct fastrpc_user *fl)
+{
+ struct fastrpc_channel_ctx *cctx = fl->cctx;
+ uint32_t attribute_id = cap->attribute_id;
+ uint32_t *dsp_attributes;
+ unsigned long flags;
+ uint32_t domain = cap->domain;
+ int err;
+
+ spin_lock_irqsave(&cctx->lock, flags);
+ /* check if we have already queried the DSP for attributes */
+ if (cctx->valid_attributes) {
+ spin_unlock_irqrestore(&cctx->lock, flags);
+ goto done;
+ }
+ spin_unlock_irqrestore(&cctx->lock, flags);
+
+ dsp_attributes = kzalloc(FASTRPC_MAX_DSP_ATTRIBUTES_LEN, GFP_KERNEL);
+ if (!dsp_attributes)
+ return -ENOMEM;
+
+ err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
+ if (err == DSP_UNSUPPORTED_API) {
+ dev_info(&cctx->rpdev->dev,
+ "Warning: DSP capabilities not supported on domain: %d\n", domain);
+ kfree(dsp_attributes);
+ return -EOPNOTSUPP;
+ } else if (err) {
+ dev_err(&cctx->rpdev->dev, "Error: dsp information is incorrect err: %d\n", err);
+ kfree(dsp_attributes);
+ return err;
+ }
+
+ spin_lock_irqsave(&cctx->lock, flags);
+ memcpy(cctx->dsp_attributes, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
+ cctx->valid_attributes = true;
+ spin_unlock_irqrestore(&cctx->lock, flags);
+ kfree(dsp_attributes);
+done:
+ cap->capability = cctx->dsp_attributes[attribute_id];
+ return 0;
+}
+
+static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_ioctl_capability cap = {0};
+ int err = 0;
+
+ if (copy_from_user(&cap, argp, sizeof(cap)))
+ return -EFAULT;
+
+ cap.capability = 0;
+ if (cap.domain >= FASTRPC_DEV_MAX) {
+ dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n",
+ cap.domain, err);
+ return -ECHRNG;
+ }
+
+ /* Fastrpc capabilities are not supported on the modem domain */
+ if (cap.domain == MDSP_DOMAIN_ID) {
+ dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err);
+ return -ECHRNG;
+ }
+
+ if (cap.attribute_id >= FASTRPC_MAX_DSP_ATTRIBUTES) {
+ dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n",
+ cap.attribute_id, err);
+ return -EOVERFLOW;
+ }
+
+ err = fastrpc_get_info_from_kernel(&cap, fl);
+ if (err)
+ return err;
+
+ if (copy_to_user(argp, &cap.capability, sizeof(cap.capability)))
+ return -EFAULT;
+
+ return 0;
+}
+
static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
struct fastrpc_req_munmap *req)
{
@@ -1491,6 +1744,134 @@ err_invoke:
return err;
}
+static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
+{
+ struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
+ struct fastrpc_map *map = NULL, *m;
+ struct fastrpc_mem_unmap_req_msg req_msg = { 0 };
+ int err = 0;
+ u32 sc;
+ struct device *dev = fl->sctx->dev;
+
+ spin_lock(&fl->lock);
+ list_for_each_entry_safe(map, m, &fl->maps, node) {
+ if ((req->fd < 0 || map->fd == req->fd) && (map->raddr == req->vaddr))
+ break;
+ map = NULL;
+ }
+
+ spin_unlock(&fl->lock);
+
+ if (!map) {
+ dev_err(dev, "map not in list\n");
+ return -EINVAL;
+ }
+
+ req_msg.pgid = fl->tgid;
+ req_msg.len = map->len;
+ req_msg.vaddrin = map->raddr;
+ req_msg.fd = map->fd;
+
+ args[0].ptr = (u64) (uintptr_t) &req_msg;
+ args[0].length = sizeof(req_msg);
+
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
+ err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
+ &args[0]);
+ if (err)
+ dev_err(dev, "mem unmap error, fd %d, vaddr 0x%09llx\n", map->fd, map->raddr);
+ fastrpc_map_put(map);
+
+ return err;
+}
+
+static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_mem_unmap req;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ return fastrpc_req_mem_unmap_impl(fl, &req);
+}
+
+static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_invoke_args args[4] = { [0 ... 3] = { 0 } };
+ struct fastrpc_mem_map_req_msg req_msg = { 0 };
+ struct fastrpc_mmap_rsp_msg rsp_msg = { 0 };
+ struct fastrpc_mem_unmap req_unmap = { 0 };
+ struct fastrpc_phy_page pages = { 0 };
+ struct fastrpc_mem_map req;
+ struct device *dev = fl->sctx->dev;
+ struct fastrpc_map *map = NULL;
+ int err;
+ u32 sc;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ /* create SMMU mapping */
+ err = fastrpc_map_create(fl, req.fd, req.length, 0, &map);
+ if (err) {
+ dev_err(dev, "failed to map buffer, fd = %d\n", req.fd);
+ return err;
+ }
+
+ req_msg.pgid = fl->tgid;
+ req_msg.fd = req.fd;
+ req_msg.offset = req.offset;
+ req_msg.vaddrin = req.vaddrin;
+ map->va = (void *) (uintptr_t) req.vaddrin;
+ req_msg.flags = req.flags;
+ req_msg.num = sizeof(pages);
+ req_msg.data_len = 0;
+
+ args[0].ptr = (u64) (uintptr_t) &req_msg;
+ args[0].length = sizeof(req_msg);
+
+ pages.addr = map->phys;
+ pages.size = map->size;
+
+ args[1].ptr = (u64) (uintptr_t) &pages;
+ args[1].length = sizeof(pages);
+
+ args[2].ptr = (u64) (uintptr_t) &pages;
+ args[2].length = 0;
+
+ args[3].ptr = (u64) (uintptr_t) &rsp_msg;
+ args[3].length = sizeof(rsp_msg);
+
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1);
+ err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
+ if (err) {
+ dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
+ req.fd, req.vaddrin, map->size);
+ goto err_invoke;
+ }
+
+ /* update the buffer to be able to deallocate the memory on the DSP */
+ map->raddr = rsp_msg.vaddr;
+
+ /* let the client know the address to use */
+ req.vaddrout = rsp_msg.vaddr;
+
+ if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
+ /* unmap the memory and release the buffer */
+ req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
+ req_unmap.length = map->size;
+ fastrpc_req_mem_unmap_impl(fl, &req_unmap);
+ return -EFAULT;
+ }
+
+ return 0;
+
+err_invoke:
+ fastrpc_map_put(map);
+
+ return err;
+}
+
static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -1520,6 +1901,15 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
case FASTRPC_IOCTL_MUNMAP:
err = fastrpc_req_munmap(fl, argp);
break;
+ case FASTRPC_IOCTL_MEM_MAP:
+ err = fastrpc_req_mem_map(fl, argp);
+ break;
+ case FASTRPC_IOCTL_MEM_UNMAP:
+ err = fastrpc_req_mem_unmap(fl, argp);
+ break;
+ case FASTRPC_IOCTL_GET_DSP_INFO:
+ err = fastrpc_get_dsp_info(fl, argp);
+ break;
default:
err = -ENOTTY;
break;
@@ -1615,12 +2005,41 @@ static struct platform_driver fastrpc_cb_driver = {
},
};
+static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ctx *cctx,
+ bool is_secured, const char *domain)
+{
+ struct fastrpc_device *fdev;
+ int err;
+
+ fdev = devm_kzalloc(dev, sizeof(*fdev), GFP_KERNEL);
+ if (!fdev)
+ return -ENOMEM;
+
+ fdev->secure = is_secured;
+ fdev->cctx = cctx;
+ fdev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ fdev->miscdev.fops = &fastrpc_fops;
+ fdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "fastrpc-%s%s",
+ domain, is_secured ? "-secure" : "");
+ err = misc_register(&fdev->miscdev);
+ if (!err) {
+ if (is_secured)
+ cctx->secure_fdevice = fdev;
+ else
+ cctx->fdevice = fdev;
+ }
+
+ return err;
+}
+
static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
struct device *rdev = &rpdev->dev;
struct fastrpc_channel_ctx *data;
- int i, err, domain_id = -1;
+ int i, err, domain_id = -1, vmcount;
const char *domain;
+ bool secure_dsp;
+ unsigned int vmids[FASTRPC_MAX_VMIDS];
err = of_property_read_string(rdev->of_node, "label", &domain);
if (err) {
@@ -1640,18 +2059,53 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
return -EINVAL;
}
+ vmcount = of_property_read_variable_u32_array(rdev->of_node,
+ "qcom,vmids", &vmids[0], 0, FASTRPC_MAX_VMIDS);
+ if (vmcount < 0)
+ vmcount = 0;
+ else if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->miscdev.minor = MISC_DYNAMIC_MINOR;
- data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s",
- domains[domain_id]);
- data->miscdev.fops = &fastrpc_fops;
- err = misc_register(&data->miscdev);
- if (err) {
- kfree(data);
- return err;
+ if (vmcount) {
+ data->vmcount = vmcount;
+ data->perms = BIT(QCOM_SCM_VMID_HLOS);
+ for (i = 0; i < data->vmcount; i++) {
+ data->vmperms[i].vmid = vmids[i];
+ data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
+ }
+ }
+
+ secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain"));
+ data->secure = secure_dsp;
+
+ switch (domain_id) {
+ case ADSP_DOMAIN_ID:
+ case MDSP_DOMAIN_ID:
+ case SDSP_DOMAIN_ID:
+ /* Unsigned PD offloading is only supported on CDSP */
+ data->unsigned_support = false;
+ err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]);
+ if (err)
+ goto fdev_error;
+ break;
+ case CDSP_DOMAIN_ID:
+ data->unsigned_support = true;
+ /* Create both device nodes so that we can allow both Signed and Unsigned PD */
+ err = fastrpc_device_register(rdev, data, true, domains[domain_id]);
+ if (err)
+ goto fdev_error;
+
+ err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
+ if (err)
+ goto fdev_error;
+ break;
+ default:
+ err = -EINVAL;
+ goto fdev_error;
}
kref_init(&data->refcount);
@@ -1665,6 +2119,9 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
data->rpdev = rpdev;
return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
+fdev_error:
+ kfree(data);
+ return err;
}
static void fastrpc_notify_users(struct fastrpc_user *user)
@@ -1688,7 +2145,12 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
fastrpc_notify_users(user);
spin_unlock_irqrestore(&cctx->lock, flags);
- misc_deregister(&cctx->miscdev);
+ if (cctx->fdevice)
+ misc_deregister(&cctx->fdevice->miscdev);
+
+ if (cctx->secure_fdevice)
+ misc_deregister(&cctx->secure_fdevice->miscdev);
+
of_platform_depopulate(&rpdev->dev);
cctx->rpdev = NULL;