Diffstat (limited to 'drivers/staging/android/ion')
-rw-r--r--  drivers/staging/android/ion/Kconfig           |  27 -
-rw-r--r--  drivers/staging/android/ion/Makefile          |   4 -
-rw-r--r--  drivers/staging/android/ion/ion.c             | 649 -
-rw-r--r--  drivers/staging/android/ion/ion.h             | 302 -
-rw-r--r--  drivers/staging/android/ion/ion_cma_heap.c    | 138 -
-rw-r--r--  drivers/staging/android/ion/ion_heap.c        | 286 -
-rw-r--r--  drivers/staging/android/ion/ion_page_pool.c   | 155 -
-rw-r--r--  drivers/staging/android/ion/ion_system_heap.c | 377 -
8 files changed, 0 insertions(+), 1938 deletions(-)
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig deleted file mode 100644 index 989fe84a9f9d..000000000000 --- a/drivers/staging/android/ion/Kconfig +++ /dev/null @@ -1,27 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -menuconfig ION - bool "Ion Memory Manager" - depends on HAS_DMA && MMU - select GENERIC_ALLOCATOR - select DMA_SHARED_BUFFER - help - Choose this option to enable the ION Memory Manager, - used by Android to efficiently allocate buffers - from userspace that can be shared between drivers. - If you're not using Android its probably safe to - say N here. - -config ION_SYSTEM_HEAP - bool "Ion system heap" - depends on ION - help - Choose this option to enable the Ion system heap. The system heap - is backed by pages from the buddy allocator. If in doubt, say Y. - -config ION_CMA_HEAP - bool "Ion CMA heap support" - depends on ION && DMA_CMA - help - Choose this option to enable CMA heaps with Ion. This heap is backed - by the Contiguous Memory Allocator (CMA). If your system has these - regions, you should say Y here. diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile deleted file mode 100644 index 5f4487b1a224..000000000000 --- a/drivers/staging/android/ion/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_ION) += ion.o ion_heap.o -obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_system_heap.o ion_page_pool.o -obj-$(CONFIG_ION_CMA_HEAP) += ion_cma_heap.o diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c deleted file mode 100644 index e1fe03ceb7f1..000000000000 --- a/drivers/staging/android/ion/ion.c +++ /dev/null @@ -1,649 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * ION Memory Allocator - * - * Copyright (C) 2011 Google, Inc. 
- */ - -#include <linux/debugfs.h> -#include <linux/device.h> -#include <linux/dma-buf.h> -#include <linux/err.h> -#include <linux/export.h> -#include <linux/file.h> -#include <linux/freezer.h> -#include <linux/fs.h> -#include <linux/kthread.h> -#include <linux/list.h> -#include <linux/miscdevice.h> -#include <linux/mm.h> -#include <linux/mm_types.h> -#include <linux/rbtree.h> -#include <linux/sched/task.h> -#include <linux/slab.h> -#include <linux/uaccess.h> -#include <linux/vmalloc.h> - -#include "ion.h" - -static struct ion_device *internal_dev; -static int heap_id; - -/* this function should only be called while dev->lock is held */ -static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, - struct ion_device *dev, - unsigned long len, - unsigned long flags) -{ - struct ion_buffer *buffer; - int ret; - - buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); - if (!buffer) - return ERR_PTR(-ENOMEM); - - buffer->heap = heap; - buffer->flags = flags; - buffer->dev = dev; - buffer->size = len; - - ret = heap->ops->allocate(heap, buffer, len, flags); - - if (ret) { - if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE)) - goto err2; - - ion_heap_freelist_drain(heap, 0); - ret = heap->ops->allocate(heap, buffer, len, flags); - if (ret) - goto err2; - } - - if (!buffer->sg_table) { - WARN_ONCE(1, "This heap needs to set the sgtable"); - ret = -EINVAL; - goto err1; - } - - spin_lock(&heap->stat_lock); - heap->num_of_buffers++; - heap->num_of_alloc_bytes += len; - if (heap->num_of_alloc_bytes > heap->alloc_bytes_wm) - heap->alloc_bytes_wm = heap->num_of_alloc_bytes; - spin_unlock(&heap->stat_lock); - - INIT_LIST_HEAD(&buffer->attachments); - mutex_init(&buffer->lock); - return buffer; - -err1: - heap->ops->free(buffer); -err2: - kfree(buffer); - return ERR_PTR(ret); -} - -void ion_buffer_destroy(struct ion_buffer *buffer) -{ - if (buffer->kmap_cnt > 0) { - pr_warn_once("%s: buffer still mapped in the kernel\n", - __func__); - buffer->heap->ops->unmap_kernel(buffer->heap, buffer); - } - buffer->heap->ops->free(buffer); - spin_lock(&buffer->heap->stat_lock); - buffer->heap->num_of_buffers--; - buffer->heap->num_of_alloc_bytes -= buffer->size; - spin_unlock(&buffer->heap->stat_lock); - - kfree(buffer); -} - -static void _ion_buffer_destroy(struct ion_buffer *buffer) -{ - struct ion_heap *heap = buffer->heap; - - if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) - ion_heap_freelist_add(heap, buffer); - else - ion_buffer_destroy(buffer); -} - -static void *ion_buffer_kmap_get(struct ion_buffer *buffer) -{ - void *vaddr; - - if (buffer->kmap_cnt) { - buffer->kmap_cnt++; - return buffer->vaddr; - } - vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); - if (WARN_ONCE(!vaddr, - "heap->ops->map_kernel should return ERR_PTR on error")) - return ERR_PTR(-EINVAL); - if (IS_ERR(vaddr)) - return vaddr; - buffer->vaddr = vaddr; - buffer->kmap_cnt++; - return vaddr; -} - -static void ion_buffer_kmap_put(struct ion_buffer *buffer) -{ - buffer->kmap_cnt--; - if (!buffer->kmap_cnt) { - buffer->heap->ops->unmap_kernel(buffer->heap, buffer); - buffer->vaddr = NULL; - } -} - -static struct sg_table *dup_sg_table(struct sg_table *table) -{ - struct sg_table *new_table; - int ret, i; - struct scatterlist *sg, *new_sg; - - new_table = kzalloc(sizeof(*new_table), GFP_KERNEL); - if (!new_table) - return ERR_PTR(-ENOMEM); - - ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL); - if (ret) { - kfree(new_table); - return ERR_PTR(-ENOMEM); - } - - new_sg = new_table->sgl; - for_each_sgtable_sg(table, sg, i) { - 
memcpy(new_sg, sg, sizeof(*sg)); - new_sg->dma_address = 0; - new_sg = sg_next(new_sg); - } - - return new_table; -} - -static void free_duped_table(struct sg_table *table) -{ - sg_free_table(table); - kfree(table); -} - -struct ion_dma_buf_attachment { - struct device *dev; - struct sg_table *table; - struct list_head list; -}; - -static int ion_dma_buf_attach(struct dma_buf *dmabuf, - struct dma_buf_attachment *attachment) -{ - struct ion_dma_buf_attachment *a; - struct sg_table *table; - struct ion_buffer *buffer = dmabuf->priv; - - a = kzalloc(sizeof(*a), GFP_KERNEL); - if (!a) - return -ENOMEM; - - table = dup_sg_table(buffer->sg_table); - if (IS_ERR(table)) { - kfree(a); - return -ENOMEM; - } - - a->table = table; - a->dev = attachment->dev; - INIT_LIST_HEAD(&a->list); - - attachment->priv = a; - - mutex_lock(&buffer->lock); - list_add(&a->list, &buffer->attachments); - mutex_unlock(&buffer->lock); - - return 0; -} - -static void ion_dma_buf_detach(struct dma_buf *dmabuf, - struct dma_buf_attachment *attachment) -{ - struct ion_dma_buf_attachment *a = attachment->priv; - struct ion_buffer *buffer = dmabuf->priv; - - mutex_lock(&buffer->lock); - list_del(&a->list); - mutex_unlock(&buffer->lock); - free_duped_table(a->table); - - kfree(a); -} - -static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment, - enum dma_data_direction direction) -{ - struct ion_dma_buf_attachment *a = attachment->priv; - struct sg_table *table; - int ret; - - table = a->table; - - ret = dma_map_sgtable(attachment->dev, table, direction, 0); - if (ret) - return ERR_PTR(ret); - - return table; -} - -static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment, - struct sg_table *table, - enum dma_data_direction direction) -{ - dma_unmap_sgtable(attachment->dev, table, direction, 0); -} - -static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) -{ - struct ion_buffer *buffer = dmabuf->priv; - int ret = 0; - - if (!buffer->heap->ops->map_user) { - pr_err("%s: this heap does not define a method for mapping to userspace\n", - __func__); - return -EINVAL; - } - - if (!(buffer->flags & ION_FLAG_CACHED)) - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); - - mutex_lock(&buffer->lock); - /* now map it to userspace */ - ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); - mutex_unlock(&buffer->lock); - - if (ret) - pr_err("%s: failure mapping buffer to userspace\n", - __func__); - - return ret; -} - -static void ion_dma_buf_release(struct dma_buf *dmabuf) -{ - struct ion_buffer *buffer = dmabuf->priv; - - _ion_buffer_destroy(buffer); -} - -static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, - enum dma_data_direction direction) -{ - struct ion_buffer *buffer = dmabuf->priv; - void *vaddr; - struct ion_dma_buf_attachment *a; - int ret = 0; - - /* - * TODO: Move this elsewhere because we don't always need a vaddr - */ - if (buffer->heap->ops->map_kernel) { - mutex_lock(&buffer->lock); - vaddr = ion_buffer_kmap_get(buffer); - if (IS_ERR(vaddr)) { - ret = PTR_ERR(vaddr); - goto unlock; - } - mutex_unlock(&buffer->lock); - } - - mutex_lock(&buffer->lock); - list_for_each_entry(a, &buffer->attachments, list) - dma_sync_sgtable_for_cpu(a->dev, a->table, direction); - -unlock: - mutex_unlock(&buffer->lock); - return ret; -} - -static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, - enum dma_data_direction direction) -{ - struct ion_buffer *buffer = dmabuf->priv; - struct ion_dma_buf_attachment *a; - - if (buffer->heap->ops->map_kernel) { - 
mutex_lock(&buffer->lock); - ion_buffer_kmap_put(buffer); - mutex_unlock(&buffer->lock); - } - - mutex_lock(&buffer->lock); - list_for_each_entry(a, &buffer->attachments, list) - dma_sync_sgtable_for_device(a->dev, a->table, direction); - mutex_unlock(&buffer->lock); - - return 0; -} - -static const struct dma_buf_ops dma_buf_ops = { - .map_dma_buf = ion_map_dma_buf, - .unmap_dma_buf = ion_unmap_dma_buf, - .mmap = ion_mmap, - .release = ion_dma_buf_release, - .attach = ion_dma_buf_attach, - .detach = ion_dma_buf_detach, - .begin_cpu_access = ion_dma_buf_begin_cpu_access, - .end_cpu_access = ion_dma_buf_end_cpu_access, -}; - -static int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags) -{ - struct ion_device *dev = internal_dev; - struct ion_buffer *buffer = NULL; - struct ion_heap *heap; - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); - int fd; - struct dma_buf *dmabuf; - - pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__, - len, heap_id_mask, flags); - /* - * traverse the list of heaps available in this system in priority - * order. If the heap type is supported by the client, and matches the - * request of the caller allocate from it. Repeat until allocate has - * succeeded or all heaps have been tried - */ - len = PAGE_ALIGN(len); - - if (!len) - return -EINVAL; - - down_read(&dev->lock); - plist_for_each_entry(heap, &dev->heaps, node) { - /* if the caller didn't specify this heap id */ - if (!((1 << heap->id) & heap_id_mask)) - continue; - buffer = ion_buffer_create(heap, dev, len, flags); - if (!IS_ERR(buffer)) - break; - } - up_read(&dev->lock); - - if (!buffer) - return -ENODEV; - - if (IS_ERR(buffer)) - return PTR_ERR(buffer); - - exp_info.ops = &dma_buf_ops; - exp_info.size = buffer->size; - exp_info.flags = O_RDWR; - exp_info.priv = buffer; - - dmabuf = dma_buf_export(&exp_info); - if (IS_ERR(dmabuf)) { - _ion_buffer_destroy(buffer); - return PTR_ERR(dmabuf); - } - - fd = dma_buf_fd(dmabuf, O_CLOEXEC); - if (fd < 0) - dma_buf_put(dmabuf); - - return fd; -} - -static int ion_query_heaps(struct ion_heap_query *query) -{ - struct ion_device *dev = internal_dev; - struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps); - int ret = -EINVAL, cnt = 0, max_cnt; - struct ion_heap *heap; - struct ion_heap_data hdata; - - memset(&hdata, 0, sizeof(hdata)); - - down_read(&dev->lock); - if (!buffer) { - query->cnt = dev->heap_cnt; - ret = 0; - goto out; - } - - if (query->cnt <= 0) - goto out; - - max_cnt = query->cnt; - - plist_for_each_entry(heap, &dev->heaps, node) { - strncpy(hdata.name, heap->name, MAX_HEAP_NAME); - hdata.name[sizeof(hdata.name) - 1] = '\0'; - hdata.type = heap->type; - hdata.heap_id = heap->id; - - if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) { - ret = -EFAULT; - goto out; - } - - cnt++; - if (cnt >= max_cnt) - break; - } - - query->cnt = cnt; - ret = 0; -out: - up_read(&dev->lock); - return ret; -} - -union ion_ioctl_arg { - struct ion_allocation_data allocation; - struct ion_heap_query query; -}; - -static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg) -{ - switch (cmd) { - case ION_IOC_HEAP_QUERY: - if (arg->query.reserved0 || - arg->query.reserved1 || - arg->query.reserved2) - return -EINVAL; - break; - default: - break; - } - - return 0; -} - -static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - int ret = 0; - union ion_ioctl_arg data; - - if (_IOC_SIZE(cmd) > sizeof(data)) - return -EINVAL; - - /* - * The copy_from_user is unconditional here for both read and write - * to 
do the validate. If there is no write for the ioctl, the - * buffer is cleared - */ - if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) - return -EFAULT; - - ret = validate_ioctl_arg(cmd, &data); - if (ret) { - pr_warn_once("%s: ioctl validate failed\n", __func__); - return ret; - } - - if (!(_IOC_DIR(cmd) & _IOC_WRITE)) - memset(&data, 0, sizeof(data)); - - switch (cmd) { - case ION_IOC_ALLOC: - { - int fd; - - fd = ion_alloc(data.allocation.len, - data.allocation.heap_id_mask, - data.allocation.flags); - if (fd < 0) - return fd; - - data.allocation.fd = fd; - - break; - } - case ION_IOC_HEAP_QUERY: - ret = ion_query_heaps(&data.query); - break; - default: - return -ENOTTY; - } - - if (_IOC_DIR(cmd) & _IOC_READ) { - if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) - return -EFAULT; - } - return ret; -} - -static const struct file_operations ion_fops = { - .owner = THIS_MODULE, - .unlocked_ioctl = ion_ioctl, - .compat_ioctl = compat_ptr_ioctl, -}; - -static int debug_shrink_set(void *data, u64 val) -{ - struct ion_heap *heap = data; - struct shrink_control sc; - int objs; - - sc.gfp_mask = GFP_HIGHUSER; - sc.nr_to_scan = val; - - if (!val) { - objs = heap->shrinker.count_objects(&heap->shrinker, &sc); - sc.nr_to_scan = objs; - } - - heap->shrinker.scan_objects(&heap->shrinker, &sc); - return 0; -} - -static int debug_shrink_get(void *data, u64 *val) -{ - struct ion_heap *heap = data; - struct shrink_control sc; - int objs; - - sc.gfp_mask = GFP_HIGHUSER; - sc.nr_to_scan = 0; - - objs = heap->shrinker.count_objects(&heap->shrinker, &sc); - *val = objs; - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get, - debug_shrink_set, "%llu\n"); - -void ion_device_add_heap(struct ion_heap *heap) -{ - struct ion_device *dev = internal_dev; - int ret; - struct dentry *heap_root; - char debug_name[64]; - - if (!heap->ops->allocate || !heap->ops->free) - pr_err("%s: can not add heap with invalid ops struct.\n", - __func__); - - spin_lock_init(&heap->free_lock); - spin_lock_init(&heap->stat_lock); - heap->free_list_size = 0; - - if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) - ion_heap_init_deferred_free(heap); - - if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) { - ret = ion_heap_init_shrinker(heap); - if (ret) - pr_err("%s: Failed to register shrinker\n", __func__); - } - - heap->dev = dev; - heap->num_of_buffers = 0; - heap->num_of_alloc_bytes = 0; - heap->alloc_bytes_wm = 0; - - heap_root = debugfs_create_dir(heap->name, dev->debug_root); - debugfs_create_u64("num_of_buffers", - 0444, heap_root, - &heap->num_of_buffers); - debugfs_create_u64("num_of_alloc_bytes", - 0444, - heap_root, - &heap->num_of_alloc_bytes); - debugfs_create_u64("alloc_bytes_wm", - 0444, - heap_root, - &heap->alloc_bytes_wm); - - if (heap->shrinker.count_objects && - heap->shrinker.scan_objects) { - snprintf(debug_name, 64, "%s_shrink", heap->name); - debugfs_create_file(debug_name, - 0644, - heap_root, - heap, - &debug_shrink_fops); - } - - down_write(&dev->lock); - heap->id = heap_id++; - /* - * use negative heap->id to reverse the priority -- when traversing - * the list later attempt higher id numbers first - */ - plist_node_init(&heap->node, -heap->id); - plist_add(&heap->node, &dev->heaps); - - dev->heap_cnt++; - up_write(&dev->lock); -} -EXPORT_SYMBOL(ion_device_add_heap); - -static int ion_device_create(void) -{ - struct ion_device *idev; - int ret; - - idev = kzalloc(sizeof(*idev), GFP_KERNEL); - if (!idev) - return -ENOMEM; - - idev->dev.minor = 
MISC_DYNAMIC_MINOR; - idev->dev.name = "ion"; - idev->dev.fops = &ion_fops; - idev->dev.parent = NULL; - ret = misc_register(&idev->dev); - if (ret) { - pr_err("ion: failed to register misc device.\n"); - kfree(idev); - return ret; - } - - idev->debug_root = debugfs_create_dir("ion", NULL); - init_rwsem(&idev->lock); - plist_head_init(&idev->heaps); - internal_dev = idev; - return 0; -} -subsys_initcall(ion_device_create); diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h deleted file mode 100644 index c199e88afc6c..000000000000 --- a/drivers/staging/android/ion/ion.h +++ /dev/null @@ -1,302 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * ION Memory Allocator kernel interface header - * - * Copyright (C) 2011 Google, Inc. - */ - -#ifndef _ION_H -#define _ION_H - -#include <linux/device.h> -#include <linux/dma-direction.h> -#include <linux/kref.h> -#include <linux/mm_types.h> -#include <linux/mutex.h> -#include <linux/rbtree.h> -#include <linux/sched.h> -#include <linux/shrinker.h> -#include <linux/types.h> -#include <linux/miscdevice.h> - -#include "../uapi/ion.h" - -/** - * struct ion_buffer - metadata for a particular buffer - * @list: element in list of deferred freeable buffers - * @dev: back pointer to the ion_device - * @heap: back pointer to the heap the buffer came from - * @flags: buffer specific flags - * @private_flags: internal buffer specific flags - * @size: size of the buffer - * @priv_virt: private data to the buffer representable as - * a void * - * @lock: protects the buffers cnt fields - * @kmap_cnt: number of times the buffer is mapped to the kernel - * @vaddr: the kernel mapping if kmap_cnt is not zero - * @sg_table: the sg table for the buffer - * @attachments: list of devices attached to this buffer - */ -struct ion_buffer { - struct list_head list; - struct ion_device *dev; - struct ion_heap *heap; - unsigned long flags; - unsigned long private_flags; - size_t size; - void *priv_virt; - struct mutex lock; - int kmap_cnt; - void *vaddr; - struct sg_table *sg_table; - struct list_head attachments; -}; - -void ion_buffer_destroy(struct ion_buffer *buffer); - -/** - * struct ion_device - the metadata of the ion device node - * @dev: the actual misc device - * @lock: rwsem protecting the tree of heaps and clients - */ -struct ion_device { - struct miscdevice dev; - struct rw_semaphore lock; - struct plist_head heaps; - struct dentry *debug_root; - int heap_cnt; -}; - -/** - * struct ion_heap_ops - ops to operate on a given heap - * @allocate: allocate memory - * @free: free memory - * @map_kernel map memory to the kernel - * @unmap_kernel unmap memory to the kernel - * @map_user map memory to userspace - * - * allocate, phys, and map_user return 0 on success, -errno on error. - * map_dma and map_kernel return pointer on success, ERR_PTR on - * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in - * the buffer's private_flags when called from a shrinker. In that - * case, the pages being free'd must be truly free'd back to the - * system, not put in a page pool or otherwise cached. 
- */ -struct ion_heap_ops { - int (*allocate)(struct ion_heap *heap, - struct ion_buffer *buffer, unsigned long len, - unsigned long flags); - void (*free)(struct ion_buffer *buffer); - void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); - void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); - int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer, - struct vm_area_struct *vma); - int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan); -}; - -/** - * heap flags - flags between the heaps and core ion code - */ -#define ION_HEAP_FLAG_DEFER_FREE BIT(0) - -/** - * private flags - flags internal to ion - */ -/* - * Buffer is being freed from a shrinker function. Skip any possible - * heap-specific caching mechanism (e.g. page pools). Guarantees that - * any buffer storage that came from the system allocator will be - * returned to the system allocator. - */ -#define ION_PRIV_FLAG_SHRINKER_FREE BIT(0) - -/** - * struct ion_heap - represents a heap in the system - * @node: rb node to put the heap on the device's tree of heaps - * @dev: back pointer to the ion_device - * @type: type of heap - * @ops: ops struct as above - * @flags: flags - * @id: id of heap, also indicates priority of this heap when - * allocating. These are specified by platform data and - * MUST be unique - * @name: used for debugging - * @shrinker: a shrinker for the heap - * @free_list: free list head if deferred free is used - * @free_list_size size of the deferred free list in bytes - * @lock: protects the free list - * @waitqueue: queue to wait on from deferred free thread - * @task: task struct of deferred free thread - * @num_of_buffers the number of currently allocated buffers - * @num_of_alloc_bytes the number of allocated bytes - * @alloc_bytes_wm the number of allocated bytes watermark - * - * Represents a pool of memory from which buffers can be made. In some - * systems the only heap is regular system memory allocated via vmalloc. - * On others, some blocks might require large physically contiguous buffers - * that are allocated from a specially reserved heap. - */ -struct ion_heap { - struct plist_node node; - struct ion_device *dev; - enum ion_heap_type type; - struct ion_heap_ops *ops; - unsigned long flags; - unsigned int id; - const char *name; - - /* deferred free support */ - struct shrinker shrinker; - struct list_head free_list; - size_t free_list_size; - spinlock_t free_lock; - wait_queue_head_t waitqueue; - struct task_struct *task; - - /* heap statistics */ - u64 num_of_buffers; - u64 num_of_alloc_bytes; - u64 alloc_bytes_wm; - - /* protect heap statistics */ - spinlock_t stat_lock; -}; - -/** - * ion_device_add_heap - adds a heap to the ion device - * @heap: the heap to add - */ -void ion_device_add_heap(struct ion_heap *heap); - -/** - * some helpers for common operations on buffers using the sg_table - * and vaddr fields - */ -void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer); -void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer); -int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, - struct vm_area_struct *vma); -int ion_heap_buffer_zero(struct ion_buffer *buffer); - -/** - * ion_heap_init_shrinker - * @heap: the heap - * - * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op - * this function will be called to setup a shrinker to shrink the freelists - * and call the heap's shrink op. 
- */ -int ion_heap_init_shrinker(struct ion_heap *heap); - -/** - * ion_heap_init_deferred_free -- initialize deferred free functionality - * @heap: the heap - * - * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will - * be called to setup deferred frees. Calls to free the buffer will - * return immediately and the actual free will occur some time later - */ -int ion_heap_init_deferred_free(struct ion_heap *heap); - -/** - * ion_heap_freelist_add - add a buffer to the deferred free list - * @heap: the heap - * @buffer: the buffer - * - * Adds an item to the deferred freelist. - */ -void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer); - -/** - * ion_heap_freelist_drain - drain the deferred free list - * @heap: the heap - * @size: amount of memory to drain in bytes - * - * Drains the indicated amount of memory from the deferred freelist immediately. - * Returns the total amount freed. The total freed may be higher depending - * on the size of the items in the list, or lower if there is insufficient - * total memory on the freelist. - */ -size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size); - -/** - * ion_heap_freelist_shrink - drain the deferred free - * list, skipping any heap-specific - * pooling or caching mechanisms - * - * @heap: the heap - * @size: amount of memory to drain in bytes - * - * Drains the indicated amount of memory from the deferred freelist immediately. - * Returns the total amount freed. The total freed may be higher depending - * on the size of the items in the list, or lower if there is insufficient - * total memory on the freelist. - * - * Unlike with @ion_heap_freelist_drain, don't put any pages back into - * page pools or otherwise cache the pages. Everything must be - * genuinely free'd back to the system. If you're free'ing from a - * shrinker you probably want to use this. Note that this relies on - * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE - * flag. - */ -size_t ion_heap_freelist_shrink(struct ion_heap *heap, - size_t size); - -/** - * ion_heap_freelist_size - returns the size of the freelist in bytes - * @heap: the heap - */ -size_t ion_heap_freelist_size(struct ion_heap *heap); - -/** - * functions for creating and destroying a heap pool -- allows you - * to keep a pool of pre allocated memory to use from your heap. Keeping - * a pool of memory that is ready for dma, ie any cached mapping have been - * invalidated from the cache, provides a significant performance benefit on - * many systems - */ - -/** - * struct ion_page_pool - pagepool struct - * @high_count: number of highmem items in the pool - * @low_count: number of lowmem items in the pool - * @high_items: list of highmem items - * @low_items: list of lowmem items - * @mutex: lock protecting this struct and especially the count - * item list - * @gfp_mask: gfp_mask to use from alloc - * @order: order of pages in the pool - * @list: plist node for list of pools - * - * Allows you to keep a pool of pre allocated pages to use from your heap. 
- * Keeping a pool of pages that is ready for dma, ie any cached mapping have - * been invalidated from the cache, provides a significant performance benefit - * on many systems - */ -struct ion_page_pool { - int high_count; - int low_count; - struct list_head high_items; - struct list_head low_items; - struct mutex mutex; - gfp_t gfp_mask; - unsigned int order; - struct plist_node list; -}; - -struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order); -void ion_page_pool_destroy(struct ion_page_pool *pool); -struct page *ion_page_pool_alloc(struct ion_page_pool *pool); -void ion_page_pool_free(struct ion_page_pool *pool, struct page *page); - -/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool - * @pool: the pool - * @gfp_mask: the memory type to reclaim - * @nr_to_scan: number of items to shrink in pages - * - * returns the number of items freed in pages - */ -int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, - int nr_to_scan); - -#endif /* _ION_H */ diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c deleted file mode 100644 index bf65e67ef9d8..000000000000 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ /dev/null @@ -1,138 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * ION Memory Allocator CMA heap exporter - * - * Copyright (C) Linaro 2012 - * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson. - */ - -#include <linux/device.h> -#include <linux/slab.h> -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/cma.h> -#include <linux/scatterlist.h> -#include <linux/highmem.h> - -#include "ion.h" - -struct ion_cma_heap { - struct ion_heap heap; - struct cma *cma; -}; - -#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap) - -/* ION CMA heap operations functions */ -static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, - unsigned long len, - unsigned long flags) -{ - struct ion_cma_heap *cma_heap = to_cma_heap(heap); - struct sg_table *table; - struct page *pages; - unsigned long size = PAGE_ALIGN(len); - unsigned long nr_pages = size >> PAGE_SHIFT; - unsigned long align = get_order(size); - int ret; - - if (align > CONFIG_CMA_ALIGNMENT) - align = CONFIG_CMA_ALIGNMENT; - - pages = cma_alloc(cma_heap->cma, nr_pages, align, false); - if (!pages) - return -ENOMEM; - - if (PageHighMem(pages)) { - unsigned long nr_clear_pages = nr_pages; - struct page *page = pages; - - while (nr_clear_pages > 0) { - void *vaddr = kmap_atomic(page); - - memset(vaddr, 0, PAGE_SIZE); - kunmap_atomic(vaddr); - page++; - nr_clear_pages--; - } - } else { - memset(page_address(pages), 0, size); - } - - table = kmalloc(sizeof(*table), GFP_KERNEL); - if (!table) - goto err; - - ret = sg_alloc_table(table, 1, GFP_KERNEL); - if (ret) - goto free_mem; - - sg_set_page(table->sgl, pages, size, 0); - - buffer->priv_virt = pages; - buffer->sg_table = table; - return 0; - -free_mem: - kfree(table); -err: - cma_release(cma_heap->cma, pages, nr_pages); - return -ENOMEM; -} - -static void ion_cma_free(struct ion_buffer *buffer) -{ - struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); - struct page *pages = buffer->priv_virt; - unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; - - /* release memory */ - cma_release(cma_heap->cma, pages, nr_pages); - /* release sg table */ - sg_free_table(buffer->sg_table); - kfree(buffer->sg_table); -} - -static struct ion_heap_ops ion_cma_ops = { - .allocate = ion_cma_allocate, - .free = 
ion_cma_free, - .map_user = ion_heap_map_user, - .map_kernel = ion_heap_map_kernel, - .unmap_kernel = ion_heap_unmap_kernel, -}; - -static struct ion_heap *__ion_cma_heap_create(struct cma *cma) -{ - struct ion_cma_heap *cma_heap; - - cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL); - - if (!cma_heap) - return ERR_PTR(-ENOMEM); - - cma_heap->heap.ops = &ion_cma_ops; - cma_heap->cma = cma; - cma_heap->heap.type = ION_HEAP_TYPE_DMA; - return &cma_heap->heap; -} - -static int __ion_add_cma_heaps(struct cma *cma, void *data) -{ - struct ion_heap *heap; - - heap = __ion_cma_heap_create(cma); - if (IS_ERR(heap)) - return PTR_ERR(heap); - - heap->name = cma_get_name(cma); - - ion_device_add_heap(heap); - return 0; -} - -static int ion_add_cma_heaps(void) -{ - cma_for_each_area(__ion_add_cma_heaps, NULL); - return 0; -} -device_initcall(ion_add_cma_heaps); diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c deleted file mode 100644 index ea7e0a244ffc..000000000000 --- a/drivers/staging/android/ion/ion_heap.c +++ /dev/null @@ -1,286 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * ION Memory Allocator generic heap helpers - * - * Copyright (C) 2011 Google, Inc. - */ - -#include <linux/err.h> -#include <linux/freezer.h> -#include <linux/kthread.h> -#include <linux/mm.h> -#include <linux/rtmutex.h> -#include <linux/sched.h> -#include <uapi/linux/sched/types.h> -#include <linux/scatterlist.h> -#include <linux/vmalloc.h> - -#include "ion.h" - -void *ion_heap_map_kernel(struct ion_heap *heap, - struct ion_buffer *buffer) -{ - struct sg_page_iter piter; - void *vaddr; - pgprot_t pgprot; - struct sg_table *table = buffer->sg_table; - int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; - struct page **pages = vmalloc(array_size(npages, - sizeof(struct page *))); - struct page **tmp = pages; - - if (!pages) - return ERR_PTR(-ENOMEM); - - if (buffer->flags & ION_FLAG_CACHED) - pgprot = PAGE_KERNEL; - else - pgprot = pgprot_writecombine(PAGE_KERNEL); - - for_each_sgtable_page(table, &piter, 0) { - BUG_ON(tmp - pages >= npages); - *tmp++ = sg_page_iter_page(&piter); - } - - vaddr = vmap(pages, npages, VM_MAP, pgprot); - vfree(pages); - - if (!vaddr) - return ERR_PTR(-ENOMEM); - - return vaddr; -} - -void ion_heap_unmap_kernel(struct ion_heap *heap, - struct ion_buffer *buffer) -{ - vunmap(buffer->vaddr); -} - -int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, - struct vm_area_struct *vma) -{ - struct sg_page_iter piter; - struct sg_table *table = buffer->sg_table; - unsigned long addr = vma->vm_start; - int ret; - - for_each_sgtable_page(table, &piter, vma->vm_pgoff) { - struct page *page = sg_page_iter_page(&piter); - - ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, - vma->vm_page_prot); - if (ret) - return ret; - addr += PAGE_SIZE; - if (addr >= vma->vm_end) - return 0; - } - - return 0; -} - -static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) -{ - void *addr = vmap(pages, num, VM_MAP, pgprot); - - if (!addr) - return -ENOMEM; - memset(addr, 0, PAGE_SIZE * num); - vunmap(addr); - - return 0; -} - -static int ion_heap_sglist_zero(struct sg_table *sgt, pgprot_t pgprot) -{ - int p = 0; - int ret = 0; - struct sg_page_iter piter; - struct page *pages[32]; - - for_each_sgtable_page(sgt, &piter, 0) { - pages[p++] = sg_page_iter_page(&piter); - if (p == ARRAY_SIZE(pages)) { - ret = ion_heap_clear_pages(pages, p, pgprot); - if (ret) - return ret; - p = 0; - } - } - if (p) - ret = 
ion_heap_clear_pages(pages, p, pgprot); - - return ret; -} - -int ion_heap_buffer_zero(struct ion_buffer *buffer) -{ - struct sg_table *table = buffer->sg_table; - pgprot_t pgprot; - - if (buffer->flags & ION_FLAG_CACHED) - pgprot = PAGE_KERNEL; - else - pgprot = pgprot_writecombine(PAGE_KERNEL); - - return ion_heap_sglist_zero(table, pgprot); -} - -void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer) -{ - spin_lock(&heap->free_lock); - list_add(&buffer->list, &heap->free_list); - heap->free_list_size += buffer->size; - spin_unlock(&heap->free_lock); - wake_up(&heap->waitqueue); -} - -size_t ion_heap_freelist_size(struct ion_heap *heap) -{ - size_t size; - - spin_lock(&heap->free_lock); - size = heap->free_list_size; - spin_unlock(&heap->free_lock); - - return size; -} - -static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size, - bool skip_pools) -{ - struct ion_buffer *buffer; - size_t total_drained = 0; - - if (ion_heap_freelist_size(heap) == 0) - return 0; - - spin_lock(&heap->free_lock); - if (size == 0) - size = heap->free_list_size; - - while (!list_empty(&heap->free_list)) { - if (total_drained >= size) - break; - buffer = list_first_entry(&heap->free_list, struct ion_buffer, - list); - list_del(&buffer->list); - heap->free_list_size -= buffer->size; - if (skip_pools) - buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE; - total_drained += buffer->size; - spin_unlock(&heap->free_lock); - ion_buffer_destroy(buffer); - spin_lock(&heap->free_lock); - } - spin_unlock(&heap->free_lock); - - return total_drained; -} - -size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size) -{ - return _ion_heap_freelist_drain(heap, size, false); -} - -size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size) -{ - return _ion_heap_freelist_drain(heap, size, true); -} - -static int ion_heap_deferred_free(void *data) -{ - struct ion_heap *heap = data; - - while (true) { - struct ion_buffer *buffer; - - wait_event_freezable(heap->waitqueue, - ion_heap_freelist_size(heap) > 0); - - spin_lock(&heap->free_lock); - if (list_empty(&heap->free_list)) { - spin_unlock(&heap->free_lock); - continue; - } - buffer = list_first_entry(&heap->free_list, struct ion_buffer, - list); - list_del(&buffer->list); - heap->free_list_size -= buffer->size; - spin_unlock(&heap->free_lock); - ion_buffer_destroy(buffer); - } - - return 0; -} - -int ion_heap_init_deferred_free(struct ion_heap *heap) -{ - INIT_LIST_HEAD(&heap->free_list); - init_waitqueue_head(&heap->waitqueue); - heap->task = kthread_run(ion_heap_deferred_free, heap, - "%s", heap->name); - if (IS_ERR(heap->task)) { - pr_err("%s: creating thread for deferred free failed\n", - __func__); - return PTR_ERR_OR_ZERO(heap->task); - } - sched_set_normal(heap->task, 19); - - return 0; -} - -static unsigned long ion_heap_shrink_count(struct shrinker *shrinker, - struct shrink_control *sc) -{ - struct ion_heap *heap = container_of(shrinker, struct ion_heap, - shrinker); - int total = 0; - - total = ion_heap_freelist_size(heap) / PAGE_SIZE; - - if (heap->ops->shrink) - total += heap->ops->shrink(heap, sc->gfp_mask, 0); - - return total; -} - -static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker, - struct shrink_control *sc) -{ - struct ion_heap *heap = container_of(shrinker, struct ion_heap, - shrinker); - int freed = 0; - int to_scan = sc->nr_to_scan; - - if (to_scan == 0) - return 0; - - /* - * shrink the free list first, no point in zeroing the memory if we're - * just going to reclaim it. 
Also, skip any possible page pooling. - */ - if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) - freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) / - PAGE_SIZE; - - to_scan -= freed; - if (to_scan <= 0) - return freed; - - if (heap->ops->shrink) - freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan); - - return freed; -} - -int ion_heap_init_shrinker(struct ion_heap *heap) -{ - heap->shrinker.count_objects = ion_heap_shrink_count; - heap->shrinker.scan_objects = ion_heap_shrink_scan; - heap->shrinker.seeks = DEFAULT_SEEKS; - heap->shrinker.batch = 0; - - return register_shrinker(&heap->shrinker); -} diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c deleted file mode 100644 index 0198b886d906..000000000000 --- a/drivers/staging/android/ion/ion_page_pool.c +++ /dev/null @@ -1,155 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * ION Memory Allocator page pool helpers - * - * Copyright (C) 2011 Google, Inc. - */ - -#include <linux/list.h> -#include <linux/slab.h> -#include <linux/swap.h> -#include <linux/sched/signal.h> - -#include "ion.h" - -static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool) -{ - if (fatal_signal_pending(current)) - return NULL; - return alloc_pages(pool->gfp_mask, pool->order); -} - -static void ion_page_pool_free_pages(struct ion_page_pool *pool, - struct page *page) -{ - __free_pages(page, pool->order); -} - -static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page) -{ - mutex_lock(&pool->mutex); - if (PageHighMem(page)) { - list_add_tail(&page->lru, &pool->high_items); - pool->high_count++; - } else { - list_add_tail(&page->lru, &pool->low_items); - pool->low_count++; - } - - mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE, - 1 << pool->order); - mutex_unlock(&pool->mutex); -} - -static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high) -{ - struct page *page; - - if (high) { - BUG_ON(!pool->high_count); - page = list_first_entry(&pool->high_items, struct page, lru); - pool->high_count--; - } else { - BUG_ON(!pool->low_count); - page = list_first_entry(&pool->low_items, struct page, lru); - pool->low_count--; - } - - list_del(&page->lru); - mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE, - -(1 << pool->order)); - return page; -} - -struct page *ion_page_pool_alloc(struct ion_page_pool *pool) -{ - struct page *page = NULL; - - BUG_ON(!pool); - - mutex_lock(&pool->mutex); - if (pool->high_count) - page = ion_page_pool_remove(pool, true); - else if (pool->low_count) - page = ion_page_pool_remove(pool, false); - mutex_unlock(&pool->mutex); - - if (!page) - page = ion_page_pool_alloc_pages(pool); - - return page; -} - -void ion_page_pool_free(struct ion_page_pool *pool, struct page *page) -{ - BUG_ON(pool->order != compound_order(page)); - - ion_page_pool_add(pool, page); -} - -static int ion_page_pool_total(struct ion_page_pool *pool, bool high) -{ - int count = pool->low_count; - - if (high) - count += pool->high_count; - - return count << pool->order; -} - -int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, - int nr_to_scan) -{ - int freed = 0; - bool high; - - if (current_is_kswapd()) - high = true; - else - high = !!(gfp_mask & __GFP_HIGHMEM); - - if (nr_to_scan == 0) - return ion_page_pool_total(pool, high); - - while (freed < nr_to_scan) { - struct page *page; - - mutex_lock(&pool->mutex); - if (pool->low_count) { - page = ion_page_pool_remove(pool, false); - } else if (high && 
pool->high_count) { - page = ion_page_pool_remove(pool, true); - } else { - mutex_unlock(&pool->mutex); - break; - } - mutex_unlock(&pool->mutex); - ion_page_pool_free_pages(pool, page); - freed += (1 << pool->order); - } - - return freed; -} - -struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) -{ - struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL); - - if (!pool) - return NULL; - pool->high_count = 0; - pool->low_count = 0; - INIT_LIST_HEAD(&pool->low_items); - INIT_LIST_HEAD(&pool->high_items); - pool->gfp_mask = gfp_mask | __GFP_COMP; - pool->order = order; - mutex_init(&pool->mutex); - plist_node_init(&pool->list, order); - - return pool; -} - -void ion_page_pool_destroy(struct ion_page_pool *pool) -{ - kfree(pool); -} diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c deleted file mode 100644 index eac0632ab4e8..000000000000 --- a/drivers/staging/android/ion/ion_system_heap.c +++ /dev/null @@ -1,377 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * ION Memory Allocator system heap exporter - * - * Copyright (C) 2011 Google, Inc. - */ - -#include <asm/page.h> -#include <linux/dma-mapping.h> -#include <linux/err.h> -#include <linux/highmem.h> -#include <linux/mm.h> -#include <linux/scatterlist.h> -#include <linux/slab.h> -#include <linux/vmalloc.h> - -#include "ion.h" - -#define NUM_ORDERS ARRAY_SIZE(orders) - -static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN | - __GFP_NORETRY) & ~__GFP_RECLAIM; -static gfp_t low_order_gfp_flags = GFP_HIGHUSER | __GFP_ZERO; -static const unsigned int orders[] = {8, 4, 0}; - -static int order_to_index(unsigned int order) -{ - int i; - - for (i = 0; i < NUM_ORDERS; i++) - if (order == orders[i]) - return i; - BUG(); - return -1; -} - -static inline unsigned int order_to_size(int order) -{ - return PAGE_SIZE << order; -} - -struct ion_system_heap { - struct ion_heap heap; - struct ion_page_pool *pools[NUM_ORDERS]; -}; - -static struct page *alloc_buffer_page(struct ion_system_heap *heap, - struct ion_buffer *buffer, - unsigned long order) -{ - struct ion_page_pool *pool = heap->pools[order_to_index(order)]; - - return ion_page_pool_alloc(pool); -} - -static void free_buffer_page(struct ion_system_heap *heap, - struct ion_buffer *buffer, struct page *page) -{ - struct ion_page_pool *pool; - unsigned int order = compound_order(page); - - /* go to system */ - if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) { - __free_pages(page, order); - return; - } - - pool = heap->pools[order_to_index(order)]; - - ion_page_pool_free(pool, page); -} - -static struct page *alloc_largest_available(struct ion_system_heap *heap, - struct ion_buffer *buffer, - unsigned long size, - unsigned int max_order) -{ - struct page *page; - int i; - - for (i = 0; i < NUM_ORDERS; i++) { - if (size < order_to_size(orders[i])) - continue; - if (max_order < orders[i]) - continue; - - page = alloc_buffer_page(heap, buffer, orders[i]); - if (!page) - continue; - - return page; - } - - return NULL; -} - -static int ion_system_heap_allocate(struct ion_heap *heap, - struct ion_buffer *buffer, - unsigned long size, - unsigned long flags) -{ - struct ion_system_heap *sys_heap = container_of(heap, - struct ion_system_heap, - heap); - struct sg_table *table; - struct scatterlist *sg; - struct list_head pages; - struct page *page, *tmp_page; - int i = 0; - unsigned long size_remaining = PAGE_ALIGN(size); - unsigned int max_order = orders[0]; - - if (size / 
PAGE_SIZE > totalram_pages() / 2) - return -ENOMEM; - - INIT_LIST_HEAD(&pages); - while (size_remaining > 0) { - page = alloc_largest_available(sys_heap, buffer, size_remaining, - max_order); - if (!page) - goto free_pages; - list_add_tail(&page->lru, &pages); - size_remaining -= page_size(page); - max_order = compound_order(page); - i++; - } - table = kmalloc(sizeof(*table), GFP_KERNEL); - if (!table) - goto free_pages; - - if (sg_alloc_table(table, i, GFP_KERNEL)) - goto free_table; - - sg = table->sgl; - list_for_each_entry_safe(page, tmp_page, &pages, lru) { - sg_set_page(sg, page, page_size(page), 0); - sg = sg_next(sg); - list_del(&page->lru); - } - - buffer->sg_table = table; - return 0; - -free_table: - kfree(table); -free_pages: - list_for_each_entry_safe(page, tmp_page, &pages, lru) - free_buffer_page(sys_heap, buffer, page); - return -ENOMEM; -} - -static void ion_system_heap_free(struct ion_buffer *buffer) -{ - struct ion_system_heap *sys_heap = container_of(buffer->heap, - struct ion_system_heap, - heap); - struct sg_table *table = buffer->sg_table; - struct scatterlist *sg; - int i; - - /* zero the buffer before goto page pool */ - if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) - ion_heap_buffer_zero(buffer); - - for_each_sgtable_sg(table, sg, i) - free_buffer_page(sys_heap, buffer, sg_page(sg)); - sg_free_table(table); - kfree(table); -} - -static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask, - int nr_to_scan) -{ - struct ion_page_pool *pool; - struct ion_system_heap *sys_heap; - int nr_total = 0; - int i, nr_freed; - int only_scan = 0; - - sys_heap = container_of(heap, struct ion_system_heap, heap); - - if (!nr_to_scan) - only_scan = 1; - - for (i = 0; i < NUM_ORDERS; i++) { - pool = sys_heap->pools[i]; - - if (only_scan) { - nr_total += ion_page_pool_shrink(pool, - gfp_mask, - nr_to_scan); - - } else { - nr_freed = ion_page_pool_shrink(pool, - gfp_mask, - nr_to_scan); - nr_to_scan -= nr_freed; - nr_total += nr_freed; - if (nr_to_scan <= 0) - break; - } - } - return nr_total; -} - -static struct ion_heap_ops system_heap_ops = { - .allocate = ion_system_heap_allocate, - .free = ion_system_heap_free, - .map_kernel = ion_heap_map_kernel, - .unmap_kernel = ion_heap_unmap_kernel, - .map_user = ion_heap_map_user, - .shrink = ion_system_heap_shrink, -}; - -static void ion_system_heap_destroy_pools(struct ion_page_pool **pools) -{ - int i; - - for (i = 0; i < NUM_ORDERS; i++) - if (pools[i]) - ion_page_pool_destroy(pools[i]); -} - -static int ion_system_heap_create_pools(struct ion_page_pool **pools) -{ - int i; - - for (i = 0; i < NUM_ORDERS; i++) { - struct ion_page_pool *pool; - gfp_t gfp_flags = low_order_gfp_flags; - - if (orders[i] > 4) - gfp_flags = high_order_gfp_flags; - - pool = ion_page_pool_create(gfp_flags, orders[i]); - if (!pool) - goto err_create_pool; - pools[i] = pool; - } - - return 0; - -err_create_pool: - ion_system_heap_destroy_pools(pools); - return -ENOMEM; -} - -static struct ion_heap *__ion_system_heap_create(void) -{ - struct ion_system_heap *heap; - - heap = kzalloc(sizeof(*heap), GFP_KERNEL); - if (!heap) - return ERR_PTR(-ENOMEM); - heap->heap.ops = &system_heap_ops; - heap->heap.type = ION_HEAP_TYPE_SYSTEM; - heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; - - if (ion_system_heap_create_pools(heap->pools)) - goto free_heap; - - return &heap->heap; - -free_heap: - kfree(heap); - return ERR_PTR(-ENOMEM); -} - -static int ion_system_heap_create(void) -{ - struct ion_heap *heap; - - heap = __ion_system_heap_create(); - if 
(IS_ERR(heap)) - return PTR_ERR(heap); - heap->name = "ion_system_heap"; - - ion_device_add_heap(heap); - - return 0; -} -device_initcall(ion_system_heap_create); - -static int ion_system_contig_heap_allocate(struct ion_heap *heap, - struct ion_buffer *buffer, - unsigned long len, - unsigned long flags) -{ - int order = get_order(len); - struct page *page; - struct sg_table *table; - unsigned long i; - int ret; - - page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order); - if (!page) - return -ENOMEM; - - split_page(page, order); - - len = PAGE_ALIGN(len); - for (i = len >> PAGE_SHIFT; i < (1 << order); i++) - __free_page(page + i); - - table = kmalloc(sizeof(*table), GFP_KERNEL); - if (!table) { - ret = -ENOMEM; - goto free_pages; - } - - ret = sg_alloc_table(table, 1, GFP_KERNEL); - if (ret) - goto free_table; - - sg_set_page(table->sgl, page, len, 0); - - buffer->sg_table = table; - - return 0; - -free_table: - kfree(table); -free_pages: - for (i = 0; i < len >> PAGE_SHIFT; i++) - __free_page(page + i); - - return ret; -} - -static void ion_system_contig_heap_free(struct ion_buffer *buffer) -{ - struct sg_table *table = buffer->sg_table; - struct page *page = sg_page(table->sgl); - unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; - unsigned long i; - - for (i = 0; i < pages; i++) - __free_page(page + i); - sg_free_table(table); - kfree(table); -} - -static struct ion_heap_ops kmalloc_ops = { - .allocate = ion_system_contig_heap_allocate, - .free = ion_system_contig_heap_free, - .map_kernel = ion_heap_map_kernel, - .unmap_kernel = ion_heap_unmap_kernel, - .map_user = ion_heap_map_user, -}; - -static struct ion_heap *__ion_system_contig_heap_create(void) -{ - struct ion_heap *heap; - - heap = kzalloc(sizeof(*heap), GFP_KERNEL); - if (!heap) - return ERR_PTR(-ENOMEM); - heap->ops = &kmalloc_ops; - heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG; - heap->name = "ion_system_contig_heap"; - - return heap; -} - -static int ion_system_contig_heap_create(void) -{ - struct ion_heap *heap; - - heap = __ion_system_contig_heap_create(); - if (IS_ERR(heap)) - return PTR_ERR(heap); - - ion_device_add_heap(heap); - - return 0; -} -device_initcall(ion_system_contig_heap_create); |
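
For context on the interface removed above: the ion_ioctl()/ion_alloc() path in ion.c handed userspace a dma-buf file descriptor in response to ION_IOC_ALLOC on /dev/ion, which could then be mmap'ed through ion_mmap(). Below is a minimal userspace sketch, not part of this diff, assuming the legacy uapi layout from drivers/staging/android/uapi/ion.h; the struct fields and ioctl number are reproduced only to keep the sketch self-contained and should be checked against that header.

/*
 * Hypothetical userspace sketch: allocate one page from the legacy
 * /dev/ion device and map the returned dma-buf fd. Only runs on kernels
 * that still carry the staging ION driver; after this removal the
 * dma-buf heaps interface (/dev/dma_heap/) is the replacement.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

/* Assumed copy of the staging uapi layout; verify against uapi/ion.h. */
struct ion_allocation_data {
	uint64_t len;
	uint32_t heap_id_mask;
	uint32_t flags;
	uint32_t fd;
	uint32_t unused;
};

#define ION_IOC_MAGIC 'I'
#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, struct ion_allocation_data)

int main(void)
{
	struct ion_allocation_data alloc;
	void *vaddr;
	int ion_fd;

	ion_fd = open("/dev/ion", O_RDWR);
	if (ion_fd < 0) {
		perror("open /dev/ion");
		return 1;
	}

	memset(&alloc, 0, sizeof(alloc));
	alloc.len = 4096;          /* rounded up to PAGE_ALIGN() by ion_alloc() */
	alloc.heap_id_mask = ~0u;  /* any heap; normally a specific heap id bit */
	alloc.flags = 0;           /* uncached mapping; ION_FLAG_CACHED otherwise */

	/* ion_ioctl() -> ion_alloc() returns a dma-buf fd in alloc.fd. */
	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0) {
		perror("ION_IOC_ALLOC");
		close(ion_fd);
		return 1;
	}

	/* The dma-buf fd is mmap-able; the mapping is served by ion_mmap(). */
	vaddr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, alloc.fd, 0);
	if (vaddr == MAP_FAILED)
		perror("mmap");
	else
		munmap(vaddr, 4096);

	close(alloc.fd);
	close(ion_fd);
	return 0;
}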