Diffstat (limited to 'drivers/staging/android')
-rw-r--r--  drivers/staging/android/Kconfig                      |  13
-rw-r--r--  drivers/staging/android/Makefile                     |   1
-rw-r--r--  drivers/staging/android/ion/Kconfig                  |  12
-rw-r--r--  drivers/staging/android/ion/Makefile                 |   4
-rw-r--r--  drivers/staging/android/ion/devicetree.txt           |  51
-rw-r--r--  drivers/staging/android/ion/hisilicon/hi6220_ion.c   | 195
-rw-r--r--  drivers/staging/android/ion/ion-ioctl.c              | 177
-rw-r--r--  drivers/staging/android/ion/ion.c                    | 388
-rw-r--r--  drivers/staging/android/ion/ion.h                    |  41
-rw-r--r--  drivers/staging/android/ion/ion_carveout_heap.c      |  43
-rw-r--r--  drivers/staging/android/ion/ion_chunk_heap.c         |  29
-rw-r--r--  drivers/staging/android/ion/ion_cma_heap.c           |  36
-rw-r--r--  drivers/staging/android/ion/ion_dummy_driver.c       |  10
-rw-r--r--  drivers/staging/android/ion/ion_heap.c               |  10
-rw-r--r--  drivers/staging/android/ion/ion_of.c                 | 185
-rw-r--r--  drivers/staging/android/ion/ion_of.h                 |  37
-rw-r--r--  drivers/staging/android/ion/ion_page_pool.c          |  12
-rw-r--r--  drivers/staging/android/ion/ion_priv.h               | 131
-rw-r--r--  drivers/staging/android/ion/ion_system_heap.c        | 239
-rw-r--r--  drivers/staging/android/ion/ion_test.c               |  23
-rw-r--r--  drivers/staging/android/lowmemorykiller.c            |  13
-rw-r--r--  drivers/staging/android/sw_sync.c                    | 344
-rw-r--r--  drivers/staging/android/sync_debug.c                 | 230
-rw-r--r--  drivers/staging/android/sync_debug.h                 |  84
-rw-r--r--  drivers/staging/android/trace/sync.h                 |  32
-rw-r--r--  drivers/staging/android/uapi/ion.h                   |  69
26 files changed, 913 insertions(+), 1496 deletions(-)
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 06e41d2..6c00d6f 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -24,19 +24,6 @@ config ANDROID_LOW_MEMORY_KILLER
scripts (/init.rc), and it defines priority values with minimum free memory size
for each priority.
-config SW_SYNC
- bool "Software synchronization framework"
- default n
- depends on SYNC_FILE
- depends on DEBUG_FS
- ---help---
- A sync object driver that uses a 32bit counter to coordinate
- synchronization. Useful when there is no hardware primitive backing
- the synchronization.
-
- WARNING: improper use of this can result in deadlocking kernel
- drivers from userspace. Intended for test and debug only.
-
source "drivers/staging/android/ion/Kconfig"
endif # if ANDROID
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 7ca61b7..7ed1be7 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -4,4 +4,3 @@ obj-y += ion/
obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
-obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index 19c1572..c8fb413 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -36,7 +36,19 @@ config ION_TEGRA
config ION_HISI
tristate "Ion for Hisilicon"
depends on ARCH_HISI && ION
+ select ION_OF
help
Choose this option if you wish to use ion on Hisilicon Platform.
source "drivers/staging/android/ion/hisilicon/Kconfig"
+
+config ION_OF
+ bool "Devicetree support for Ion"
+ depends on ION && OF_ADDRESS
+ help
+ Provides base support for defining Ion heaps in devicetree
+ and setting them up. Also includes functions for platforms
+ to parse the devicetree and extend it with their own
+ custom extensions.
+
+ If using Ion and devicetree, you should say Y here.
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index 18cc2aa..5d630a0 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,4 +1,5 @@
-obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
+obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o \
+ ion_page_pool.o ion_system_heap.o \
ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
obj-$(CONFIG_ION_TEST) += ion_test.o
ifdef CONFIG_COMPAT
@@ -8,4 +9,5 @@ endif
obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
obj-$(CONFIG_ION_TEGRA) += tegra/
obj-$(CONFIG_ION_HISI) += hisilicon/
+obj-$(CONFIG_ION_OF) += ion_of.o
diff --git a/drivers/staging/android/ion/devicetree.txt b/drivers/staging/android/ion/devicetree.txt
new file mode 100644
index 0000000..16871527
--- /dev/null
+++ b/drivers/staging/android/ion/devicetree.txt
@@ -0,0 +1,51 @@
+Ion Memory Manager
+
+Ion is a memory manager that allows for sharing of buffers via dma-buf.
+Ion allows for different types of allocation via an abstraction called
+a 'heap'. A heap represents a specific type of memory, and there can be
+multiple instances of the same heap type.
+
+Specific heap instances are tied to heap IDs. Heap IDs are not to be specified
+in the devicetree.
+
+Required properties for Ion:
+
+- compatible: "linux,ion" PLUS a compatible property for the device
+
+All child nodes of a linux,ion node are interpreted as heaps.
+
+Required properties for heaps:
+
+- compatible: compatible string for a heap type PLUS a compatible property
+for the specific instance of the heap. Current heap types:
+-- linux,ion-heap-system
+-- linux,ion-heap-system-contig
+-- linux,ion-heap-carveout
+-- linux,ion-heap-chunk
+-- linux,ion-heap-dma
+-- linux,ion-heap-custom
+
+Optional properties
+- memory-region: A phandle to a memory region. Required for DMA heap type
+(see reserved-memory.txt for details on the reservation)
+
+Example:
+
+    ion {
+        compatible = "hisilicon,ion", "linux,ion";
+
+        ion-system-heap {
+            compatible = "hisilicon,system-heap", "linux,ion-heap-system";
+        };
+
+        ion-camera-region {
+            compatible = "hisilicon,camera-heap", "linux,ion-heap-dma";
+            memory-region = <&camera_region>;
+        };
+
+        ion-fb-region {
+            compatible = "hisilicon,fb-heap", "linux,ion-heap-dma";
+            memory-region = <&fb_region>;
+        };
+    };
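For reference, and not part of this patch: the memory-region phandles above point at nodes under /reserved-memory (see reserved-memory.txt). A sketch of one such node follows, with the unit address and size invented for illustration; "ion-region" is the compatible string that the rmem_ion_setup() hook added in ion_of.c below registers for, while a CMA-backed DMA heap would instead reference a "shared-dma-pool" region.

    reserved-memory {
        #address-cells = <1>;
        #size-cells = <1>;
        ranges;

        camera_region: camera@60000000 {
            compatible = "ion-region";
            reg = <0x60000000 0x8000000>;    /* 128 MiB at a made-up address */
        };
    };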
diff --git a/drivers/staging/android/ion/hisilicon/hi6220_ion.c b/drivers/staging/android/ion/hisilicon/hi6220_ion.c
index fe9f0fd..0de7897 100644
--- a/drivers/staging/android/ion/hisilicon/hi6220_ion.c
+++ b/drivers/staging/android/ion/hisilicon/hi6220_ion.c
@@ -19,181 +19,74 @@
#include <linux/mm.h>
#include "../ion_priv.h"
#include "../ion.h"
+#include "../ion_of.h"
-struct hi6220_ion_type_table {
- const char *name;
- enum ion_heap_type type;
+struct hisi_ion_dev {
+ struct ion_heap **heaps;
+ struct ion_device *idev;
+ struct ion_platform_data *data;
};
-static struct hi6220_ion_type_table ion_type_table[] = {
- {"ion_system", ION_HEAP_TYPE_SYSTEM},
- {"ion_system_contig", ION_HEAP_TYPE_SYSTEM_CONTIG},
- {"ion_carveout", ION_HEAP_TYPE_CARVEOUT},
- {"ion_chunk", ION_HEAP_TYPE_CHUNK},
- {"ion_dma", ION_HEAP_TYPE_DMA},
- {"ion_custom", ION_HEAP_TYPE_CUSTOM},
+static struct ion_of_heap hisi_heaps[] = {
+ PLATFORM_HEAP("hisilicon,sys_user", 0,
+ ION_HEAP_TYPE_SYSTEM, "sys_user"),
+ PLATFORM_HEAP("hisilicon,sys_contig", 1,
+ ION_HEAP_TYPE_SYSTEM_CONTIG, "sys_contig"),
+ PLATFORM_HEAP("hisilicon,cma", ION_HEAP_TYPE_DMA, ION_HEAP_TYPE_DMA,
+ "cma"),
+ {}
};
-static struct ion_device *idev;
-static int num_heaps;
-static struct ion_heap **heaps;
-static struct ion_platform_heap **heaps_data;
-
-static int get_type_by_name(const char *name, enum ion_heap_type *type)
+static int hi6220_ion_probe(struct platform_device *pdev)
{
+ struct hisi_ion_dev *ipdev;
int i;
- for (i = 0; i < ARRAY_SIZE(ion_type_table); i++) {
- if (strncmp(name, ion_type_table[i].name, strlen(name)))
- continue;
-
- *type = ion_type_table[i].type;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int hi6220_set_platform_data(struct platform_device *pdev)
-{
- unsigned int base;
- unsigned int size;
- unsigned int id;
- const char *heap_name;
- const char *type_name;
- enum ion_heap_type type;
- int ret;
- struct device_node *np;
- struct ion_platform_heap *p_data;
- const struct device_node *dt_node = pdev->dev.of_node;
- int index = 0;
-
- for_each_child_of_node(dt_node, np)
- num_heaps++;
-
- heaps_data = devm_kzalloc(&pdev->dev,
- sizeof(struct ion_platform_heap *) *
- num_heaps,
- GFP_KERNEL);
- if (!heaps_data)
+ ipdev = devm_kzalloc(&pdev->dev, sizeof(*ipdev), GFP_KERNEL);
+ if (!ipdev)
return -ENOMEM;
- for_each_child_of_node(dt_node, np) {
- ret = of_property_read_string(np, "heap-name", &heap_name);
- if (ret < 0) {
- pr_err("check the name of node %s\n", np->name);
- continue;
- }
+ platform_set_drvdata(pdev, ipdev);
- ret = of_property_read_u32(np, "heap-id", &id);
- if (ret < 0) {
- pr_err("check the id %s\n", np->name);
- continue;
- }
+ ipdev->idev = ion_device_create(NULL);
+ if (IS_ERR(ipdev->idev))
+ return PTR_ERR(ipdev->idev);
- ret = of_property_read_u32(np, "heap-base", &base);
- if (ret < 0) {
- pr_err("check the base of node %s\n", np->name);
- continue;
- }
-
- ret = of_property_read_u32(np, "heap-size", &size);
- if (ret < 0) {
- pr_err("check the size of node %s\n", np->name);
- continue;
- }
-
- ret = of_property_read_string(np, "heap-type", &type_name);
- if (ret < 0) {
- pr_err("check the type of node %s\n", np->name);
- continue;
- }
+ ipdev->data = ion_parse_dt(pdev, hisi_heaps);
+ if (IS_ERR(ipdev->data))
+ return PTR_ERR(ipdev->data);
- ret = get_type_by_name(type_name, &type);
- if (ret < 0) {
- pr_err("type name error %s!\n", type_name);
- continue;
- }
- pr_info("heap index %d : name %s base 0x%x size 0x%x id %d type %d\n",
- index, heap_name, base, size, id, type);
+ ipdev->heaps = devm_kzalloc(&pdev->dev,
+ sizeof(struct ion_heap) * ipdev->data->nr,
+ GFP_KERNEL);
+ if (!ipdev->heaps) {
+ ion_destroy_platform_data(ipdev->data);
+ return -ENOMEM;
+ }
- p_data = devm_kzalloc(&pdev->dev,
- sizeof(struct ion_platform_heap),
- GFP_KERNEL);
- if (!p_data)
+ for (i = 0; i < ipdev->data->nr; i++) {
+ ipdev->heaps[i] = ion_heap_create(&ipdev->data->heaps[i]);
+ if (!ipdev->heaps[i]) {
+ ion_destroy_platform_data(ipdev->data);
return -ENOMEM;
-
- p_data->name = heap_name;
- p_data->base = base;
- p_data->size = size;
- p_data->id = id;
- p_data->type = type;
-
- heaps_data[index] = p_data;
- index++;
+ }
+ ion_device_add_heap(ipdev->idev, ipdev->heaps[i]);
}
return 0;
}
-static int hi6220_ion_probe(struct platform_device *pdev)
+static int hi6220_ion_remove(struct platform_device *pdev)
{
+ struct hisi_ion_dev *ipdev;
int i;
- int err;
- static struct ion_platform_heap *p_heap;
-
- idev = ion_device_create(NULL);
- err = hi6220_set_platform_data(pdev);
- if (err) {
- pr_err("ion set platform data error!\n");
- goto err_free_idev;
- }
- heaps = devm_kzalloc(&pdev->dev,
- sizeof(struct ion_heap *) * num_heaps,
- GFP_KERNEL);
- if (!heaps) {
- err = -ENOMEM;
- goto err_free_idev;
- }
-
- /*
- * create the heaps as specified in the dts file
- */
- for (i = 0; i < num_heaps; i++) {
- p_heap = heaps_data[i];
- heaps[i] = ion_heap_create(p_heap);
- if (IS_ERR_OR_NULL(heaps[i])) {
- err = PTR_ERR(heaps[i]);
- goto err_free_heaps;
- }
-
- ion_device_add_heap(idev, heaps[i]);
- pr_info("%s: adding heap %s of type %d with %lx@%lx\n",
- __func__, p_heap->name, p_heap->type,
- p_heap->base, (unsigned long)p_heap->size);
- }
- return err;
+ ipdev = platform_get_drvdata(pdev);
-err_free_heaps:
- for (i = 0; i < num_heaps; ++i) {
- ion_heap_destroy(heaps[i]);
- heaps[i] = NULL;
- }
-err_free_idev:
- ion_device_destroy(idev);
+ for (i = 0; i < ipdev->data->nr; i++)
+ ion_heap_destroy(ipdev->heaps[i]);
- return err;
-}
-
-static int hi6220_ion_remove(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < num_heaps; i++) {
- ion_heap_destroy(heaps[i]);
- heaps[i] = NULL;
- }
- ion_device_destroy(idev);
+ ion_destroy_platform_data(ipdev->data);
+ ion_device_destroy(ipdev->idev);
return 0;
}
diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
new file mode 100644
index 0000000..7e7431d
--- /dev/null
+++ b/drivers/staging/android/ion/ion-ioctl.c
@@ -0,0 +1,177 @@
+/*
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+#include "compat_ion.h"
+
+union ion_ioctl_arg {
+ struct ion_fd_data fd;
+ struct ion_allocation_data allocation;
+ struct ion_handle_data handle;
+ struct ion_custom_data custom;
+ struct ion_heap_query query;
+};
+
+static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case ION_IOC_HEAP_QUERY:
+ ret = arg->query.reserved0 != 0;
+ ret |= arg->query.reserved1 != 0;
+ ret |= arg->query.reserved2 != 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret ? -EINVAL : 0;
+}
+
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
+{
+ switch (cmd) {
+ case ION_IOC_SYNC:
+ case ION_IOC_FREE:
+ case ION_IOC_CUSTOM:
+ return _IOC_WRITE;
+ default:
+ return _IOC_DIR(cmd);
+ }
+}
+
+long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct ion_client *client = filp->private_data;
+ struct ion_device *dev = client->dev;
+ struct ion_handle *cleanup_handle = NULL;
+ int ret = 0;
+ unsigned int dir;
+ union ion_ioctl_arg data;
+
+ dir = ion_ioctl_dir(cmd);
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ /*
+ * The copy_from_user is unconditional here for both read and write
+ * to do the validate. If there is no write for the ioctl, the
+ * buffer is cleared
+ */
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ ret = validate_ioctl_arg(cmd, &data);
+ if (WARN_ON_ONCE(ret))
+ return ret;
+
+ if (!(dir & _IOC_WRITE))
+ memset(&data, 0, sizeof(data));
+
+ switch (cmd) {
+ case ION_IOC_ALLOC:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_alloc(client, data.allocation.len,
+ data.allocation.align,
+ data.allocation.heap_id_mask,
+ data.allocation.flags);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ data.allocation.handle = handle->id;
+
+ cleanup_handle = handle;
+ break;
+ }
+ case ION_IOC_FREE:
+ {
+ struct ion_handle *handle;
+
+ mutex_lock(&client->lock);
+ handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
+ if (IS_ERR(handle)) {
+ mutex_unlock(&client->lock);
+ return PTR_ERR(handle);
+ }
+ ion_free_nolock(client, handle);
+ ion_handle_put_nolock(handle);
+ mutex_unlock(&client->lock);
+ break;
+ }
+ case ION_IOC_SHARE:
+ case ION_IOC_MAP:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_handle_get_by_id(client, data.handle.handle);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ data.fd.fd = ion_share_dma_buf_fd(client, handle);
+ ion_handle_put(handle);
+ if (data.fd.fd < 0)
+ ret = data.fd.fd;
+ break;
+ }
+ case ION_IOC_IMPORT:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_import_dma_buf_fd(client, data.fd.fd);
+ if (IS_ERR(handle))
+ ret = PTR_ERR(handle);
+ else
+ data.handle.handle = handle->id;
+ break;
+ }
+ case ION_IOC_SYNC:
+ {
+ ret = ion_sync_for_device(client, data.fd.fd);
+ break;
+ }
+ case ION_IOC_CUSTOM:
+ {
+ if (!dev->custom_ioctl)
+ return -ENOTTY;
+ ret = dev->custom_ioctl(client, data.custom.cmd,
+ data.custom.arg);
+ break;
+ }
+ case ION_IOC_HEAP_QUERY:
+ ret = ion_query_heaps(client, &data.query);
+ break;
+ default:
+ return -ENOTTY;
+ }
+
+ if (dir & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+ if (cleanup_handle)
+ ion_free(client, cleanup_handle);
+ return -EFAULT;
+ }
+ }
+ return ret;
+}
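The ION_IOC_HEAP_QUERY command handled above is a two-pass interface: a call with query.heaps left at zero only reports the number of heaps, and a second call with a user buffer receives one ion_heap_data record per heap (see ion_query_heaps() in ion.c below). A minimal userspace sketch, not part of this patch, assuming a local copy of the updated uapi/ion.h and the usual /dev/ion misc device; the zero initializer also keeps the reserved fields at zero, which validate_ioctl_arg() requires:

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include "ion.h"	/* local copy of drivers/staging/android/uapi/ion.h */

	int main(void)
	{
		struct ion_heap_query query = {0};	/* reserved fields must stay zero */
		struct ion_heap_data *heaps;
		unsigned int i;
		int fd = open("/dev/ion", O_RDONLY);

		if (fd < 0)
			return 1;

		/* First pass: query.heaps == 0, the kernel only fills in query.cnt. */
		if (ioctl(fd, ION_IOC_HEAP_QUERY, &query) < 0)
			return 1;

		heaps = calloc(query.cnt, sizeof(*heaps));
		if (!heaps)
			return 1;
		query.heaps = (unsigned long)heaps;	/* __u64 user pointer */

		/* Second pass: up to query.cnt ion_heap_data entries are copied back. */
		if (ioctl(fd, ION_IOC_HEAP_QUERY, &query) < 0)
			return 1;

		for (i = 0; i < query.cnt; i++)
			printf("heap %u: %s (type %u)\n",
			       heaps[i].heap_id, heaps[i].name, heaps[i].type);
		return 0;
	}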
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index a2cf93b..396ded5 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -41,80 +41,6 @@
#include "ion_priv.h"
#include "compat_ion.h"
-/**
- * struct ion_device - the metadata of the ion device node
- * @dev: the actual misc device
- * @buffers: an rb tree of all the existing buffers
- * @buffer_lock: lock protecting the tree of buffers
- * @lock: rwsem protecting the tree of heaps and clients
- * @heaps: list of all the heaps in the system
- * @user_clients: list of all the clients created from userspace
- */
-struct ion_device {
- struct miscdevice dev;
- struct rb_root buffers;
- struct mutex buffer_lock;
- struct rw_semaphore lock;
- struct plist_head heaps;
- long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
- unsigned long arg);
- struct rb_root clients;
- struct dentry *debug_root;
- struct dentry *heaps_debug_root;
- struct dentry *clients_debug_root;
-};
-
-/**
- * struct ion_client - a process/hw block local address space
- * @node: node in the tree of all clients
- * @dev: backpointer to ion device
- * @handles: an rb tree of all the handles in this client
- * @idr: an idr space for allocating handle ids
- * @lock: lock protecting the tree of handles
- * @name: used for debugging
- * @display_name: used for debugging (unique version of @name)
- * @display_serial: used for debugging (to make display_name unique)
- * @task: used for debugging
- *
- * A client represents a list of buffers this client may access.
- * The mutex stored here is used to protect both handles tree
- * as well as the handles themselves, and should be held while modifying either.
- */
-struct ion_client {
- struct rb_node node;
- struct ion_device *dev;
- struct rb_root handles;
- struct idr idr;
- struct mutex lock;
- const char *name;
- char *display_name;
- int display_serial;
- struct task_struct *task;
- pid_t pid;
- struct dentry *debug_root;
-};
-
-/**
- * ion_handle - a client local reference to a buffer
- * @ref: reference count
- * @client: back pointer to the client the buffer resides in
- * @buffer: pointer to the buffer
- * @node: node in the client's handle rbtree
- * @kmap_cnt: count of times this client has mapped to kernel
- * @id: client-unique id allocated by client->idr
- *
- * Modifications to node, map_cnt or mapping should be protected by the
- * lock in the client. Other fields are never changed after initialization.
- */
-struct ion_handle {
- struct kref ref;
- struct ion_client *client;
- struct ion_buffer *buffer;
- struct rb_node node;
- unsigned int kmap_cnt;
- int id;
-};
-
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
return (buffer->flags & ION_FLAG_CACHED) &&
@@ -174,10 +100,10 @@ static void ion_buffer_add(struct ion_device *dev,
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
- struct ion_device *dev,
- unsigned long len,
- unsigned long align,
- unsigned long flags)
+ struct ion_device *dev,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags)
{
struct ion_buffer *buffer;
struct sg_table *table;
@@ -205,19 +131,16 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
goto err2;
}
- buffer->dev = dev;
- buffer->size = len;
-
- table = heap->ops->map_dma(heap, buffer);
- if (WARN_ONCE(table == NULL,
- "heap->ops->map_dma should return ERR_PTR on error"))
- table = ERR_PTR(-EINVAL);
- if (IS_ERR(table)) {
+ if (buffer->sg_table == NULL) {
+ WARN_ONCE(1, "This heap needs to set the sgtable");
ret = -EINVAL;
goto err1;
}
- buffer->sg_table = table;
+ table = buffer->sg_table;
+ buffer->dev = dev;
+ buffer->size = len;
+
if (ion_buffer_fault_user_mappings(buffer)) {
int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
struct scatterlist *sg;
@@ -226,7 +149,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
if (!buffer->pages) {
ret = -ENOMEM;
- goto err;
+ goto err1;
}
for_each_sg(table->sgl, sg, table->nents, i) {
@@ -260,8 +183,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
mutex_unlock(&dev->buffer_lock);
return buffer;
-err:
- heap->ops->unmap_dma(heap, buffer);
err1:
heap->ops->free(buffer);
err2:
@@ -273,7 +194,6 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
{
if (WARN_ON(buffer->kmap_cnt > 0))
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
- buffer->heap->ops->unmap_dma(buffer->heap, buffer);
buffer->heap->ops->free(buffer);
vfree(buffer->pages);
kfree(buffer);
@@ -337,7 +257,7 @@ static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
}
static struct ion_handle *ion_handle_create(struct ion_client *client,
- struct ion_buffer *buffer)
+ struct ion_buffer *buffer)
{
struct ion_handle *handle;
@@ -377,26 +297,17 @@ static void ion_handle_destroy(struct kref *kref)
kfree(handle);
}
-struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
-{
- return handle->buffer;
-}
-
static void ion_handle_get(struct ion_handle *handle)
{
kref_get(&handle->ref);
}
-static int ion_handle_put_nolock(struct ion_handle *handle)
+int ion_handle_put_nolock(struct ion_handle *handle)
{
- int ret;
-
- ret = kref_put(&handle->ref, ion_handle_destroy);
-
- return ret;
+ return kref_put(&handle->ref, ion_handle_destroy);
}
-static int ion_handle_put(struct ion_handle *handle)
+int ion_handle_put(struct ion_handle *handle)
{
struct ion_client *client = handle->client;
int ret;
@@ -426,8 +337,8 @@ static struct ion_handle *ion_handle_lookup(struct ion_client *client,
return ERR_PTR(-EINVAL);
}
-static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
- int id)
+struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+ int id)
{
struct ion_handle *handle;
@@ -438,7 +349,7 @@ static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
return handle ? handle : ERR_PTR(-EINVAL);
}
-static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
int id)
{
struct ion_handle *handle;
@@ -551,15 +462,10 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
}
EXPORT_SYMBOL(ion_alloc);
-static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
+void ion_free_nolock(struct ion_client *client,
+ struct ion_handle *handle)
{
- bool valid_handle;
-
- BUG_ON(client != handle->client);
-
- valid_handle = ion_handle_validate(client, handle);
-
- if (!valid_handle) {
+ if (!ion_handle_validate(client, handle)) {
WARN(1, "%s: invalid handle passed to free.\n", __func__);
return;
}
@@ -576,32 +482,6 @@ void ion_free(struct ion_client *client, struct ion_handle *handle)
}
EXPORT_SYMBOL(ion_free);
-int ion_phys(struct ion_client *client, struct ion_handle *handle,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct ion_buffer *buffer;
- int ret;
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
-
- buffer = handle->buffer;
-
- if (!buffer->heap->ops->phys) {
- pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
- __func__, buffer->heap->name, buffer->heap->type);
- mutex_unlock(&client->lock);
- return -ENODEV;
- }
- mutex_unlock(&client->lock);
- ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
- return ret;
-}
-EXPORT_SYMBOL(ion_phys);
-
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
void *vaddr;
@@ -612,7 +492,7 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
}
vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
if (WARN_ONCE(vaddr == NULL,
- "heap->ops->map_kernel should return ERR_PTR on error"))
+ "heap->ops->map_kernel should return ERR_PTR on error"))
return ERR_PTR(-EINVAL);
if (IS_ERR(vaddr))
return vaddr;
@@ -781,14 +661,14 @@ static const struct file_operations debug_client_fops = {
};
static int ion_get_client_serial(const struct rb_root *root,
- const unsigned char *name)
+ const unsigned char *name)
{
int serial = -1;
struct rb_node *node;
for (node = rb_first(root); node; node = rb_next(node)) {
struct ion_client *client = rb_entry(node, struct ion_client,
- node);
+ node);
if (strcmp(client->name, name))
continue;
@@ -863,14 +743,14 @@ struct ion_client *ion_client_create(struct ion_device *dev,
rb_insert_color(&client->node, &dev->clients);
client->debug_root = debugfs_create_file(client->display_name, 0664,
- dev->clients_debug_root,
- client, &debug_client_fops);
+ dev->clients_debug_root,
+ client, &debug_client_fops);
if (!client->debug_root) {
char buf[256], *path;
path = dentry_path(dev->clients_debug_root, buf, 256);
pr_err("Failed to create client debugfs at %s/%s\n",
- path, client->display_name);
+ path, client->display_name);
}
up_write(&dev->lock);
@@ -917,26 +797,6 @@ void ion_client_destroy(struct ion_client *client)
}
EXPORT_SYMBOL(ion_client_destroy);
-struct sg_table *ion_sg_table(struct ion_client *client,
- struct ion_handle *handle)
-{
- struct ion_buffer *buffer;
- struct sg_table *table;
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_dma.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
- }
- buffer = handle->buffer;
- table = buffer->sg_table;
- mutex_unlock(&client->lock);
- return table;
-}
-EXPORT_SYMBOL(ion_sg_table);
-
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
struct device *dev,
enum dma_data_direction direction);
@@ -958,7 +818,7 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
}
void ion_pages_sync_for_device(struct device *dev, struct page *page,
- size_t size, enum dma_data_direction dir)
+ size_t size, enum dma_data_direction dir)
{
struct scatterlist sg;
@@ -998,7 +858,7 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
if (ion_buffer_page_is_dirty(page))
ion_pages_sync_for_device(dev, ion_buffer_page(page),
- PAGE_SIZE, dir);
+ PAGE_SIZE, dir);
ion_buffer_page_clean(buffer->pages + i);
}
@@ -1076,7 +936,7 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
if (!buffer->heap->ops->map_user) {
pr_err("%s: this heap does not define a method for mapping to userspace\n",
- __func__);
+ __func__);
return -EINVAL;
}
@@ -1167,7 +1027,7 @@ static struct dma_buf_ops dma_buf_ops = {
};
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
- struct ion_handle *handle)
+ struct ion_handle *handle)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct ion_buffer *buffer;
@@ -1275,7 +1135,7 @@ struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);
-static int ion_sync_for_device(struct ion_client *client, int fd)
+int ion_sync_for_device(struct ion_client *client, int fd)
{
struct dma_buf *dmabuf;
struct ion_buffer *buffer;
@@ -1299,124 +1159,45 @@ static int ion_sync_for_device(struct ion_client *client, int fd)
return 0;
}
-/* fix up the cases where the ioctl direction bits are incorrect */
-static unsigned int ion_ioctl_dir(unsigned int cmd)
-{
- switch (cmd) {
- case ION_IOC_SYNC:
- case ION_IOC_FREE:
- case ION_IOC_CUSTOM:
- return _IOC_WRITE;
- default:
- return _IOC_DIR(cmd);
- }
-}
-
-static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
{
- struct ion_client *client = filp->private_data;
struct ion_device *dev = client->dev;
- struct ion_handle *cleanup_handle = NULL;
- int ret = 0;
- unsigned int dir;
-
- union {
- struct ion_fd_data fd;
- struct ion_allocation_data allocation;
- struct ion_handle_data handle;
- struct ion_custom_data custom;
- } data;
+ struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
+ int ret = -EINVAL, cnt = 0, max_cnt;
+ struct ion_heap *heap;
+ struct ion_heap_data hdata;
- dir = ion_ioctl_dir(cmd);
+ memset(&hdata, 0, sizeof(hdata));
- if (_IOC_SIZE(cmd) > sizeof(data))
- return -EINVAL;
+ down_read(&dev->lock);
+ if (!buffer) {
+ query->cnt = dev->heap_cnt;
+ ret = 0;
+ goto out;
+ }
- if (dir & _IOC_WRITE)
- if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
- return -EFAULT;
+ if (query->cnt <= 0)
+ goto out;
- switch (cmd) {
- case ION_IOC_ALLOC:
- {
- struct ion_handle *handle;
+ max_cnt = query->cnt;
- handle = ion_alloc(client, data.allocation.len,
- data.allocation.align,
- data.allocation.heap_id_mask,
- data.allocation.flags);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
+ plist_for_each_entry(heap, &dev->heaps, node) {
+ strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
+ hdata.name[sizeof(hdata.name) - 1] = '\0';
+ hdata.type = heap->type;
+ hdata.heap_id = heap->id;
- data.allocation.handle = handle->id;
+ ret = copy_to_user(&buffer[cnt],
+ &hdata, sizeof(hdata));
- cleanup_handle = handle;
- break;
- }
- case ION_IOC_FREE:
- {
- struct ion_handle *handle;
-
- mutex_lock(&client->lock);
- handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
- if (IS_ERR(handle)) {
- mutex_unlock(&client->lock);
- return PTR_ERR(handle);
- }
- ion_free_nolock(client, handle);
- ion_handle_put_nolock(handle);
- mutex_unlock(&client->lock);
- break;
- }
- case ION_IOC_SHARE:
- case ION_IOC_MAP:
- {
- struct ion_handle *handle;
-
- handle = ion_handle_get_by_id(client, data.handle.handle);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- data.fd.fd = ion_share_dma_buf_fd(client, handle);
- ion_handle_put(handle);
- if (data.fd.fd < 0)
- ret = data.fd.fd;
- break;
- }
- case ION_IOC_IMPORT:
- {
- struct ion_handle *handle;
-
- handle = ion_import_dma_buf_fd(client, data.fd.fd);
- if (IS_ERR(handle))
- ret = PTR_ERR(handle);
- else
- data.handle.handle = handle->id;
- break;
- }
- case ION_IOC_SYNC:
- {
- ret = ion_sync_for_device(client, data.fd.fd);
- break;
- }
- case ION_IOC_CUSTOM:
- {
- if (!dev->custom_ioctl)
- return -ENOTTY;
- ret = dev->custom_ioctl(client, data.custom.cmd,
- data.custom.arg);
- break;
- }
- default:
- return -ENOTTY;
+ cnt++;
+ if (cnt >= max_cnt)
+ break;
}
- if (dir & _IOC_READ) {
- if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
- if (cleanup_handle)
- ion_free(client, cleanup_handle);
- return -EFAULT;
- }
- }
+ query->cnt = cnt;
+out:
+ up_read(&dev->lock);
return ret;
}
@@ -1528,7 +1309,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
seq_printf(s, "%16s %16zu\n", "total ", total_size);
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
seq_printf(s, "%16s %16zu\n", "deferred free",
- heap->free_list_size);
+ heap->free_list_size);
seq_puts(s, "----------------------------------------------------\n");
if (heap->debug_show)
@@ -1588,8 +1369,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
struct dentry *debug_file;
- if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
- !heap->ops->unmap_dma)
+ if (!heap->ops->allocate || !heap->ops->free)
pr_err("%s: can not add heap with invalid ops struct.\n",
__func__);
@@ -1611,15 +1391,15 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
debug_file = debugfs_create_file(heap->name, 0664,
- dev->heaps_debug_root, heap,
- &debug_heap_fops);
+ dev->heaps_debug_root, heap,
+ &debug_heap_fops);
if (!debug_file) {
char buf[256], *path;
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to create heap debugfs at %s/%s\n",
- path, heap->name);
+ path, heap->name);
}
if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
@@ -1634,10 +1414,11 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
path = dentry_path(dev->heaps_debug_root, buf, 256);
pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
- path, debug_name);
+ path, debug_name);
}
}
+ dev->heap_cnt++;
up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
@@ -1702,38 +1483,3 @@ void ion_device_destroy(struct ion_device *dev)
kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);
-
-void __init ion_reserve(struct ion_platform_data *data)
-{
- int i;
-
- for (i = 0; i < data->nr; i++) {
- if (data->heaps[i].size == 0)
- continue;
-
- if (data->heaps[i].base == 0) {
- phys_addr_t paddr;
-
- paddr = memblock_alloc_base(data->heaps[i].size,
- data->heaps[i].align,
- MEMBLOCK_ALLOC_ANYWHERE);
- if (!paddr) {
- pr_err("%s: error allocating memblock for heap %d\n",
- __func__, i);
- continue;
- }
- data->heaps[i].base = paddr;
- } else {
- int ret = memblock_reserve(data->heaps[i].base,
- data->heaps[i].size);
- if (ret)
- pr_err("memblock reserve of %zx@%lx failed\n",
- data->heaps[i].size,
- data->heaps[i].base);
- }
- pr_info("%s: %s reserved base %lx size %zu\n", __func__,
- data->heaps[i].name,
- data->heaps[i].base,
- data->heaps[i].size);
- }
-}
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index a1331fc..93dafb4 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -73,17 +73,6 @@ struct ion_platform_data {
};
/**
- * ion_reserve() - reserve memory for ion heaps if applicable
- * @data: platform data specifying starting physical address and
- * size
- *
- * Calls memblock reserve to set aside memory for heaps that are
- * located at specific memory addresses or of specific sizes not
- * managed by the kernel
- */
-void ion_reserve(struct ion_platform_data *data);
-
-/**
* ion_client_create() - allocate a client and returns it
* @dev: the global ion device
* @name: used for debugging
@@ -130,36 +119,6 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
void ion_free(struct ion_client *client, struct ion_handle *handle);
/**
- * ion_phys - returns the physical address and len of a handle
- * @client: the client
- * @handle: the handle
- * @addr: a pointer to put the address in
- * @len: a pointer to put the length in
- *
- * This function queries the heap for a particular handle to get the
- * handle's physical address. It't output is only correct if
- * a heap returns physically contiguous memory -- in other cases
- * this api should not be implemented -- ion_sg_table should be used
- * instead. Returns -EINVAL if the handle is invalid. This has
- * no implications on the reference counting of the handle --
- * the returned value may not be valid if the caller is not
- * holding a reference.
- */
-int ion_phys(struct ion_client *client, struct ion_handle *handle,
- ion_phys_addr_t *addr, size_t *len);
-
-/**
- * ion_map_dma - return an sg_table describing a handle
- * @client: the client
- * @handle: the handle
- *
- * This function returns the sg_table describing
- * a particular ion handle.
- */
-struct sg_table *ion_sg_table(struct ion_client *client,
- struct ion_handle *handle);
-
-/**
* ion_map_kernel - create mapping for the given handle
* @client: the client
* @handle: handle to map
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index 1fb0d81..a8ea973 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -25,15 +25,17 @@
#include "ion.h"
#include "ion_priv.h"
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
+
struct ion_carveout_heap {
struct ion_heap heap;
struct gen_pool *pool;
ion_phys_addr_t base;
};
-ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
- unsigned long size,
- unsigned long align)
+static ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+ unsigned long size,
+ unsigned long align)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -45,8 +47,8 @@ ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
return offset;
}
-void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
- unsigned long size)
+static void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -56,19 +58,6 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
gen_pool_free(carveout_heap->pool, addr, size);
}
-static int ion_carveout_heap_phys(struct ion_heap *heap,
- struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct sg_table *table = buffer->priv_virt;
- struct page *page = sg_page(table->sgl);
- ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
-
- *addr = paddr;
- *len = buffer->size;
- return 0;
-}
-
static int ion_carveout_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long size, unsigned long align,
@@ -95,7 +84,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
}
sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
- buffer->priv_virt = table;
+ buffer->sg_table = table;
return 0;
@@ -109,7 +98,7 @@ err_free:
static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
struct ion_heap *heap = buffer->heap;
- struct sg_table *table = buffer->priv_virt;
+ struct sg_table *table = buffer->sg_table;
struct page *page = sg_page(table->sgl);
ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
@@ -124,23 +113,9 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
kfree(table);
}
-static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return buffer->priv_virt;
-}
-
-static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
static struct ion_heap_ops carveout_heap_ops = {
.allocate = ion_carveout_heap_allocate,
.free = ion_carveout_heap_free,
- .phys = ion_carveout_heap_phys,
- .map_dma = ion_carveout_heap_map_dma,
- .unmap_dma = ion_carveout_heap_unmap_dma,
.map_user = ion_heap_map_user,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index e0553fe..70495dc 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -34,9 +34,9 @@ struct ion_chunk_heap {
};
static int ion_chunk_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
{
struct ion_chunk_heap *chunk_heap =
container_of(heap, struct ion_chunk_heap, heap);
@@ -71,11 +71,11 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
if (!paddr)
goto err;
sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
- chunk_heap->chunk_size, 0);
+ chunk_heap->chunk_size, 0);
sg = sg_next(sg);
}
- buffer->priv_virt = table;
+ buffer->sg_table = table;
chunk_heap->allocated += allocated_size;
return 0;
err:
@@ -95,7 +95,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
struct ion_heap *heap = buffer->heap;
struct ion_chunk_heap *chunk_heap =
container_of(heap, struct ion_chunk_heap, heap);
- struct sg_table *table = buffer->priv_virt;
+ struct sg_table *table = buffer->sg_table;
struct scatterlist *sg;
int i;
unsigned long allocated_size;
@@ -106,7 +106,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
if (ion_buffer_cached(buffer))
dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
for_each_sg(table->sgl, sg, table->nents, i) {
gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
@@ -117,22 +117,9 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
kfree(table);
}
-static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return buffer->priv_virt;
-}
-
-static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
static struct ion_heap_ops chunk_heap_ops = {
.allocate = ion_chunk_heap_allocate,
.free = ion_chunk_heap_free,
- .map_dma = ion_chunk_heap_map_dma,
- .unmap_dma = ion_chunk_heap_unmap_dma,
.map_user = ion_heap_map_user,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
@@ -174,7 +161,7 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
pr_debug("%s: base %lu size %zu align %ld\n", __func__,
- chunk_heap->base, heap_data->size, heap_data->align);
+ chunk_heap->base, heap_data->size, heap_data->align);
return &chunk_heap->heap;
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index a3446da..6c7de74 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -78,6 +78,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
goto free_table;
/* keep this for memory release */
buffer->priv_virt = info;
+ buffer->sg_table = info->table;
dev_dbg(dev, "Allocate buffer %p\n", buffer);
return 0;
@@ -105,36 +106,6 @@ static void ion_cma_free(struct ion_buffer *buffer)
kfree(info);
}
-/* return physical address in addr */
-static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
- struct device *dev = cma_heap->dev;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
- &info->handle);
-
- *addr = info->handle;
- *len = buffer->size;
-
- return 0;
-}
-
-static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- return info->table;
-}
-
-static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
struct vm_area_struct *vma)
{
@@ -155,16 +126,13 @@ static void *ion_cma_map_kernel(struct ion_heap *heap,
}
static void ion_cma_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
+ struct ion_buffer *buffer)
{
}
static struct ion_heap_ops ion_cma_ops = {
.allocate = ion_cma_allocate,
.free = ion_cma_free,
- .map_dma = ion_cma_heap_map_dma,
- .unmap_dma = ion_cma_heap_unmap_dma,
- .phys = ion_cma_phys,
.map_user = ion_cma_mmap,
.map_kernel = ion_cma_map_kernel,
.unmap_kernel = ion_cma_unmap_kernel,
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index 814a3c9..b23f2c7 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -99,7 +99,7 @@ static int __init ion_dummy_init(void)
struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
- !heap_data->base)
+ !heap_data->base)
continue;
if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
@@ -120,12 +120,12 @@ err:
if (carveout_ptr) {
free_pages_exact(carveout_ptr,
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
carveout_ptr = NULL;
}
if (chunk_ptr) {
free_pages_exact(chunk_ptr,
- dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
chunk_ptr = NULL;
}
return err;
@@ -144,12 +144,12 @@ static void __exit ion_dummy_exit(void)
if (carveout_ptr) {
free_pages_exact(carveout_ptr,
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
carveout_ptr = NULL;
}
if (chunk_ptr) {
free_pages_exact(chunk_ptr,
- dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
chunk_ptr = NULL;
}
}
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index ca15a87..4e5c0f1 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -93,7 +93,7 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
}
len = min(len, remainder);
ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
- vma->vm_page_prot);
+ vma->vm_page_prot);
if (ret)
return ret;
addr += len;
@@ -116,7 +116,7 @@ static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
}
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
- pgprot_t pgprot)
+ pgprot_t pgprot)
{
int p = 0;
int ret = 0;
@@ -181,7 +181,7 @@ size_t ion_heap_freelist_size(struct ion_heap *heap)
}
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
- bool skip_pools)
+ bool skip_pools)
{
struct ion_buffer *buffer;
size_t total_drained = 0;
@@ -266,7 +266,7 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
}
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
- struct shrink_control *sc)
+ struct shrink_control *sc)
{
struct ion_heap *heap = container_of(shrinker, struct ion_heap,
shrinker);
@@ -279,7 +279,7 @@ static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
}
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
- struct shrink_control *sc)
+ struct shrink_control *sc)
{
struct ion_heap *heap = container_of(shrinker, struct ion_heap,
shrinker);
diff --git a/drivers/staging/android/ion/ion_of.c b/drivers/staging/android/ion/ion_of.c
new file mode 100644
index 0000000..15bac92
--- /dev/null
+++ b/drivers/staging/android/ion/ion_of.c
@@ -0,0 +1,185 @@
+/*
+ * Based on work from:
+ * Andrew Andrianov <andrew@ncrmnt.org>
+ * Google
+ * The Linux Foundation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/cma.h>
+#include <linux/dma-contiguous.h>
+#include <linux/io.h>
+#include <linux/of_reserved_mem.h>
+#include "ion.h"
+#include "ion_priv.h"
+#include "ion_of.h"
+
+static int ion_parse_dt_heap_common(struct device_node *heap_node,
+ struct ion_platform_heap *heap,
+ struct ion_of_heap *compatible)
+{
+ int i;
+
+ for (i = 0; compatible[i].name; i++) {
+ if (of_device_is_compatible(heap_node, compatible[i].compat))
+ break;
+ }
+
+ if (!compatible[i].name)
+ return -ENODEV;
+
+ heap->id = compatible[i].heap_id;
+ heap->type = compatible[i].type;
+ heap->name = compatible[i].name;
+ heap->align = compatible[i].align;
+
+ /* Some kind of callback function pointer? */
+
+ pr_info("%s: id %d type %d name %s align %lx\n", __func__,
+ heap->id, heap->type, heap->name, heap->align);
+ return 0;
+}
+
+static int ion_setup_heap_common(struct platform_device *parent,
+ struct device_node *heap_node,
+ struct ion_platform_heap *heap)
+{
+ int ret = 0;
+
+ switch (heap->type) {
+ case ION_HEAP_TYPE_CARVEOUT:
+ case ION_HEAP_TYPE_CHUNK:
+ if (heap->base && heap->size)
+ return 0;
+
+ ret = of_reserved_mem_device_init(heap->priv);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
+ struct ion_of_heap *compatible)
+{
+ int num_heaps, ret;
+ const struct device_node *dt_node = pdev->dev.of_node;
+ struct device_node *node;
+ struct ion_platform_heap *heaps;
+ struct ion_platform_data *data;
+ int i = 0;
+
+ num_heaps = of_get_available_child_count(dt_node);
+
+ if (!num_heaps)
+ return ERR_PTR(-EINVAL);
+
+ heaps = devm_kzalloc(&pdev->dev,
+ sizeof(struct ion_platform_heap) * num_heaps,
+ GFP_KERNEL);
+ if (!heaps)
+ return ERR_PTR(-ENOMEM);
+
+ data = devm_kzalloc(&pdev->dev, sizeof(struct ion_platform_data),
+ GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ for_each_available_child_of_node(dt_node, node) {
+ struct platform_device *heap_pdev;
+
+ ret = ion_parse_dt_heap_common(node, &heaps[i], compatible);
+ if (ret)
+ return ERR_PTR(ret);
+
+ heap_pdev = of_platform_device_create(node, heaps[i].name,
+ &pdev->dev);
+ if (!heap_pdev)
+ return ERR_PTR(-ENOMEM);
+ heap_pdev->dev.platform_data = &heaps[i];
+
+ heaps[i].priv = &heap_pdev->dev;
+
+ ret = ion_setup_heap_common(pdev, node, &heaps[i]);
+ if (ret)
+ goto out_err;
+ i++;
+ }
+
+ data->heaps = heaps;
+ data->nr = num_heaps;
+ return data;
+
+out_err:
+ for ( ; i >= 0; i--)
+ if (heaps[i].priv)
+ of_device_unregister(to_platform_device(heaps[i].priv));
+
+ return ERR_PTR(ret);
+}
+
+void ion_destroy_platform_data(struct ion_platform_data *data)
+{
+ int i;
+
+ for (i = 0; i < data->nr; i++)
+ if (data->heaps[i].priv)
+ of_device_unregister(to_platform_device(
+ data->heaps[i].priv));
+}
+
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+static int rmem_ion_device_init(struct reserved_mem *rmem, struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ion_platform_heap *heap = pdev->dev.platform_data;
+
+ heap->base = rmem->base;
+ heap->size = rmem->size;
+ pr_debug("%s: heap %s base %pa size %pa dev %p\n", __func__,
+ heap->name, &rmem->base, &rmem->size, dev);
+ return 0;
+}
+
+static void rmem_ion_device_release(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ return;
+}
+
+static const struct reserved_mem_ops rmem_dma_ops = {
+ .device_init = rmem_ion_device_init,
+ .device_release = rmem_ion_device_release,
+};
+
+static int __init rmem_ion_setup(struct reserved_mem *rmem)
+{
+ phys_addr_t size = rmem->size;
+
+ size = size / 1024;
+
+ pr_info("Ion memory setup at %pa size %pa KiB\n",
+ &rmem->base, &size);
+ rmem->ops = &rmem_dma_ops;
+ return 0;
+}
+
+RESERVEDMEM_OF_DECLARE(ion, "ion-region", rmem_ion_setup);
+#endif
diff --git a/drivers/staging/android/ion/ion_of.h b/drivers/staging/android/ion/ion_of.h
new file mode 100644
index 0000000..8241a17
--- /dev/null
+++ b/drivers/staging/android/ion/ion_of.h
@@ -0,0 +1,37 @@
+/*
+ * Based on work from:
+ * Andrew Andrianov <andrew@ncrmnt.org>
+ * Google
+ * The Linux Foundation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ION_OF_H
+#define _ION_OF_H
+
+struct ion_of_heap {
+ const char *compat;
+ int heap_id;
+ int type;
+ const char *name;
+ int align;
+};
+
+#define PLATFORM_HEAP(_compat, _id, _type, _name) \
+{ \
+ .compat = _compat, \
+ .heap_id = _id, \
+ .type = _type, \
+ .name = _name, \
+ .align = PAGE_SIZE, \
+}
+
+struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
+ struct ion_of_heap *compatible);
+
+void ion_destroy_platform_data(struct ion_platform_data *data);
+
+#endif
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 1fe8016..aea89c1 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -30,8 +30,9 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
if (!page)
return NULL;
- ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
- DMA_BIDIRECTIONAL);
+ if (!pool->cached)
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+ DMA_BIDIRECTIONAL);
return page;
}
@@ -114,7 +115,7 @@ static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
}
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
- int nr_to_scan)
+ int nr_to_scan)
{
int freed = 0;
bool high;
@@ -147,7 +148,8 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
return freed;
}
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
+ bool cached)
{
struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
@@ -161,6 +163,8 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
pool->order = order;
mutex_init(&pool->mutex);
plist_node_init(&pool->list, order);
+ if (cached)
+ pool->cached = true;
return pool;
}
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index 0239883..3c3b324 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -26,11 +26,10 @@
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
+#include <linux/miscdevice.h>
#include "ion.h"
-struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
-
/**
* struct ion_buffer - metadata for a particular buffer
* @ref: reference count
@@ -42,8 +41,6 @@ struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
* @size: size of the buffer
* @priv_virt: private data to the buffer representable as
* a void *
- * @priv_phys: private data to the buffer representable as
- * an ion_phys_addr_t (and someday a phys_addr_t)
* @lock: protects the buffers cnt fields
* @kmap_cnt: number of times the buffer is mapped to the kernel
* @vaddr: the kernel mapping if kmap_cnt is not zero
@@ -69,10 +66,7 @@ struct ion_buffer {
unsigned long flags;
unsigned long private_flags;
size_t size;
- union {
- void *priv_virt;
- ion_phys_addr_t priv_phys;
- };
+ void *priv_virt;
struct mutex lock;
int kmap_cnt;
void *vaddr;
@@ -88,13 +82,84 @@ struct ion_buffer {
void ion_buffer_destroy(struct ion_buffer *buffer);
/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev: the actual misc device
+ * @buffers: an rb tree of all the existing buffers
+ * @buffer_lock: lock protecting the tree of buffers
+ * @lock: rwsem protecting the tree of heaps and clients
+ * @heaps: list of all the heaps in the system
+ * @user_clients: list of all the clients created from userspace
+ */
+struct ion_device {
+ struct miscdevice dev;
+ struct rb_root buffers;
+ struct mutex buffer_lock;
+ struct rw_semaphore lock;
+ struct plist_head heaps;
+ long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
+ unsigned long arg);
+ struct rb_root clients;
+ struct dentry *debug_root;
+ struct dentry *heaps_debug_root;
+ struct dentry *clients_debug_root;
+ int heap_cnt;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node: node in the tree of all clients
+ * @dev: backpointer to ion device
+ * @handles: an rb tree of all the handles in this client
+ * @idr: an idr space for allocating handle ids
+ * @lock: lock protecting the tree of handles
+ * @name: used for debugging
+ * @display_name: used for debugging (unique version of @name)
+ * @display_serial: used for debugging (to make display_name unique)
+ * @task: used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both handles tree
+ * as well as the handles themselves, and should be held while modifying either.
+ */
+struct ion_client {
+ struct rb_node node;
+ struct ion_device *dev;
+ struct rb_root handles;
+ struct idr idr;
+ struct mutex lock;
+ const char *name;
+ char *display_name;
+ int display_serial;
+ struct task_struct *task;
+ pid_t pid;
+ struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref: reference count
+ * @client: back pointer to the client the buffer resides in
+ * @buffer: pointer to the buffer
+ * @node: node in the client's handle rbtree
+ * @kmap_cnt: count of times this client has mapped to kernel
+ * @id: client-unique id allocated by client->idr
+ *
+ * Modifications to node, map_cnt or mapping should be protected by the
+ * lock in the client. Other fields are never changed after initialization.
+ */
+struct ion_handle {
+ struct kref ref;
+ struct ion_client *client;
+ struct ion_buffer *buffer;
+ struct rb_node node;
+ unsigned int kmap_cnt;
+ int id;
+};
+
+/**
* struct ion_heap_ops - ops to operate on a given heap
* @allocate: allocate memory
* @free: free memory
- * @phys get physical address of a buffer (only define on
- * physically contiguous heaps)
- * @map_dma map the memory for dma to a scatterlist
- * @unmap_dma unmap the memory for dma
* @map_kernel map memory to the kernel
* @unmap_kernel unmap memory to the kernel
* @map_user map memory to userspace
@@ -111,11 +176,6 @@ struct ion_heap_ops {
struct ion_buffer *buffer, unsigned long len,
unsigned long align, unsigned long flags);
void (*free)(struct ion_buffer *buffer);
- int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len);
- struct sg_table * (*map_dma)(struct ion_heap *heap,
- struct ion_buffer *buffer);
- void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
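The net effect of these ops changes shows up in the heap conversions above: ->phys, ->map_dma and ->unmap_dma are gone, so ->allocate must now leave a valid buffer->sg_table behind before returning (ion_buffer_create() in ion.c warns if it does not). A minimal sketch of a heap written against the new contract, not taken from this patch, assuming the usual scatterlist/mm includes plus the generic ion_heap_map_* helpers:

	static int example_heap_allocate(struct ion_heap *heap,
					 struct ion_buffer *buffer,
					 unsigned long len, unsigned long align,
					 unsigned long flags)
	{
		struct sg_table *table;
		struct page *page;

		table = kmalloc(sizeof(*table), GFP_KERNEL);
		if (!table)
			return -ENOMEM;
		if (sg_alloc_table(table, 1, GFP_KERNEL))
			goto free_table;

		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(len));
		if (!page)
			goto free_sg;

		sg_set_page(table->sgl, page, PAGE_ALIGN(len), 0);
		buffer->sg_table = table;	/* required now that ->map_dma is gone */
		return 0;

	free_sg:
		sg_free_table(table);
	free_table:
		kfree(table);
		return -ENOMEM;
	}

	static void example_heap_free(struct ion_buffer *buffer)
	{
		struct sg_table *table = buffer->sg_table;

		__free_pages(sg_page(table->sgl), get_order(buffer->size));
		sg_free_table(table);
		kfree(table);
	}

	static struct ion_heap_ops example_heap_ops = {
		.allocate = example_heap_allocate,
		.free = example_heap_free,
		.map_kernel = ion_heap_map_kernel,
		.unmap_kernel = ion_heap_unmap_kernel,
		.map_user = ion_heap_map_user,
	};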
@@ -328,20 +388,6 @@ struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
void ion_cma_heap_destroy(struct ion_heap *);
/**
- * kernel api to allocate/free from carveout -- used when carveout is
- * used to back an architecture specific custom heap
- */
-ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
- unsigned long align);
-void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
- unsigned long size);
-/**
- * The carveout heap returns physical addresses, since 0 may be a valid
- * physical address, this is used to indicate allocation failed
- */
-#define ION_CARVEOUT_ALLOCATE_FAIL -1
-
-/**
* functions for creating and destroying a heap pool -- allows you
* to keep a pool of pre allocated memory to use from your heap. Keeping
* a pool of memory that is ready for dma, ie any cached mapping have been
@@ -360,6 +406,7 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
* @gfp_mask: gfp_mask to use from alloc
* @order: order of pages in the pool
* @list: plist node for list of pools
+ * @cached:		whether this pool holds cached pages
*
* Allows you to keep a pool of pre allocated pages to use from your heap.
* Keeping a pool of pages that is ready for dma, ie any cached mapping have
@@ -369,6 +416,7 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
struct ion_page_pool {
int high_count;
int low_count;
+ bool cached;
struct list_head high_items;
struct list_head low_items;
struct mutex mutex;
@@ -377,7 +425,8 @@ struct ion_page_pool {
struct plist_node list;
};
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
+ bool cached);
void ion_page_pool_destroy(struct ion_page_pool *);
struct page *ion_page_pool_alloc(struct ion_page_pool *);
void ion_page_pool_free(struct ion_page_pool *, struct page *);
@@ -403,4 +452,22 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
void ion_pages_sync_for_device(struct device *dev, struct page *page,
size_t size, enum dma_data_direction dir);
+long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+int ion_sync_for_device(struct ion_client *client, int fd);
+
+struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+ int id);
+
+void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
+
+int ion_handle_put_nolock(struct ion_handle *handle);
+
+struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+ int id);
+
+int ion_handle_put(struct ion_handle *handle);
+
+int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query);
+
#endif /* _ION_PRIV_H */
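The pool helpers above now take a cached flag at creation time. Below is a minimal sketch of a heap wiring up one uncached and one cached pool; the example_* helper names and the fixed order value are illustrative only, not part of this patch:

/*
 * Illustrative sketch, built on the prototypes declared in ion_priv.h
 * above (ion_page_pool_create/destroy); not taken from the patch itself.
 */
#include "ion_priv.h"

static struct ion_page_pool *example_uncached_pool;
static struct ion_page_pool *example_cached_pool;

static int example_pools_init(void)
{
	/* one uncached and one cached pool of order-4 (64 KiB) pages */
	example_uncached_pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO,
						     4, false);
	if (!example_uncached_pool)
		return -ENOMEM;

	example_cached_pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO,
						   4, true);
	if (!example_cached_pool) {
		ion_page_pool_destroy(example_uncached_pool);
		return -ENOMEM;
	}
	return 0;
}

static void example_pools_teardown(void)
{
	ion_page_pool_destroy(example_cached_pool);
	ion_page_pool_destroy(example_uncached_pool);
}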
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index b69dfc7..7e023d5 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -26,16 +26,18 @@
#include "ion.h"
#include "ion_priv.h"
+#define NUM_ORDERS ARRAY_SIZE(orders)
+
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
__GFP_NORETRY) & ~__GFP_RECLAIM;
-static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
+static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO);
static const unsigned int orders[] = {8, 4, 0};
-static const int num_orders = ARRAY_SIZE(orders);
+
static int order_to_index(unsigned int order)
{
int i;
- for (i = 0; i < num_orders; i++)
+ for (i = 0; i < NUM_ORDERS; i++)
if (order == orders[i])
return i;
BUG();
@@ -49,47 +51,55 @@ static inline unsigned int order_to_size(int order)
struct ion_system_heap {
struct ion_heap heap;
- struct ion_page_pool *pools[0];
+ struct ion_page_pool *uncached_pools[NUM_ORDERS];
+ struct ion_page_pool *cached_pools[NUM_ORDERS];
};
+/*
+ * Pages coming from the page pool are already zeroed. Cached buffers
+ * still need a cache clean after allocation. Uncached buffers have
+ * remained non-cached since they were first allocated, so no further
+ * maintenance is needed for them.
+ */
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
struct ion_buffer *buffer,
unsigned long order)
{
bool cached = ion_buffer_cached(buffer);
- struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ struct ion_page_pool *pool;
struct page *page;
- if (!cached) {
- page = ion_page_pool_alloc(pool);
- } else {
- gfp_t gfp_flags = low_order_gfp_flags;
+ if (!cached)
+ pool = heap->uncached_pools[order_to_index(order)];
+ else
+ pool = heap->cached_pools[order_to_index(order)];
- if (order > 4)
- gfp_flags = high_order_gfp_flags;
- page = alloc_pages(gfp_flags | __GFP_COMP, order);
- if (!page)
- return NULL;
- ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
- }
+ page = ion_page_pool_alloc(pool);
+ if (cached)
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
return page;
}
static void free_buffer_page(struct ion_system_heap *heap,
struct ion_buffer *buffer, struct page *page)
{
+ struct ion_page_pool *pool;
unsigned int order = compound_order(page);
bool cached = ion_buffer_cached(buffer);
- if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
- struct ion_page_pool *pool = heap->pools[order_to_index(order)];
-
- ion_page_pool_free(pool, page);
- } else {
+	/* return the pages straight to the system, bypassing the pools */
+ if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
__free_pages(page, order);
+ return;
}
+
+ if (!cached)
+ pool = heap->uncached_pools[order_to_index(order)];
+ else
+ pool = heap->cached_pools[order_to_index(order)];
+
+ ion_page_pool_free(pool, page);
}
@@ -101,7 +111,7 @@ static struct page *alloc_largest_available(struct ion_system_heap *heap,
struct page *page;
int i;
- for (i = 0; i < num_orders; i++) {
+ for (i = 0; i < NUM_ORDERS; i++) {
if (size < order_to_size(orders[i]))
continue;
if (max_order < orders[i])
@@ -118,9 +128,9 @@ static struct page *alloc_largest_available(struct ion_system_heap *heap,
}
static int ion_system_heap_allocate(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
- unsigned long flags)
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
{
struct ion_system_heap *sys_heap = container_of(heap,
struct ion_system_heap,
@@ -142,7 +152,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
INIT_LIST_HEAD(&pages);
while (size_remaining > 0) {
page = alloc_largest_available(sys_heap, buffer, size_remaining,
- max_order);
+ max_order);
if (!page)
goto free_pages;
list_add_tail(&page->lru, &pages);
@@ -164,7 +174,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
list_del(&page->lru);
}
- buffer->priv_virt = table;
+ buffer->sg_table = table;
return 0;
free_table:
@@ -181,16 +191,11 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
struct ion_system_heap,
heap);
struct sg_table *table = buffer->sg_table;
- bool cached = ion_buffer_cached(buffer);
struct scatterlist *sg;
int i;
- /*
- * uncached pages come from the page pools, zero them before returning
- * for security purposes (other allocations are zerod at
- * alloc time
- */
- if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
+	/* zero the buffer before returning its pages to the page pool */
+ if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
ion_heap_buffer_zero(buffer);
for_each_sg(table->sgl, sg, table->nents, i)
@@ -199,20 +204,11 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
kfree(table);
}
-static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return buffer->priv_virt;
-}
-
-static void ion_system_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
- int nr_to_scan)
+ int nr_to_scan)
{
+ struct ion_page_pool *uncached_pool;
+ struct ion_page_pool *cached_pool;
struct ion_system_heap *sys_heap;
int nr_total = 0;
int i, nr_freed;
@@ -223,28 +219,41 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
if (!nr_to_scan)
only_scan = 1;
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = sys_heap->pools[i];
-
- nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
- nr_total += nr_freed;
-
- if (!only_scan) {
+ for (i = 0; i < NUM_ORDERS; i++) {
+ uncached_pool = sys_heap->uncached_pools[i];
+ cached_pool = sys_heap->cached_pools[i];
+
+ if (only_scan) {
+ nr_total += ion_page_pool_shrink(uncached_pool,
+ gfp_mask,
+ nr_to_scan);
+
+ nr_total += ion_page_pool_shrink(cached_pool,
+ gfp_mask,
+ nr_to_scan);
+ } else {
+ nr_freed = ion_page_pool_shrink(uncached_pool,
+ gfp_mask,
+ nr_to_scan);
+ nr_to_scan -= nr_freed;
+ nr_total += nr_freed;
+ if (nr_to_scan <= 0)
+ break;
+ nr_freed = ion_page_pool_shrink(cached_pool,
+ gfp_mask,
+ nr_to_scan);
nr_to_scan -= nr_freed;
- /* shrink completed */
+ nr_total += nr_freed;
if (nr_to_scan <= 0)
break;
}
}
-
return nr_total;
}
static struct ion_heap_ops system_heap_ops = {
.allocate = ion_system_heap_allocate,
.free = ion_system_heap_free,
- .map_dma = ion_system_heap_map_dma,
- .unmap_dma = ion_system_heap_unmap_dma,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
.map_user = ion_heap_map_user,
@@ -259,52 +268,89 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
struct ion_system_heap,
heap);
int i;
+ struct ion_page_pool *pool;
+
+ for (i = 0; i < NUM_ORDERS; i++) {
+ pool = sys_heap->uncached_pools[i];
+
+ seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
+ pool->high_count, pool->order,
+ (PAGE_SIZE << pool->order) * pool->high_count);
+ seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
+ pool->low_count, pool->order,
+ (PAGE_SIZE << pool->order) * pool->low_count);
+ }
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = sys_heap->pools[i];
+ for (i = 0; i < NUM_ORDERS; i++) {
+ pool = sys_heap->cached_pools[i];
- seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
+ seq_printf(s, "%d order %u highmem pages cached %lu total\n",
pool->high_count, pool->order,
(PAGE_SIZE << pool->order) * pool->high_count);
- seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
+ seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
pool->low_count, pool->order,
(PAGE_SIZE << pool->order) * pool->low_count);
}
return 0;
}
+static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
+{
+ int i;
+
+ for (i = 0; i < NUM_ORDERS; i++)
+ if (pools[i])
+ ion_page_pool_destroy(pools[i]);
+}
+
+static int ion_system_heap_create_pools(struct ion_page_pool **pools,
+ bool cached)
+{
+ int i;
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ for (i = 0; i < NUM_ORDERS; i++) {
+ struct ion_page_pool *pool;
+
+ if (orders[i] > 4)
+ gfp_flags = high_order_gfp_flags;
+
+ pool = ion_page_pool_create(gfp_flags, orders[i], cached);
+ if (!pool)
+ goto err_create_pool;
+ pools[i] = pool;
+ }
+ return 0;
+
+err_create_pool:
+ ion_system_heap_destroy_pools(pools);
+ return -ENOMEM;
+}
+
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
struct ion_system_heap *heap;
- int i;
- heap = kzalloc(sizeof(struct ion_system_heap) +
- sizeof(struct ion_page_pool *) * num_orders,
- GFP_KERNEL);
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
if (!heap)
return ERR_PTR(-ENOMEM);
heap->heap.ops = &system_heap_ops;
heap->heap.type = ION_HEAP_TYPE_SYSTEM;
heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool;
- gfp_t gfp_flags = low_order_gfp_flags;
+ if (ion_system_heap_create_pools(heap->uncached_pools, false))
+ goto free_heap;
- if (orders[i] > 4)
- gfp_flags = high_order_gfp_flags;
- pool = ion_page_pool_create(gfp_flags, orders[i]);
- if (!pool)
- goto destroy_pools;
- heap->pools[i] = pool;
- }
+ if (ion_system_heap_create_pools(heap->cached_pools, true))
+ goto destroy_uncached_pools;
heap->heap.debug_show = ion_system_heap_debug_show;
return &heap->heap;
-destroy_pools:
- while (i--)
- ion_page_pool_destroy(heap->pools[i]);
+destroy_uncached_pools:
+ ion_system_heap_destroy_pools(heap->uncached_pools);
+
+free_heap:
kfree(heap);
return ERR_PTR(-ENOMEM);
}
@@ -316,8 +362,10 @@ void ion_system_heap_destroy(struct ion_heap *heap)
heap);
int i;
- for (i = 0; i < num_orders; i++)
- ion_page_pool_destroy(sys_heap->pools[i]);
+ for (i = 0; i < NUM_ORDERS; i++) {
+ ion_page_pool_destroy(sys_heap->uncached_pools[i]);
+ ion_page_pool_destroy(sys_heap->cached_pools[i]);
+ }
kfree(sys_heap);
}
@@ -358,7 +406,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
sg_set_page(table->sgl, page, len, 0);
- buffer->priv_virt = table;
+ buffer->sg_table = table;
ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
@@ -375,7 +423,7 @@ free_pages:
static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
- struct sg_table *table = buffer->priv_virt;
+ struct sg_table *table = buffer->sg_table;
struct page *page = sg_page(table->sgl);
unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
unsigned long i;
@@ -386,34 +434,9 @@ static void ion_system_contig_heap_free(struct ion_buffer *buffer)
kfree(table);
}
-static int ion_system_contig_heap_phys(struct ion_heap *heap,
- struct ion_buffer *buffer,
- ion_phys_addr_t *addr, size_t *len)
-{
- struct sg_table *table = buffer->priv_virt;
- struct page *page = sg_page(table->sgl);
- *addr = page_to_phys(page);
- *len = buffer->size;
- return 0;
-}
-
-static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- return buffer->priv_virt;
-}
-
-static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
-}
-
static struct ion_heap_ops kmalloc_ops = {
.allocate = ion_system_contig_heap_allocate,
.free = ion_system_contig_heap_free,
- .phys = ion_system_contig_heap_phys,
- .map_dma = ion_system_contig_heap_map_dma,
- .unmap_dma = ion_system_contig_heap_unmap_dma,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
.map_user = ion_heap_map_user,
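With the system heap now split into cached and uncached pools, the allocation path still walks the orders[] table largest-first. A rough worked example of that chunking follows, assuming 4 KiB pages; it is a standalone userspace illustration, not part of the patch:

/*
 * Userspace illustration only (assumes 4 KiB pages): shows how the
 * orders[] = {8, 4, 0} table drives alloc_largest_available() above --
 * the heap repeatedly takes the largest chunk that still fits.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int orders[] = { 8, 4, 0 };
	const unsigned long page_size = 4096;
	unsigned long remaining = 1536 * 1024;	/* a 1.5 MiB request */
	unsigned int i;

	while (remaining) {
		for (i = 0; i < 3; i++) {
			unsigned long chunk = page_size << orders[i];

			if (remaining < chunk)
				continue;
			printf("take order-%u chunk (%lu KiB), %lu KiB left\n",
			       orders[i], chunk / 1024,
			       (remaining - chunk) / 1024);
			remaining -= chunk;
			break;
		}
	}
	return 0;
}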
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
index 5a396a1..5abf8320 100644
--- a/drivers/staging/android/ion/ion_test.c
+++ b/drivers/staging/android/ion/ion_test.c
@@ -42,7 +42,8 @@ struct ion_test_data {
};
static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf,
- void __user *ptr, size_t offset, size_t size, bool write)
+ void __user *ptr, size_t offset, size_t size,
+ bool write)
{
int ret = 0;
struct dma_buf_attachment *attach;
@@ -98,7 +99,7 @@ err:
}
static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
- size_t offset, size_t size, bool write)
+ size_t offset, size_t size, bool write)
{
int ret;
unsigned long page_offset = offset >> PAGE_SHIFT;
@@ -144,7 +145,7 @@ err:
}
static long ion_test_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg)
{
struct ion_test_data *test_data = filp->private_data;
int ret = 0;
@@ -179,17 +180,19 @@ static long ion_test_ioctl(struct file *filp, unsigned int cmd,
case ION_IOC_TEST_DMA_MAPPING:
{
ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf,
- u64_to_uptr(data.test_rw.ptr),
- data.test_rw.offset, data.test_rw.size,
- data.test_rw.write);
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset,
+ data.test_rw.size,
+ data.test_rw.write);
break;
}
case ION_IOC_TEST_KERNEL_MAPPING:
{
ret = ion_handle_test_kernel(test_data->dma_buf,
- u64_to_uptr(data.test_rw.ptr),
- data.test_rw.offset, data.test_rw.size,
- data.test_rw.write);
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset,
+ data.test_rw.size,
+ data.test_rw.write);
break;
}
default:
@@ -242,7 +245,7 @@ static int __init ion_test_probe(struct platform_device *pdev)
struct ion_test_device *testdev;
testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!testdev)
return -ENOMEM;
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 45a1b4e..ec3b665 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -92,8 +92,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
int array_size = ARRAY_SIZE(lowmem_adj);
int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
int other_file = global_node_page_state(NR_FILE_PAGES) -
- global_node_page_state(NR_SHMEM) -
- total_swapcache_pages();
+ global_node_page_state(NR_SHMEM) -
+ total_swapcache_pages();
if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
@@ -204,10 +204,9 @@ device_initcall(lowmem_init);
* not really modular, but the easiest way to keep compat with existing
* bootargs behaviour is to continue using module_param here.
*/
-module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
-module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size,
- S_IRUGO | S_IWUSR);
+module_param_named(cost, lowmem_shrinker.seeks, int, 0644);
+module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size, 0644);
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
- S_IRUGO | S_IWUSR);
-module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
+ 0644);
+module_param_named(debug_level, lowmem_debug_level, uint, 0644);
diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c
deleted file mode 100644
index 115c917..0000000
--- a/drivers/staging/android/sw_sync.c
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * drivers/dma-buf/sw_sync.c
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/slab.h>
-#include <linux/sync_file.h>
-
-#include "sync_debug.h"
-
-#define CREATE_TRACE_POINTS
-#include "trace/sync.h"
-
-struct sw_sync_create_fence_data {
- __u32 value;
- char name[32];
- __s32 fence; /* fd of new fence */
-};
-
-#define SW_SYNC_IOC_MAGIC 'W'
-
-#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
- struct sw_sync_create_fence_data)
-#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-
-static const struct fence_ops timeline_fence_ops;
-
-static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
-{
- if (fence->ops != &timeline_fence_ops)
- return NULL;
- return container_of(fence, struct sync_pt, base);
-}
-
-/**
- * sync_timeline_create() - creates a sync object
- * @name: sync_timeline name
- *
- * Creates a new sync_timeline. Returns the sync_timeline object or NULL in
- * case of error.
- */
-struct sync_timeline *sync_timeline_create(const char *name)
-{
- struct sync_timeline *obj;
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (!obj)
- return NULL;
-
- kref_init(&obj->kref);
- obj->context = fence_context_alloc(1);
- strlcpy(obj->name, name, sizeof(obj->name));
-
- INIT_LIST_HEAD(&obj->child_list_head);
- INIT_LIST_HEAD(&obj->active_list_head);
- spin_lock_init(&obj->child_list_lock);
-
- sync_timeline_debug_add(obj);
-
- return obj;
-}
-
-static void sync_timeline_free(struct kref *kref)
-{
- struct sync_timeline *obj =
- container_of(kref, struct sync_timeline, kref);
-
- sync_timeline_debug_remove(obj);
-
- kfree(obj);
-}
-
-static void sync_timeline_get(struct sync_timeline *obj)
-{
- kref_get(&obj->kref);
-}
-
-static void sync_timeline_put(struct sync_timeline *obj)
-{
- kref_put(&obj->kref, sync_timeline_free);
-}
-
-/**
- * sync_timeline_signal() - signal a status change on a sync_timeline
- * @obj: sync_timeline to signal
- * @inc: num to increment on timeline->value
- *
- * A sync implementation should call this any time one of it's fences
- * has signaled or has an error condition.
- */
-static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
-{
- unsigned long flags;
- struct sync_pt *pt, *next;
-
- trace_sync_timeline(obj);
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
-
- obj->value += inc;
-
- list_for_each_entry_safe(pt, next, &obj->active_list_head,
- active_list) {
- if (fence_is_signaled_locked(&pt->base))
- list_del_init(&pt->active_list);
- }
-
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
-/**
- * sync_pt_create() - creates a sync pt
- * @parent: fence's parent sync_timeline
- * @size: size to allocate for this pt
- * @inc: value of the fence
- *
- * Creates a new sync_pt as a child of @parent. @size bytes will be
- * allocated allowing for implementation specific data to be kept after
- * the generic sync_timeline struct. Returns the sync_pt object or
- * NULL in case of error.
- */
-static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
- unsigned int value)
-{
- unsigned long flags;
- struct sync_pt *pt;
-
- if (size < sizeof(*pt))
- return NULL;
-
- pt = kzalloc(size, GFP_KERNEL);
- if (!pt)
- return NULL;
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
- sync_timeline_get(obj);
- fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
- obj->context, value);
- list_add_tail(&pt->child_list, &obj->child_list_head);
- INIT_LIST_HEAD(&pt->active_list);
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
- return pt;
-}
-
-static const char *timeline_fence_get_driver_name(struct fence *fence)
-{
- return "sw_sync";
-}
-
-static const char *timeline_fence_get_timeline_name(struct fence *fence)
-{
- struct sync_timeline *parent = fence_parent(fence);
-
- return parent->name;
-}
-
-static void timeline_fence_release(struct fence *fence)
-{
- struct sync_pt *pt = fence_to_sync_pt(fence);
- struct sync_timeline *parent = fence_parent(fence);
- unsigned long flags;
-
- spin_lock_irqsave(fence->lock, flags);
- list_del(&pt->child_list);
- if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
- list_del(&pt->active_list);
- spin_unlock_irqrestore(fence->lock, flags);
-
- sync_timeline_put(parent);
- fence_free(fence);
-}
-
-static bool timeline_fence_signaled(struct fence *fence)
-{
- struct sync_timeline *parent = fence_parent(fence);
-
- return (fence->seqno > parent->value) ? false : true;
-}
-
-static bool timeline_fence_enable_signaling(struct fence *fence)
-{
- struct sync_pt *pt = fence_to_sync_pt(fence);
- struct sync_timeline *parent = fence_parent(fence);
-
- if (timeline_fence_signaled(fence))
- return false;
-
- list_add_tail(&pt->active_list, &parent->active_list_head);
- return true;
-}
-
-static void timeline_fence_value_str(struct fence *fence,
- char *str, int size)
-{
- snprintf(str, size, "%d", fence->seqno);
-}
-
-static void timeline_fence_timeline_value_str(struct fence *fence,
- char *str, int size)
-{
- struct sync_timeline *parent = fence_parent(fence);
-
- snprintf(str, size, "%d", parent->value);
-}
-
-static const struct fence_ops timeline_fence_ops = {
- .get_driver_name = timeline_fence_get_driver_name,
- .get_timeline_name = timeline_fence_get_timeline_name,
- .enable_signaling = timeline_fence_enable_signaling,
- .signaled = timeline_fence_signaled,
- .wait = fence_default_wait,
- .release = timeline_fence_release,
- .fence_value_str = timeline_fence_value_str,
- .timeline_value_str = timeline_fence_timeline_value_str,
-};
-
-/*
- * *WARNING*
- *
- * improper use of this can result in deadlocking kernel drivers from userspace.
- */
-
-/* opening sw_sync create a new sync obj */
-static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
-{
- struct sync_timeline *obj;
- char task_comm[TASK_COMM_LEN];
-
- get_task_comm(task_comm, current);
-
- obj = sync_timeline_create(task_comm);
- if (!obj)
- return -ENOMEM;
-
- file->private_data = obj;
-
- return 0;
-}
-
-static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
-{
- struct sync_timeline *obj = file->private_data;
-
- smp_wmb();
-
- sync_timeline_put(obj);
- return 0;
-}
-
-static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
- unsigned long arg)
-{
- int fd = get_unused_fd_flags(O_CLOEXEC);
- int err;
- struct sync_pt *pt;
- struct sync_file *sync_file;
- struct sw_sync_create_fence_data data;
-
- if (fd < 0)
- return fd;
-
- if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
- err = -EFAULT;
- goto err;
- }
-
- pt = sync_pt_create(obj, sizeof(*pt), data.value);
- if (!pt) {
- err = -ENOMEM;
- goto err;
- }
-
- sync_file = sync_file_create(&pt->base);
- if (!sync_file) {
- fence_put(&pt->base);
- err = -ENOMEM;
- goto err;
- }
-
- data.fence = fd;
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- fput(sync_file->file);
- err = -EFAULT;
- goto err;
- }
-
- fd_install(fd, sync_file->file);
-
- return 0;
-
-err:
- put_unused_fd(fd);
- return err;
-}
-
-static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
-{
- u32 value;
-
- if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
- return -EFAULT;
-
- sync_timeline_signal(obj, value);
-
- return 0;
-}
-
-static long sw_sync_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct sync_timeline *obj = file->private_data;
-
- switch (cmd) {
- case SW_SYNC_IOC_CREATE_FENCE:
- return sw_sync_ioctl_create_fence(obj, arg);
-
- case SW_SYNC_IOC_INC:
- return sw_sync_ioctl_inc(obj, arg);
-
- default:
- return -ENOTTY;
- }
-}
-
-const struct file_operations sw_sync_debugfs_fops = {
- .open = sw_sync_debugfs_open,
- .release = sw_sync_debugfs_release,
- .unlocked_ioctl = sw_sync_ioctl,
- .compat_ioctl = sw_sync_ioctl,
-};
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
deleted file mode 100644
index 4c5a855..0000000
--- a/drivers/staging/android/sync_debug.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * drivers/base/sync.c
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/debugfs.h>
-#include "sync_debug.h"
-
-static struct dentry *dbgfs;
-
-static LIST_HEAD(sync_timeline_list_head);
-static DEFINE_SPINLOCK(sync_timeline_list_lock);
-static LIST_HEAD(sync_file_list_head);
-static DEFINE_SPINLOCK(sync_file_list_lock);
-
-void sync_timeline_debug_add(struct sync_timeline *obj)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_timeline_list_lock, flags);
- list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
- spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
-}
-
-void sync_timeline_debug_remove(struct sync_timeline *obj)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_timeline_list_lock, flags);
- list_del(&obj->sync_timeline_list);
- spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
-}
-
-void sync_file_debug_add(struct sync_file *sync_file)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_file_list_lock, flags);
- list_add_tail(&sync_file->sync_file_list, &sync_file_list_head);
- spin_unlock_irqrestore(&sync_file_list_lock, flags);
-}
-
-void sync_file_debug_remove(struct sync_file *sync_file)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_file_list_lock, flags);
- list_del(&sync_file->sync_file_list);
- spin_unlock_irqrestore(&sync_file_list_lock, flags);
-}
-
-static const char *sync_status_str(int status)
-{
- if (status == 0)
- return "signaled";
-
- if (status > 0)
- return "active";
-
- return "error";
-}
-
-static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
-{
- int status = 1;
- struct sync_timeline *parent = fence_parent(fence);
-
- if (fence_is_signaled_locked(fence))
- status = fence->status;
-
- seq_printf(s, " %s%sfence %s",
- show ? parent->name : "",
- show ? "_" : "",
- sync_status_str(status));
-
- if (status <= 0) {
- struct timespec64 ts64 =
- ktime_to_timespec64(fence->timestamp);
-
- seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
- }
-
- if (fence->ops->timeline_value_str &&
- fence->ops->fence_value_str) {
- char value[64];
- bool success;
-
- fence->ops->fence_value_str(fence, value, sizeof(value));
- success = strlen(value);
-
- if (success) {
- seq_printf(s, ": %s", value);
-
- fence->ops->timeline_value_str(fence, value,
- sizeof(value));
-
- if (strlen(value))
- seq_printf(s, " / %s", value);
- }
- }
-
- seq_puts(s, "\n");
-}
-
-static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
-{
- struct list_head *pos;
- unsigned long flags;
-
- seq_printf(s, "%s: %d\n", obj->name, obj->value);
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
- list_for_each(pos, &obj->child_list_head) {
- struct sync_pt *pt =
- container_of(pos, struct sync_pt, child_list);
- sync_print_fence(s, &pt->base, false);
- }
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
-static void sync_print_sync_file(struct seq_file *s,
- struct sync_file *sync_file)
-{
- int i;
-
- seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
- sync_status_str(atomic_read(&sync_file->status)));
-
- for (i = 0; i < sync_file->num_fences; ++i)
- sync_print_fence(s, sync_file->cbs[i].fence, true);
-}
-
-static int sync_debugfs_show(struct seq_file *s, void *unused)
-{
- unsigned long flags;
- struct list_head *pos;
-
- seq_puts(s, "objs:\n--------------\n");
-
- spin_lock_irqsave(&sync_timeline_list_lock, flags);
- list_for_each(pos, &sync_timeline_list_head) {
- struct sync_timeline *obj =
- container_of(pos, struct sync_timeline,
- sync_timeline_list);
-
- sync_print_obj(s, obj);
- seq_puts(s, "\n");
- }
- spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
-
- seq_puts(s, "fences:\n--------------\n");
-
- spin_lock_irqsave(&sync_file_list_lock, flags);
- list_for_each(pos, &sync_file_list_head) {
- struct sync_file *sync_file =
- container_of(pos, struct sync_file, sync_file_list);
-
- sync_print_sync_file(s, sync_file);
- seq_puts(s, "\n");
- }
- spin_unlock_irqrestore(&sync_file_list_lock, flags);
- return 0;
-}
-
-static int sync_info_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sync_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations sync_info_debugfs_fops = {
- .open = sync_info_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static __init int sync_debugfs_init(void)
-{
- dbgfs = debugfs_create_dir("sync", NULL);
-
- /*
- * The debugfs files won't ever get removed and thus, there is
- * no need to protect it against removal races. The use of
- * debugfs_create_file_unsafe() is actually safe here.
- */
- debugfs_create_file_unsafe("info", 0444, dbgfs, NULL,
- &sync_info_debugfs_fops);
- debugfs_create_file_unsafe("sw_sync", 0644, dbgfs, NULL,
- &sw_sync_debugfs_fops);
-
- return 0;
-}
-late_initcall(sync_debugfs_init);
-
-#define DUMP_CHUNK 256
-static char sync_dump_buf[64 * 1024];
-void sync_dump(void)
-{
- struct seq_file s = {
- .buf = sync_dump_buf,
- .size = sizeof(sync_dump_buf) - 1,
- };
- int i;
-
- sync_debugfs_show(&s, NULL);
-
- for (i = 0; i < s.count; i += DUMP_CHUNK) {
- if ((s.count - i) > DUMP_CHUNK) {
- char c = s.buf[i + DUMP_CHUNK];
-
- s.buf[i + DUMP_CHUNK] = 0;
- pr_cont("%s", s.buf + i);
- s.buf[i + DUMP_CHUNK] = c;
- } else {
- s.buf[s.count] = 0;
- pr_cont("%s", s.buf + i);
- }
- }
-}
diff --git a/drivers/staging/android/sync_debug.h b/drivers/staging/android/sync_debug.h
deleted file mode 100644
index fab6639..0000000
--- a/drivers/staging/android/sync_debug.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * include/linux/sync.h
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_SYNC_H
-#define _LINUX_SYNC_H
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/fence.h>
-
-#include <linux/sync_file.h>
-#include <uapi/linux/sync_file.h>
-
-/**
- * struct sync_timeline - sync object
- * @kref: reference count on fence.
- * @name: name of the sync_timeline. Useful for debugging
- * @child_list_head: list of children sync_pts for this sync_timeline
- * @child_list_lock: lock protecting @child_list_head and fence.status
- * @active_list_head: list of active (unsignaled/errored) sync_pts
- * @sync_timeline_list: membership in global sync_timeline_list
- */
-struct sync_timeline {
- struct kref kref;
- char name[32];
-
- /* protected by child_list_lock */
- u64 context;
- int value;
-
- struct list_head child_list_head;
- spinlock_t child_list_lock;
-
- struct list_head active_list_head;
-
- struct list_head sync_timeline_list;
-};
-
-static inline struct sync_timeline *fence_parent(struct fence *fence)
-{
- return container_of(fence->lock, struct sync_timeline,
- child_list_lock);
-}
-
-/**
- * struct sync_pt - sync_pt object
- * @base: base fence object
- * @child_list: sync timeline child's list
- * @active_list: sync timeline active child's list
- */
-struct sync_pt {
- struct fence base;
- struct list_head child_list;
- struct list_head active_list;
-};
-
-#ifdef CONFIG_SW_SYNC
-
-extern const struct file_operations sw_sync_debugfs_fops;
-
-void sync_timeline_debug_add(struct sync_timeline *obj);
-void sync_timeline_debug_remove(struct sync_timeline *obj);
-void sync_file_debug_add(struct sync_file *fence);
-void sync_file_debug_remove(struct sync_file *fence);
-void sync_dump(void);
-
-#else
-# define sync_timeline_debug_add(obj)
-# define sync_timeline_debug_remove(obj)
-# define sync_file_debug_add(fence)
-# define sync_file_debug_remove(fence)
-# define sync_dump()
-#endif
-
-#endif /* _LINUX_SYNC_H */
diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h
deleted file mode 100644
index 6b5ce96..0000000
--- a/drivers/staging/android/trace/sync.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_INCLUDE_PATH ../../drivers/staging/android/trace
-#define TRACE_SYSTEM sync
-
-#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_SYNC_H
-
-#include "../sync_debug.h"
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(sync_timeline,
- TP_PROTO(struct sync_timeline *timeline),
-
- TP_ARGS(timeline),
-
- TP_STRUCT__entry(
- __string(name, timeline->name)
- __field(u32, value)
- ),
-
- TP_fast_assign(
- __assign_str(name, timeline->name);
- __entry->value = timeline->value;
- ),
-
- TP_printk("name=%s value=%d", __get_str(name), __entry->value)
-);
-
-#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
index 0a8e40f..14cd873 100644
--- a/drivers/staging/android/uapi/ion.h
+++ b/drivers/staging/android/uapi/ion.h
@@ -44,32 +44,26 @@ enum ion_heap_type {
* must be last so device specific heaps always
* are at the end of this enum
*/
- ION_NUM_HEAPS = 16,
};
-#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
-#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
-#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
-#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
-
#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
/**
* allocation flags - the lower 16 bits are used by core ion, the upper 16
* bits are reserved for use by the heaps themselves.
*/
-#define ION_FLAG_CACHED 1 /*
- * mappings of this buffer should be
- * cached, ion will do cache
- * maintenance when the buffer is
- * mapped for dma
- */
-#define ION_FLAG_CACHED_NEEDS_SYNC 2 /*
- * mappings of this buffer will created
- * at mmap time, if this is set
- * caches must be managed
- * manually
- */
+
+/*
+ * mappings of this buffer should be cached; ion will do cache maintenance
+ * when the buffer is mapped for dma
+ */
+#define ION_FLAG_CACHED 1
+
+/*
+ * mappings of this buffer will be created at mmap time; if this is set,
+ * caches must be managed manually
+ */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2
/**
* DOC: Ion Userspace API
@@ -134,6 +128,36 @@ struct ion_custom_data {
unsigned long arg;
};
+#define MAX_HEAP_NAME 32
+
+/**
+ * struct ion_heap_data - data about a heap
+ * @name - first 32 characters of the heap name
+ * @type - heap type
+ * @heap_id - heap id for the heap
+ */
+struct ion_heap_data {
+ char name[MAX_HEAP_NAME];
+ __u32 type;
+ __u32 heap_id;
+ __u32 reserved0;
+ __u32 reserved1;
+ __u32 reserved2;
+};
+
+/**
+ * struct ion_heap_query - collection of data about all heaps
+ * @cnt - total number of heaps to be copied
+ * @heaps - buffer to copy heap data
+ */
+struct ion_heap_query {
+ __u32 cnt; /* Total number of heaps to be copied */
+ __u32 reserved0; /* align to 64bits */
+ __u64 heaps; /* buffer to be populated */
+ __u32 reserved1;
+ __u32 reserved2;
+};
+
#define ION_IOC_MAGIC 'I'
/**
@@ -200,4 +224,13 @@ struct ion_custom_data {
*/
#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+/**
+ * DOC: ION_IOC_HEAP_QUERY - information about available heaps
+ *
+ * Takes an ion_heap_query structure and populates information about
+ * available Ion heaps.
+ */
+#define ION_IOC_HEAP_QUERY _IOWR(ION_IOC_MAGIC, 8, \
+ struct ion_heap_query)
+
#endif /* _UAPI_LINUX_ION_H */
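For the new heap query interface, a plausible userspace usage sketch follows, assuming the two-call convention where a first ioctl with heaps == 0 only reports the count; the device path, include path, and error handling are illustrative:

/*
 * Illustrative userspace sketch, not part of the patch: queries the
 * available heaps via the new ION_IOC_HEAP_QUERY ioctl, assuming a
 * first call with heaps == 0 only fills in cnt.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ion.h"	/* the staging uapi header patched above */

int main(void)
{
	struct ion_heap_query query;
	struct ion_heap_data *heaps;
	unsigned int i;
	int fd;

	fd = open("/dev/ion", O_RDONLY);
	if (fd < 0)
		return 1;

	/* first call: heaps == 0, the kernel reports the heap count in cnt */
	memset(&query, 0, sizeof(query));
	if (ioctl(fd, ION_IOC_HEAP_QUERY, &query) < 0 || !query.cnt)
		goto out;

	heaps = calloc(query.cnt, sizeof(*heaps));
	if (!heaps)
		goto out;

	/* second call: same cnt, heaps points at the buffer to populate */
	query.heaps = (__u64)(uintptr_t)heaps;
	if (ioctl(fd, ION_IOC_HEAP_QUERY, &query) == 0)
		for (i = 0; i < query.cnt; i++)
			printf("heap %u: %s (type %u)\n",
			       heaps[i].heap_id, heaps[i].name, heaps[i].type);

	free(heaps);
out:
	close(fd);
	return 0;
}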