author    Linus Torvalds <torvalds@linux-foundation.org>    2012-03-28 15:02:41 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-03-28 15:02:41 -0700
commit    ed0bb8ea059764c3fc882fb135473afd347335e9 (patch)
tree      5274b8335afe85f76d1eb945eb03ffe4040737b4 /drivers/base
parent    47b816ff7d520509176154748713e7d66b3ad6ac (diff)
parent    3e0b2a1993c06e646d90d71e163d03869a211a4c (diff)
download  op-kernel-dev-ed0bb8ea059764c3fc882fb135473afd347335e9.zip
          op-kernel-dev-ed0bb8ea059764c3fc882fb135473afd347335e9.tar.gz
Merge branch 'for-linus-3.4' of git://git.linaro.org/people/sumitsemwal/linux-dma-buf
Pull dma-buf updates from Sumit Semwal:
 "This includes the following key items:

   - kernel cpu access support,
   - flag-passing to dma_buf_fd,
   - relevant Documentation updates, and
   - some minor cleanups and fixes.

  These changes are needed for the drm prime/dma-buf interface code that
  Dave Airlie plans to submit in this merge window."

* 'for-linus-3.4' of git://git.linaro.org/people/sumitsemwal/linux-dma-buf:
  dma-buf: correct dummy function declarations.
  dma-buf: document fd flags and O_CLOEXEC requirement
  dma_buf: Add documentation for the new cpu access support
  dma-buf: add support for kernel cpu access
  dma-buf: don't hold the mutex around map/unmap calls
  dma-buf: add get_dma_buf()
  dma-buf: pass flags into dma_buf_fd.
  dma-buf: add dma_data_direction to unmap dma_buf_op
  dma-buf: Move code out of mutex-protected section in dma_buf_attach()
  dma-buf: Return error instead of using a goto statement when possible
  dma-buf: Remove unneeded sanity checks
  dma-buf: Constify ops argument to dma_buf_export()
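For orientation, here is a minimal importer-side sketch of the API as it looks
after this series. The my_import() helper, its error handling, and the choice
of DMA_BIDIRECTIONAL are illustrative assumptions, not code from the patch;
the signatures themselves (dma_buf_unmap_attachment() taking a
dma_data_direction, dma_buf_fd() taking fd flags) are the ones introduced in
the diff below.

        #include <linux/device.h>
        #include <linux/dma-buf.h>
        #include <linux/dma-mapping.h>
        #include <linux/err.h>

        /* Hypothetical importer: attach to a dma_buf, map it for DMA, unmap. */
        static int my_import(struct device *dev, struct dma_buf *dmabuf)
        {
                struct dma_buf_attachment *attach;
                struct sg_table *sgt;

                attach = dma_buf_attach(dmabuf, dev);
                if (IS_ERR(attach))
                        return PTR_ERR(attach);

                /* map_dma_buf is a mandatory op, so no NULL check on the op. */
                sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
                if (IS_ERR(sgt)) {
                        dma_buf_detach(dmabuf, attach);
                        return PTR_ERR(sgt);
                }

                /* ... program the device with the scatterlist ... */

                /* unmap now takes the dma_data_direction as well */
                dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
                dma_buf_detach(dmabuf, attach);
                return 0;
        }

On the fd side, callers now pass flags explicitly, e.g.
dma_buf_fd(dmabuf, O_CLOEXEC); the "document fd flags and O_CLOEXEC
requirement" patch in the list above explains why O_CLOEXEC should be the
default choice.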
Diffstat (limited to 'drivers/base')
-rw-r--r--    drivers/base/dma-buf.c    165
1 file changed, 141 insertions(+), 24 deletions(-)
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index e38ad24..07cbbc6 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -71,7 +71,7 @@ static inline int is_dma_buf_file(struct file *file)
* ops, or error in allocating struct dma_buf, will return negative error.
*
*/
-struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
+struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
size_t size, int flags)
{
struct dma_buf *dmabuf;
@@ -80,7 +80,9 @@ struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
if (WARN_ON(!priv || !ops
|| !ops->map_dma_buf
|| !ops->unmap_dma_buf
- || !ops->release)) {
+ || !ops->release
+ || !ops->kmap_atomic
+ || !ops->kmap)) {
return ERR_PTR(-EINVAL);
}
@@ -107,17 +109,18 @@ EXPORT_SYMBOL_GPL(dma_buf_export);
/**
* dma_buf_fd - returns a file descriptor for the given dma_buf
* @dmabuf: [in] pointer to dma_buf for which fd is required.
+ * @flags: [in] flags to give to fd
*
* On success, returns an associated 'fd'. Else, returns error.
*/
-int dma_buf_fd(struct dma_buf *dmabuf)
+int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
int error, fd;
if (!dmabuf || !dmabuf->file)
return -EINVAL;
- error = get_unused_fd();
+ error = get_unused_fd_flags(flags);
if (error < 0)
return error;
fd = error;
@@ -185,17 +188,18 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attach;
int ret;
- if (WARN_ON(!dmabuf || !dev || !dmabuf->ops))
+ if (WARN_ON(!dmabuf || !dev))
return ERR_PTR(-EINVAL);
attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
if (attach == NULL)
- goto err_alloc;
-
- mutex_lock(&dmabuf->lock);
+ return ERR_PTR(-ENOMEM);
attach->dev = dev;
attach->dmabuf = dmabuf;
+
+ mutex_lock(&dmabuf->lock);
+
if (dmabuf->ops->attach) {
ret = dmabuf->ops->attach(dmabuf, dev, attach);
if (ret)
@@ -206,8 +210,6 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
mutex_unlock(&dmabuf->lock);
return attach;
-err_alloc:
- return ERR_PTR(-ENOMEM);
err_attach:
kfree(attach);
mutex_unlock(&dmabuf->lock);
@@ -224,7 +226,7 @@ EXPORT_SYMBOL_GPL(dma_buf_attach);
*/
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
- if (WARN_ON(!dmabuf || !attach || !dmabuf->ops))
+ if (WARN_ON(!dmabuf || !attach))
return;
mutex_lock(&dmabuf->lock);
@@ -255,13 +257,10 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
might_sleep();
- if (WARN_ON(!attach || !attach->dmabuf || !attach->dmabuf->ops))
+ if (WARN_ON(!attach || !attach->dmabuf))
return ERR_PTR(-EINVAL);
- mutex_lock(&attach->dmabuf->lock);
- if (attach->dmabuf->ops->map_dma_buf)
- sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
- mutex_unlock(&attach->dmabuf->lock);
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
return sg_table;
}
@@ -273,19 +272,137 @@ EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
* dma_buf_ops.
* @attach: [in] attachment to unmap buffer from
* @sg_table: [in] scatterlist info of the buffer to unmap
+ * @direction: [in] direction of DMA transfer
*
*/
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
- struct sg_table *sg_table)
+ struct sg_table *sg_table,
+ enum dma_data_direction direction)
{
- if (WARN_ON(!attach || !attach->dmabuf || !sg_table
- || !attach->dmabuf->ops))
+ if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
- mutex_lock(&attach->dmabuf->lock);
- if (attach->dmabuf->ops->unmap_dma_buf)
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table);
- mutex_unlock(&attach->dmabuf->lock);
-
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
+ direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+
+
+/**
+ * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
+ * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
+ * preparations. Coherency is only guaranteed in the specified range for the
+ * specified access direction.
+ * @dmabuf:	[in]	buffer to prepare cpu access for.
+ * @start: [in] start of range for cpu access.
+ * @len: [in] length of range for cpu access.
+ * @direction:	[in]	direction of cpu access.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+ enum dma_data_direction direction)
+{
+ int ret = 0;
+
+ if (WARN_ON(!dmabuf))
+ return -EINVAL;
+
+ if (dmabuf->ops->begin_cpu_access)
+ ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
+
+/**
+ * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
+ * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
+ * actions. Coherency is only guaranteed in the specified range for the
+ * specified access direction.
+ * @dmabuf:	[in]	buffer to complete cpu access for.
+ * @start: [in] start of range for cpu access.
+ * @len: [in] length of range for cpu access.
+ * @direction:	[in]	direction of cpu access.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+ enum dma_data_direction direction)
+{
+ WARN_ON(!dmabuf);
+
+ if (dmabuf->ops->end_cpu_access)
+ dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
+}
+EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
+
+/**
+ * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
+ * space. The same restrictions as for kmap_atomic and friends apply.
+ * @dmabuf:	[in]	buffer to map page from.
+ * @page_num: [in] page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed, any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
+{
+ WARN_ON(!dmabuf);
+
+ return dmabuf->ops->kmap_atomic(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
+
+/**
+ * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
+ * @dmabuf:	[in]	buffer to unmap page from.
+ * @page_num: [in] page in PAGE_SIZE units to unmap.
+ * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap_atomic.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
+ void *vaddr)
+{
+ WARN_ON(!dmabuf);
+
+ if (dmabuf->ops->kunmap_atomic)
+ dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
+
+/**
+ * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
+ * same restrictions as for kmap and friends apply.
+ * @dmabuf:	[in]	buffer to map page from.
+ * @page_num: [in] page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed, any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
+{
+ WARN_ON(!dmabuf);
+
+ return dmabuf->ops->kmap(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap);
+
+/**
+ * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
+ * @dmabuf:	[in]	buffer to unmap page from.
+ * @page_num: [in] page in PAGE_SIZE units to unmap.
+ * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
+ void *vaddr)
+{
+ WARN_ON(!dmabuf);
+
+ if (dmabuf->ops->kunmap)
+ dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap);
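Taken together, the new exports above bracket kernel cpu access as
begin -> kmap -> kunmap -> end. A minimal sketch, assuming a hypothetical
my_cpu_read() helper that copies the first page of a buffer (the range and
the DMA_FROM_DEVICE direction are illustrative, not from this patch):

        #include <linux/dma-buf.h>
        #include <linux/dma-mapping.h>
        #include <linux/kernel.h>
        #include <linux/mm.h>
        #include <linux/string.h>

        /* Hypothetical reader: copy the first page of a dma_buf to dst. */
        static int my_cpu_read(struct dma_buf *dmabuf, void *dst, size_t len)
        {
                void *vaddr;
                int ret;

                /* Let the exporter make [0, len) coherent for cpu reads;
                 * this is the only step that may fail. */
                ret = dma_buf_begin_cpu_access(dmabuf, 0, len,
                                               DMA_FROM_DEVICE);
                if (ret)
                        return ret;

                vaddr = dma_buf_kmap(dmabuf, 0);	/* page 0; may sleep */
                memcpy(dst, vaddr, min_t(size_t, len, PAGE_SIZE));
                dma_buf_kunmap(dmabuf, 0, vaddr);

                dma_buf_end_cpu_access(dmabuf, 0, len, DMA_FROM_DEVICE);
                return 0;
        }

Note the asymmetry the kernel-doc insists on: only begin_cpu_access may fail;
kmap, kunmap and end_cpu_access must always succeed. That is why
dma_buf_export() above now refuses exporters that do not implement kmap and
kmap_atomic.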