-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h       3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c   88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c   2
-rw-r--r--  include/uapi/drm/amdgpu_drm.h             6
4 files changed, 97 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e0adad5..9f827ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1159,6 +1159,9 @@ struct amdgpu_cs_parser {
/* user fence */
struct amdgpu_bo_list_entry uf_entry;
+
+	unsigned num_post_dep_syncobjs;
+	struct drm_syncobj **post_dep_syncobjs;
};
#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0) /* bit set means command submit involves a preamble IB */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 29469e6..aeee684 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -27,6 +27,7 @@
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
+#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
@@ -154,6 +155,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
break;
case AMDGPU_CHUNK_ID_DEPENDENCIES:
+		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
+		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
break;
default:
@@ -682,6 +685,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
}
+
+	for (i = 0; i < parser->num_post_dep_syncobjs; i++)
+		drm_syncobj_put(parser->post_dep_syncobjs[i]);
+	kfree(parser->post_dep_syncobjs);
+
dma_fence_put(parser->fence);
if (parser->ctx)
@@ -971,6 +979,64 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
return 0;
}
+static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
+						 uint32_t handle)
+{
+	int r;
+	struct dma_fence *fence;
+	r = drm_syncobj_fence_get(p->filp, handle, &fence);
+	if (r)
+		return r;
+
+	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence);
+	dma_fence_put(fence);
+
+	return r;
+}
+
+static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
+					    struct amdgpu_cs_chunk *chunk)
+{
+	unsigned num_deps;
+	int i, r;
+	struct drm_amdgpu_cs_chunk_sem *deps;
+
+	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
+	num_deps = chunk->length_dw * 4 /
+		sizeof(struct drm_amdgpu_cs_chunk_sem);
+
+	for (i = 0; i < num_deps; ++i) {
+		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
+		if (r)
+			return r;
+	}
+	return 0;
+}
+
+static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
+					     struct amdgpu_cs_chunk *chunk)
+{
+	unsigned num_deps;
+	int i;
+	struct drm_amdgpu_cs_chunk_sem *deps;
+	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
+	num_deps = chunk->length_dw * 4 /
+		sizeof(struct drm_amdgpu_cs_chunk_sem);
+
+	p->post_dep_syncobjs = kmalloc_array(num_deps,
+					     sizeof(struct drm_syncobj *),
+					     GFP_KERNEL);
+	p->num_post_dep_syncobjs = 0;
+
+	for (i = 0; i < num_deps; ++i) {
+		p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
+		if (!p->post_dep_syncobjs[i])
+			return -EINVAL;
+		p->num_post_dep_syncobjs++;
+	}
+	return 0;
+}
+
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
struct amdgpu_cs_parser *p)
{
@@ -985,12 +1051,30 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
r = amdgpu_cs_process_fence_dep(p, chunk);
if (r)
return r;
+		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
+			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
+			if (r)
+				return r;
+		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
+			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
+			if (r)
+				return r;
}
}
return 0;
}
+static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
+{
+	int i;
+
+	for (i = 0; i < p->num_post_dep_syncobjs; ++i) {
+		drm_syncobj_replace_fence(p->filp, p->post_dep_syncobjs[i],
+					  p->fence);
+	}
+}
+
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
@@ -1011,6 +1095,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->owner = p->filp;
job->fence_ctx = entity->fence_context;
p->fence = dma_fence_get(&job->base.s_fence->finished);
+
+	amdgpu_cs_post_dependencies(p);
+
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
amdgpu_job_free_resources(job);
@@ -1018,7 +1105,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8168f8e..4c7c262 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -782,7 +782,7 @@ static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
- DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET,
+ DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
.load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 34128f6..d9aa4a3 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -440,6 +440,8 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_IB 0x01
#define AMDGPU_CHUNK_ID_FENCE 0x02
#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
+#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04
+#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
@@ -507,6 +509,10 @@ struct drm_amdgpu_cs_chunk_fence {
__u32 offset;
};
+struct drm_amdgpu_cs_chunk_sem {
+	__u32 handle;
+};
+
struct drm_amdgpu_cs_chunk_data {
union {
struct drm_amdgpu_cs_chunk_ib ib_data;
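
Usage note, not part of the patch above: the new chunks are driven from userspace through the existing DRM_IOCTL_AMDGPU_CS ioctl. The sketch below is a hypothetical illustration of how a caller might wait on one syncobj and signal another in a single submission. The fd, ctx_id, bo_list and ib_chunk parameters and the helper name submit_with_syncobjs are assumptions standing in for the usual setup (a context, BO list and IB created beforehand, and syncobj handles obtained via DRM_IOCTL_SYNCOBJ_CREATE or imported from another process); in practice this plumbing would live in libdrm_amdgpu and the userspace drivers rather than being open-coded.

/*
 * Hypothetical userspace sketch -- not part of this patch.  Assumes fd is an
 * open amdgpu render node, ctx_id/bo_list come from earlier context/BO-list
 * ioctls, and ib_chunk is an already-filled AMDGPU_CHUNK_ID_IB chunk.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>           /* drmIoctl() from libdrm */
#include <drm/amdgpu_drm.h>    /* or <amdgpu_drm.h> via libdrm's include path */

static int submit_with_syncobjs(int fd, uint32_t ctx_id, uint32_t bo_list,
				const struct drm_amdgpu_cs_chunk *ib_chunk,
				uint32_t wait_handle, uint32_t signal_handle)
{
	struct drm_amdgpu_cs_chunk_sem wait_sem = { .handle = wait_handle };
	struct drm_amdgpu_cs_chunk_sem signal_sem = { .handle = signal_handle };
	struct drm_amdgpu_cs_chunk chunks[3];
	uint64_t chunk_ptrs[3];
	union drm_amdgpu_cs cs;
	int i;

	chunks[0] = *ib_chunk;	/* the actual command buffer */

	/* Wait for the fence currently carried by wait_handle. */
	chunks[1].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_IN;
	chunks[1].length_dw = sizeof(wait_sem) / 4;
	chunks[1].chunk_data = (uintptr_t)&wait_sem;

	/* After submission, signal_handle carries this job's fence. */
	chunks[2].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_OUT;
	chunks[2].length_dw = sizeof(signal_sem) / 4;
	chunks[2].chunk_data = (uintptr_t)&signal_sem;

	for (i = 0; i < 3; i++)
		chunk_ptrs[i] = (uintptr_t)&chunks[i];

	memset(&cs, 0, sizeof(cs));
	cs.in.ctx_id = ctx_id;
	cs.in.bo_list_handle = bo_list;
	cs.in.num_chunks = 3;
	cs.in.chunks = (uintptr_t)chunk_ptrs;	/* array of pointers to chunks */

	return drmIoctl(fd, DRM_IOCTL_AMDGPU_CS, &cs);
}

On the kernel side, each SYNCOBJ_IN handle is resolved to its fence and merged into the job's sync object before scheduling, and at submit time amdgpu_cs_post_dependencies() replaces the fence in every SYNCOBJ_OUT handle with the new job's finished fence, so a later waiter on that syncobj observes this submission.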