author		Oded Gabbay <oded.gabbay@amd.com>	2015-01-12 14:26:10 +0200
committer	Oded Gabbay <oded.gabbay@amd.com>	2015-01-12 14:26:10 +0200
commit		45c9a5e4297b9a07d94ff8195ff6f21ba3581ad6 (patch)
tree		0bce60eee553c065f94c4ddba4c067d6dc823044 /drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
parent		9216ed294053be68a673754a0f8da88aa7fb7941 (diff)
download	op-kernel-dev-45c9a5e4297b9a07d94ff8195ff6f21ba3581ad6.zip
		op-kernel-dev-45c9a5e4297b9a07d94ff8195ff6f21ba3581ad6.tar.gz
drm/amdkfd: Encapsulate DQM functions in ops structure
This patch reorganizes the device_queue_manager structure. All the function pointers are taken out of the structure and placed in a new structure called device_queue_manager_ops, and an instance of that structure is embedded in device_queue_manager.

This re-org prepares the DQM module to support more than one AMD APU (currently only Kaveri).

Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
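For reference, a minimal sketch of the resulting layout. This is not the verbatim kernel definition: the real declarations live in kfd_device_queue_manager.h, and the parameter lists below are abbreviated (for example, the real get_mqd_manager takes an enum MQD type and create_queue takes additional arguments). Only the field names follow the assignments in the diff below.

struct device_queue_manager;	/* forward declarations only */
struct queue;
struct qcm_process_device;
struct mqd_manager;

/* New: all DQM entry points grouped in one ops table. */
struct device_queue_manager_ops {
	int	(*create_queue)(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd);
	int	(*destroy_queue)(struct device_queue_manager *dqm,
				 struct qcm_process_device *qpd,
				 struct queue *q);
	int	(*update_queue)(struct device_queue_manager *dqm,
				struct queue *q);
	struct mqd_manager *(*get_mqd_manager)(struct device_queue_manager *dqm,
					       unsigned int mqd_type);
	int	(*initialize)(struct device_queue_manager *dqm);
	int	(*start)(struct device_queue_manager *dqm);
	int	(*stop)(struct device_queue_manager *dqm);
	void	(*uninitialize)(struct device_queue_manager *dqm);
	/* register_process, unregister_process, create_kernel_queue,
	 * destroy_kernel_queue and set_cache_memory_policy follow the
	 * same pattern and are omitted here. */
};

/* Changed: the ops table is embedded where the loose pointers used to be. */
struct device_queue_manager {
	struct device_queue_manager_ops ops;
	/* ... remaining fields unchanged ... */
};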
Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c')
-rw-r--r--	drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c	68
1 file changed, 34 insertions(+), 34 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c83f011..12c8448 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -271,7 +271,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
BUG_ON(!dqm || !q || !qpd);
- mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+ mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
if (mqd == NULL)
return -ENOMEM;
@@ -305,14 +305,14 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
mutex_lock(&dqm->lock);
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
- mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+ mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
if (mqd == NULL) {
retval = -ENOMEM;
goto out;
}
deallocate_hqd(dqm, q);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+ mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
if (mqd == NULL) {
retval = -ENOMEM;
goto out;
@@ -348,7 +348,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
BUG_ON(!dqm || !q || !q->mqd);
mutex_lock(&dqm->lock);
- mqd = dqm->get_mqd_manager(dqm, q->properties.type);
+ mqd = dqm->ops.get_mqd_manager(dqm, q->properties.type);
if (mqd == NULL) {
mutex_unlock(&dqm->lock);
return -ENOMEM;
@@ -515,7 +515,7 @@ static int init_pipelines(struct device_queue_manager *dqm,
memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);
- mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+ mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
if (mqd == NULL) {
kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
return -ENOMEM;
@@ -646,7 +646,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
struct mqd_manager *mqd;
int retval;
- mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+ mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
if (!mqd)
return -ENOMEM;
@@ -849,7 +849,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
select_sdma_engine_id(q);
- mqd = dqm->get_mqd_manager(dqm,
+ mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
if (mqd == NULL) {
@@ -994,7 +994,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
/* remove queue from list to prevent rescheduling after preemption */
mutex_lock(&dqm->lock);
- mqd = dqm->get_mqd_manager(dqm,
+ mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
if (!mqd) {
retval = -ENOMEM;
@@ -1116,40 +1116,40 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
case KFD_SCHED_POLICY_HWS:
case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
/* initialize dqm for cp scheduling */
- dqm->create_queue = create_queue_cpsch;
- dqm->initialize = initialize_cpsch;
- dqm->start = start_cpsch;
- dqm->stop = stop_cpsch;
- dqm->destroy_queue = destroy_queue_cpsch;
- dqm->update_queue = update_queue;
- dqm->get_mqd_manager = get_mqd_manager_nocpsch;
- dqm->register_process = register_process_nocpsch;
- dqm->unregister_process = unregister_process_nocpsch;
- dqm->uninitialize = uninitialize_nocpsch;
- dqm->create_kernel_queue = create_kernel_queue_cpsch;
- dqm->destroy_kernel_queue = destroy_kernel_queue_cpsch;
- dqm->set_cache_memory_policy = set_cache_memory_policy;
+ dqm->ops.create_queue = create_queue_cpsch;
+ dqm->ops.initialize = initialize_cpsch;
+ dqm->ops.start = start_cpsch;
+ dqm->ops.stop = stop_cpsch;
+ dqm->ops.destroy_queue = destroy_queue_cpsch;
+ dqm->ops.update_queue = update_queue;
+ dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
+ dqm->ops.register_process = register_process_nocpsch;
+ dqm->ops.unregister_process = unregister_process_nocpsch;
+ dqm->ops.uninitialize = uninitialize_nocpsch;
+ dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
+ dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
+ dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
break;
case KFD_SCHED_POLICY_NO_HWS:
/* initialize dqm for no cp scheduling */
- dqm->start = start_nocpsch;
- dqm->stop = stop_nocpsch;
- dqm->create_queue = create_queue_nocpsch;
- dqm->destroy_queue = destroy_queue_nocpsch;
- dqm->update_queue = update_queue;
- dqm->get_mqd_manager = get_mqd_manager_nocpsch;
- dqm->register_process = register_process_nocpsch;
- dqm->unregister_process = unregister_process_nocpsch;
- dqm->initialize = initialize_nocpsch;
- dqm->uninitialize = uninitialize_nocpsch;
- dqm->set_cache_memory_policy = set_cache_memory_policy;
+ dqm->ops.start = start_nocpsch;
+ dqm->ops.stop = stop_nocpsch;
+ dqm->ops.create_queue = create_queue_nocpsch;
+ dqm->ops.destroy_queue = destroy_queue_nocpsch;
+ dqm->ops.update_queue = update_queue;
+ dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
+ dqm->ops.register_process = register_process_nocpsch;
+ dqm->ops.unregister_process = unregister_process_nocpsch;
+ dqm->ops.initialize = initialize_nocpsch;
+ dqm->ops.uninitialize = uninitialize_nocpsch;
+ dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
break;
default:
BUG();
break;
}
- if (dqm->initialize(dqm) != 0) {
+ if (dqm->ops.initialize(dqm) != 0) {
kfree(dqm);
return NULL;
}
@@ -1161,7 +1161,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
BUG_ON(!dqm);
- dqm->uninitialize(dqm);
+ dqm->ops.uninitialize(dqm);
kfree(dqm);
}
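After the change, every caller reaches the DQM entry points through the embedded ops table, as the hunks above show for get_mqd_manager. A minimal, hypothetical caller (not part of the patch) illustrating the pattern:

/* Hypothetical helper: the indirection now goes through dqm->ops rather
 * than through function pointers stored directly on dqm. */
static struct mqd_manager *
example_get_compute_mqd(struct device_queue_manager *dqm)
{
	return dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
}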