Diffstat (limited to 'sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.c')
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.c  136
1 file changed, 120 insertions, 16 deletions
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.c b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.c
index d534a7f..556ddf6 100644
--- a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.c
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.c
@@ -1,4 +1,5 @@
/**
+ * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
* Copyright (c) 2010-2012 Broadcom. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -102,13 +103,15 @@ typedef struct user_service_struct {
VCHIQ_SERVICE_T *service;
void *userdata;
VCHIQ_INSTANCE_T instance;
- int is_vchi;
- int dequeue_pending;
+ char is_vchi;
+ char dequeue_pending;
+ char close_pending;
int message_available_pos;
int msg_insert;
int msg_remove;
struct semaphore insert_event;
struct semaphore remove_event;
+ struct semaphore close_event;
VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
} USER_SERVICE_T;
@@ -131,11 +134,15 @@ struct vchiq_instance_struct {
int closing;
int pid;
int mark;
+ int use_close_delivered;
+ int trace;
struct list_head bulk_waiter_list;
struct mutex bulk_waiter_list_mutex;
- struct proc_dir_entry *proc_entry;
+#ifdef notyet
+ VCHIQ_DEBUGFS_NODE_T proc_entry;
+#endif
};
typedef struct dump_context_struct {
@@ -165,7 +172,9 @@ static const char *const ioctl_names[] = {
"USE_SERVICE",
"RELEASE_SERVICE",
"SET_SERVICE_OPTION",
- "DUMP_PHYS_MEM"
+ "DUMP_PHYS_MEM",
+ "LIB_VERSION",
+ "CLOSE_DELIVERED"
};
vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
@@ -232,10 +241,13 @@ add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
completion->service_userdata = user_service->service;
completion->bulk_userdata = bulk_userdata;
- if (reason == VCHIQ_SERVICE_CLOSED)
+ if (reason == VCHIQ_SERVICE_CLOSED) {
/* Take an extra reference, to be held until
this CLOSED notification is delivered. */
lock_service(user_service->service);
+ if (instance->use_close_delivered)
+ user_service->close_pending = 1;
+ }
/* A write barrier is needed here to ensure that the entire completion
record is written out before the insert point. */
@@ -282,10 +294,10 @@ service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
return VCHIQ_SUCCESS;
vchiq_log_trace(vchiq_arm_log_level,
- "service_callback - service %lx(%d), handle %x, reason %d, header %lx, "
+ "service_callback - service %lx(%d,%p), reason %d, header %lx, "
"instance %lx, bulk_userdata %lx",
(unsigned long)user_service,
- service->localport, service->handle,
+ service->localport, user_service->userdata,
reason, (unsigned long)header,
(unsigned long)instance, (unsigned long)bulk_userdata);
@@ -377,6 +389,28 @@ user_service_free(void *userdata)
/****************************************************************************
*
+* close_delivered
+*
+***************************************************************************/
+static void close_delivered(USER_SERVICE_T *user_service)
+{
+ vchiq_log_info(vchiq_arm_log_level,
+ "close_delivered(handle=%x)",
+ user_service->service->handle);
+
+ if (user_service->close_pending) {
+ /* Allow the underlying service to be culled */
+ unlock_service(user_service->service);
+
+ /* Wake the user-thread blocked in close_ or remove_service */
+ up(&user_service->close_event);
+
+ user_service->close_pending = 0;
+ }
+}
+
+/****************************************************************************
+*
* vchiq_ioctl
*
***************************************************************************/
@@ -496,14 +530,16 @@ vchiq_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
user_service->service = service;
user_service->userdata = userdata;
user_service->instance = instance;
- user_service->is_vchi = args.is_vchi;
+ user_service->is_vchi = (args.is_vchi != 0);
user_service->dequeue_pending = 0;
+ user_service->close_pending = 0;
user_service->message_available_pos =
instance->completion_remove - 1;
user_service->msg_insert = 0;
user_service->msg_remove = 0;
_sema_init(&user_service->insert_event, 0);
_sema_init(&user_service->remove_event, 0);
+ _sema_init(&user_service->close_event, 0);
if (args.is_open) {
status = vchiq_open_service_internal
@@ -543,8 +579,24 @@ vchiq_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
#endif
service = find_service_for_instance(instance, handle);
- if (service != NULL)
- status = vchiq_close_service(service->handle);
+ if (service != NULL) {
+ USER_SERVICE_T *user_service =
+ (USER_SERVICE_T *)service->base.userdata;
+ /* close_pending is false on first entry, and when the
+ wait in vchiq_close_service has been interrupted. */
+ if (!user_service->close_pending) {
+ status = vchiq_close_service(service->handle);
+ if (status != VCHIQ_SUCCESS)
+ break;
+ }
+
+ /* close_pending is true once the underlying service
+ has been closed until the client library calls the
+ CLOSE_DELIVERED ioctl, signalling close_event. */
+ if (user_service->close_pending &&
+ down_interruptible(&user_service->close_event))
+ status = VCHIQ_RETRY;
+ }
else
ret = -EINVAL;
} break;
@@ -559,8 +611,24 @@ vchiq_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
#endif
service = find_service_for_instance(instance, handle);
- if (service != NULL)
- status = vchiq_remove_service(service->handle);
+ if (service != NULL) {
+ USER_SERVICE_T *user_service =
+ (USER_SERVICE_T *)service->base.userdata;
+ /* close_pending is false on first entry, and when the
+ wait in vchiq_close_service has been interrupted. */
+ if (!user_service->close_pending) {
+ status = vchiq_remove_service(service->handle);
+ if (status != VCHIQ_SUCCESS)
+ break;
+ }
+
+ /* close_pending is true once the underlying service
+ has been closed until the client library calls the
+ CLOSE_DELIVERED ioctl, signalling close_event. */
+ if (user_service->close_pending &&
+ down_interruptible(&user_service->close_event))
+ status = VCHIQ_RETRY;
+ }
else
ret = -EINVAL;
} break;
@@ -824,8 +892,9 @@ vchiq_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
completion->header = msgbuf;
}
- if (completion->reason ==
- VCHIQ_SERVICE_CLOSED)
+ if ((completion->reason ==
+ VCHIQ_SERVICE_CLOSED) &&
+ !instance->use_close_delivered)
unlock_service(service1);
if (copy_to_user((void __user *)(
@@ -1007,6 +1076,29 @@ vchiq_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
#endif
} break;
+ case VCHIQ_IOC_LIB_VERSION: {
+ unsigned int lib_version = (unsigned int)arg;
+
+ if (lib_version < VCHIQ_VERSION_MIN)
+ ret = -EINVAL;
+ else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
+ instance->use_close_delivered = 1;
+ } break;
+
+ case VCHIQ_IOC_CLOSE_DELIVERED: {
+ VCHIQ_SERVICE_HANDLE_T handle;
+ memcpy(&handle, (const void*)arg, sizeof(handle));
+
+ service = find_closed_service_for_instance(instance, handle);
+ if (service != NULL) {
+ USER_SERVICE_T *user_service =
+ (USER_SERVICE_T *)service->base.userdata;
+ close_delivered(user_service);
+ }
+ else
+ ret = -EINVAL;
+ } break;
+
default:
ret = -ENOTTY;
break;
@@ -1209,7 +1301,15 @@ vchiq_close(struct cdev *dev, int flags __unused, int fmt __unused,
(MAX_COMPLETIONS - 1)];
service1 = completion->service_userdata;
if (completion->reason == VCHIQ_SERVICE_CLOSED)
+ {
+ USER_SERVICE_T *user_service =
+					service1->base.userdata;
+
+ /* Wake any blocked user-thread */
+ if (instance->use_close_delivered)
+ up(&user_service->close_event);
unlock_service(service1);
+ }
instance->completion_remove++;
}
@@ -1704,7 +1804,7 @@ vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
*/
-inline void
+void
set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
enum vc_suspend_status new_state)
{
@@ -1725,6 +1825,7 @@ set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
complete_all(&arm_state->vc_resume_complete);
break;
case VC_SUSPEND_IDLE:
+ /* TODO: reinit_completion */
INIT_COMPLETION(arm_state->vc_suspend_complete);
break;
case VC_SUSPEND_REQUESTED:
@@ -1741,7 +1842,7 @@ set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
}
}
-inline void
+void
set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
enum vc_resume_status new_state)
{
@@ -1753,6 +1854,7 @@ set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
case VC_RESUME_FAILED:
break;
case VC_RESUME_IDLE:
+ /* TODO: reinit_completion */
INIT_COMPLETION(arm_state->vc_resume_complete);
break;
case VC_RESUME_REQUESTED:
@@ -1815,6 +1917,7 @@ block_resume(VCHIQ_ARM_STATE_T *arm_state)
* (which only happens when blocked_count hits 0) then those threads
* will have to wait until next time around */
if (arm_state->blocked_count) {
+ /* TODO: reinit_completion */
INIT_COMPLETION(arm_state->blocked_blocker);
write_unlock_bh(&arm_state->susp_res_lock);
vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
@@ -1860,6 +1963,7 @@ block_resume(VCHIQ_ARM_STATE_T *arm_state)
write_lock_bh(&arm_state->susp_res_lock);
resume_count++;
}
+ /* TODO: reinit_completion */
INIT_COMPLETION(arm_state->resume_blocker);
arm_state->resume_blocked = 1;