[Asterisk-code-review] func_lock: fix multiple-channel-grant problems. (asterisk[18])
Joshua Colp
asteriskteam at digium.com
Wed Jan 6 12:03:48 CST 2021
Joshua Colp has submitted this change. ( https://gerrit.asterisk.org/c/asterisk/+/15260 )
Change subject: func_lock: fix multiple-channel-grant problems.
......................................................................
func_lock: fix multiple-channel-grant problems.
Under contention it becomes possible that multiple channels will be told
they successfully obtained the lock, which is a bug. Please refer to
ASTERISK-29217
This introduces a couple of changes.
1. Replaces requesters ao2 container with simple counter (we don't
really care who is waiting for the lock, only how many). This is
updated under ->mutex to prevent memory access races.
2. Correct semantics for ast_cond_timedwait() as described in
pthread_cond_broadcast(3P) is used (multiple threads can be released
on a single _signal()).
3. Module unload races are taken care of and memory properly cleaned
up.
Change-Id: I6f68b5ec82ff25b2909daf6e4d19ca864a463e29
Signed-off-by: Jaco Kroon <jaco at uls.co.za>
---
M funcs/func_lock.c
1 file changed, 58 insertions(+), 109 deletions(-)
Approvals:
George Joseph: Looks good to me, approved
Joshua Colp: Approved for Submit
diff --git a/funcs/func_lock.c b/funcs/func_lock.c
index acb5fc9..0726407 100644
--- a/funcs/func_lock.c
+++ b/funcs/func_lock.c
@@ -110,7 +110,6 @@
static void lock_free(void *data);
static void lock_fixup(void *data, struct ast_channel *oldchan, struct ast_channel *newchan);
static int unloading = 0;
-static pthread_t broker_tid = AST_PTHREADT_NULL;
static const struct ast_datastore_info lock_info = {
.type = "MUTEX",
@@ -124,8 +123,8 @@
ast_cond_t cond;
/*! count is needed so if a recursive mutex exits early, we know how many times to unlock it. */
unsigned int count;
- /*! Container of requesters for the named lock */
- struct ao2_container *requesters;
+ /*! Count of waiting of requesters for the named lock */
+ unsigned int requesters;
/*! who owns us */
struct ast_channel *owner;
/*! name of the lock */
@@ -147,8 +146,11 @@
while ((clframe = AST_LIST_REMOVE_HEAD(oldlist, list))) {
/* Only unlock if we own the lock */
if (clframe->channel == clframe->lock_frame->owner) {
+ ast_mutex_lock(&clframe->lock_frame->mutex);
clframe->lock_frame->count = 0;
clframe->lock_frame->owner = NULL;
+ ast_cond_signal(&clframe->lock_frame->cond);
+ ast_mutex_unlock(&clframe->lock_frame->mutex);
}
ast_free(clframe);
}
@@ -173,54 +175,11 @@
if (clframe->lock_frame->owner == oldchan) {
clframe->lock_frame->owner = newchan;
}
- /* We don't move requesters, because the thread stack is different */
clframe->channel = newchan;
}
AST_LIST_UNLOCK(list);
}
-static void *lock_broker(void *unused)
-{
- struct lock_frame *frame;
- struct timespec forever = { 1000000, 0 };
- for (;;) {
- int found_requester = 0;
-
- /* Test for cancel outside of the lock */
- pthread_testcancel();
- AST_LIST_LOCK(&locklist);
-
- AST_LIST_TRAVERSE(&locklist, frame, entries) {
- if (ao2_container_count(frame->requesters)) {
- found_requester++;
- ast_mutex_lock(&frame->mutex);
- if (!frame->owner) {
- ast_cond_signal(&frame->cond);
- }
- ast_mutex_unlock(&frame->mutex);
- }
- }
-
- AST_LIST_UNLOCK(&locklist);
- pthread_testcancel();
-
- /* If there are no requesters, then wait for a signal */
- if (!found_requester) {
- nanosleep(&forever, NULL);
- } else {
- sched_yield();
- }
- }
- /* Not reached */
- return NULL;
-}
-
-static int ast_channel_cmp_cb(void *obj, void *arg, int flags)
-{
- struct ast_channel *chan = obj, *cmp_args = arg;
- return strcasecmp(ast_channel_name(chan), ast_channel_name(cmp_args)) ? 0 : CMP_MATCH;
-}
-
static int get_lock(struct ast_channel *chan, char *lockname, int trylock)
{
struct ast_datastore *lock_store = ast_channel_datastore_find(chan, &lock_info, NULL);
@@ -290,17 +249,13 @@
AST_LIST_UNLOCK(&locklist);
return -1;
}
- current->requesters = ao2_container_alloc_list(AO2_ALLOC_OPT_LOCK_MUTEX, 0,
- NULL, ast_channel_cmp_cb);
- if (!current->requesters) {
- ast_mutex_destroy(&current->mutex);
- ast_cond_destroy(&current->cond);
- ast_free(current);
- AST_LIST_UNLOCK(&locklist);
- return -1;
- }
+ current->requesters = 0;
AST_LIST_INSERT_TAIL(&locklist, current, entries);
}
+ /* Add to requester list */
+ ast_mutex_lock(&current->mutex);
+ current->requesters++;
+ ast_mutex_unlock(&current->mutex);
AST_LIST_UNLOCK(&locklist);
/* Found lock or created one - now find or create the corresponding link in the channel */
@@ -337,44 +292,42 @@
* the same amount, before we'll release this one.
*/
if (current->owner == chan) {
+ /* We're not a requester, we already have it */
+ ast_mutex_lock(&current->mutex);
+ current->requesters--;
+ ast_mutex_unlock(&current->mutex);
current->count++;
return 0;
}
- /* Okay, we have both frames, so now we need to try to lock.
- *
- * Locking order: always lock locklist first. We need the
- * locklist lock because the broker thread counts whether
- * there are requesters with the locklist lock held, and we
- * need to hold it, so that when we send our signal, below,
- * to wake up the broker thread, it definitely will see that
- * a requester exists at that point in time. Otherwise, we
- * could add to the requesters after it has already seen that
- * that lock is unoccupied and wait forever for another signal.
- */
- AST_LIST_LOCK(&locklist);
- ast_mutex_lock(&current->mutex);
- /* Add to requester list */
- ao2_link(current->requesters, chan);
- pthread_kill(broker_tid, SIGURG);
- AST_LIST_UNLOCK(&locklist);
-
/* Wait up to three seconds from now for LOCK. */
now = ast_tvnow();
timeout.tv_sec = now.tv_sec + 3;
timeout.tv_nsec = now.tv_usec * 1000;
- if (!current->owner
- || (!trylock
- && !(res = ast_cond_timedwait(&current->cond, &current->mutex, &timeout)))) {
- res = 0;
+ ast_mutex_lock(&current->mutex);
+
+ res = 0;
+ while (!trylock && !res && current->owner) {
+ res = ast_cond_timedwait(&current->cond, &current->mutex, &timeout);
+ }
+ if (current->owner) {
+ /* timeout;
+ * trylock; or
+ * cond_timedwait failed.
+ *
+ * either way, we fail to obtain the lock.
+ */
+ res = -1;
+ } else {
current->owner = chan;
current->count++;
- } else {
- res = -1;
+ res = 0;
}
/* Remove from requester list */
- ao2_unlink(current->requesters, chan);
+ current->requesters--;
+ if (res && unloading)
+ ast_cond_signal(&current->cond);
ast_mutex_unlock(&current->mutex);
return res;
@@ -422,7 +375,10 @@
}
if (--clframe->lock_frame->count == 0) {
+ ast_mutex_lock(&clframe->lock_frame->mutex);
clframe->lock_frame->owner = NULL;
+ ast_cond_signal(&clframe->lock_frame->cond);
+ ast_mutex_unlock(&clframe->lock_frame->mutex);
}
ast_copy_string(buf, "1", len);
@@ -478,33 +434,33 @@
/* Module flag */
unloading = 1;
- AST_LIST_LOCK(&locklist);
- while ((current = AST_LIST_REMOVE_HEAD(&locklist, entries))) {
- /* If any locks are currently in use, then we cannot unload this module */
- if (current->owner || ao2_container_count(current->requesters)) {
- /* Put it back */
- AST_LIST_INSERT_HEAD(&locklist, current, entries);
- AST_LIST_UNLOCK(&locklist);
- unloading = 0;
- return -1;
- }
- ast_mutex_destroy(&current->mutex);
- ao2_ref(current->requesters, -1);
- ast_free(current);
- }
-
- /* No locks left, unregister functions */
+ /* Make it impossible for new requesters to be added
+ * NOTE: channels could already be in get_lock() */
ast_custom_function_unregister(&lock_function);
ast_custom_function_unregister(&trylock_function);
- ast_custom_function_unregister(&unlock_function);
- if (broker_tid != AST_PTHREADT_NULL) {
- pthread_cancel(broker_tid);
- pthread_kill(broker_tid, SIGURG);
- pthread_join(broker_tid, NULL);
+ AST_LIST_LOCK(&locklist);
+ AST_LIST_TRAVERSE(&locklist, current, entries) {
+ ast_mutex_lock(&current->mutex);
+ while (current->owner || current->requesters) {
+ /* either the mutex is locked, or other parties are currently in get_lock,
+ * we need to wait for all of those to clear first */
+ ast_cond_wait(&current->cond, &current->mutex);
+ }
+ ast_mutex_unlock(&current->mutex);
+ /* At this point we know:
+ * 1. the lock has been released,
+ * 2. there are no requesters (nor should any be able to sneak in).
+ */
+ ast_mutex_destroy(&current->mutex);
+ ast_cond_destroy(&current->cond);
+ ast_free(current);
}
-
AST_LIST_UNLOCK(&locklist);
+ AST_LIST_HEAD_DESTROY(&locklist);
+
+ /* At this point we can safely stop access to UNLOCK */
+ ast_custom_function_unregister(&unlock_function);
return 0;
}
@@ -515,13 +471,6 @@
res |= ast_custom_function_register_escalating(&trylock_function, AST_CFE_READ);
res |= ast_custom_function_register_escalating(&unlock_function, AST_CFE_READ);
- if (ast_pthread_create_background(&broker_tid, NULL, lock_broker, NULL)) {
- ast_log(LOG_ERROR, "Failed to start lock broker thread. Unloading func_lock module.\n");
- broker_tid = AST_PTHREADT_NULL;
- unload_module();
- return AST_MODULE_LOAD_DECLINE;
- }
-
return res;
}
--
To view, visit https://gerrit.asterisk.org/c/asterisk/+/15260
To unsubscribe, or for help writing mail filters, visit https://gerrit.asterisk.org/settings
Gerrit-Project: asterisk
Gerrit-Branch: 18
Gerrit-Change-Id: I6f68b5ec82ff25b2909daf6e4d19ca864a463e29
Gerrit-Change-Number: 15260
Gerrit-PatchSet: 2
Gerrit-Owner: Jaco Kroon <jaco at uls.co.za>
Gerrit-Reviewer: Friendly Automation
Gerrit-Reviewer: George Joseph <gjoseph at digium.com>
Gerrit-Reviewer: Joshua Colp <jcolp at sangoma.com>
Gerrit-MessageType: merged
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.digium.com/pipermail/asterisk-code-review/attachments/20210106/589d6934/attachment-0001.html>
More information about the asterisk-code-review
mailing list