Jenkins2 merged this change. ( https://gerrit.asterisk.org/8741 )

Approvals:
  Joshua Colp: Looks good to me, but someone else must approve
  George Joseph: Looks good to me, approved
  Jenkins2: Approved for Submit
pjsip_scheduler.c: Fix ao2 usage errors.

* Removed several invalid uses of OBJ_NOLOCK. These uses resulted in the
'tasks' container being accessed without a lock in a multi-threaded
environment. A recipe for crashes.

* Removed needlessly obtaining schtd object references. If the caller
providing you a pointer to an object doesn't have a valid reference then
you cannot safely get one from it.

* Getting a ref to 'tasks' when you aren't copying the pointer into
another location is useless. The 'tasks' container pointer is global.

* Removed many unnecessary uses of RAII_VAR.

* Make ast_sip_schedule_task() name parameter const.

ASTERISK_26806

Change-Id: I5c62488e651314e2a1dbc01f5b078a15512d73db
---
M include/asterisk/res_pjsip.h
M res/res_pjsip/pjsip_scheduler.c
2 files changed, 56 insertions(+), 48 deletions(-)

diff --git a/include/asterisk/res_pjsip.h b/include/asterisk/res_pjsip.h
index 8cb428f..d091f9a 100644
--- a/include/asterisk/res_pjsip.h
+++ b/include/asterisk/res_pjsip.h
@@ -1702,7 +1702,7 @@
 *
 */
 struct ast_sip_sched_task *ast_sip_schedule_task(struct ast_taskprocessor *serializer,
- int interval, ast_sip_task sip_task, char *name, void *task_data,
+ int interval, ast_sip_task sip_task, const char *name, void *task_data,
 enum ast_sip_scheduler_task_flags flags);
 
 /*!
diff --git a/res/res_pjsip/pjsip_scheduler.c b/res/res_pjsip/pjsip_scheduler.c
index 7520db8..43a9f59 100644
--- a/res/res_pjsip/pjsip_scheduler.c
+++ b/res/res_pjsip/pjsip_scheduler.c
@@ -62,7 +62,7 @@
 enum ast_sip_scheduler_task_flags flags;
 /*! the serializer to be used (if any) */
 struct ast_taskprocessor *serializer;
- /* A name to be associated with the task */
+ /*! A name to be associated with the task */
 char name[0];
 };
 
@@ -115,7 +115,7 @@
 delay = schtd->interval - (ast_tvdiff_ms(schtd->last_end, schtd->last_start) % schtd->interval);
 }
 
- schtd->current_scheduler_id = ast_sched_add(scheduler_context, delay, push_to_serializer, (const void *)schtd);
+ schtd->current_scheduler_id = ast_sched_add(scheduler_context, delay, push_to_serializer, schtd);
 if (schtd->current_scheduler_id < 0) {
 schtd->interval = 0;
 ao2_unlock(schtd);
@@ -166,28 +166,28 @@
 
 int ast_sip_sched_task_cancel_by_name(const char *name)
 {
- RAII_VAR(struct ast_sip_sched_task *, schtd, NULL, ao2_cleanup);
+ int res;
+ struct ast_sip_sched_task *schtd;
 
 if (ast_strlen_zero(name)) {
 return -1;
 }
 
- schtd = ao2_find(tasks, name, OBJ_SEARCH_KEY | OBJ_NOLOCK);
+ schtd = ao2_find(tasks, name, OBJ_SEARCH_KEY);
 if (!schtd) {
 return -1;
 }
 
- return ast_sip_sched_task_cancel(schtd);
+ res = ast_sip_sched_task_cancel(schtd);
+ ao2_ref(schtd, -1);
+ return res;
 }
 
 
 int ast_sip_sched_task_get_times(struct ast_sip_sched_task *schtd,
 struct timeval *queued, struct timeval *last_start, struct timeval *last_end)
 {
- if (!ao2_ref_and_lock(schtd)) {
- return -1;
- }
-
+ ao2_lock(schtd);
 if (queued) {
 memcpy(queued, &schtd->when_queued, sizeof(struct timeval));
 }
@@ -197,8 +197,7 @@
 if (last_end) {
 memcpy(last_end, &schtd->last_end, sizeof(struct timeval));
 }
-
- ao2_unlock_and_unref(schtd);
+ ao2_unlock(schtd);
 
 return 0;
 }
@@ -206,18 +205,21 @@
 int ast_sip_sched_task_get_times_by_name(const char *name,
 struct timeval *queued, struct timeval *last_start, struct timeval *last_end)
 {
- RAII_VAR(struct ast_sip_sched_task *, schtd, NULL, ao2_cleanup);
+ int res;
+ struct ast_sip_sched_task *schtd;
 
 if (ast_strlen_zero(name)) {
 return -1;
 }
 
- schtd = ao2_find(tasks, name, OBJ_SEARCH_KEY | OBJ_NOLOCK);
+ schtd = ao2_find(tasks, name, OBJ_SEARCH_KEY);
 if (!schtd) {
 return -1;
 }
 
- return ast_sip_sched_task_get_times(schtd, queued, last_start, last_end);
+ res = ast_sip_sched_task_get_times(schtd, queued, last_start, last_end);
+ ao2_ref(schtd, -1);
+ return res;
 }
 
 int ast_sip_sched_task_get_name(struct ast_sip_sched_task *schtd, char *name, size_t maxlen)
@@ -226,13 +228,9 @@
 return -1;
 }
 
- if (!ao2_ref_and_lock(schtd)) {
- return -1;
- }
-
+ ao2_lock(schtd);
 ast_copy_string(name, schtd->name, maxlen);
-
- ao2_unlock_and_unref(schtd);
+ ao2_unlock(schtd);
 
 return 0;
 }
@@ -243,9 +241,7 @@
 struct timeval since_when;
 struct timeval now;
 
- if (!ao2_ref_and_lock(schtd)) {
- return -1;
- }
+ ao2_lock(schtd);
 
 if (schtd->interval) {
 delay = schtd->interval;
@@ -264,50 +260,52 @@
 delay = -1;
 }
 
- ao2_unlock_and_unref(schtd);
+ ao2_unlock(schtd);
 
 return delay;
 }
 
 int ast_sip_sched_task_get_next_run_by_name(const char *name)
 {
- RAII_VAR(struct ast_sip_sched_task *, schtd, NULL, ao2_cleanup);
+ int next_run;
+ struct ast_sip_sched_task *schtd;
 
 if (ast_strlen_zero(name)) {
 return -1;
 }
 
- schtd = ao2_find(tasks, name, OBJ_SEARCH_KEY | OBJ_NOLOCK);
+ schtd = ao2_find(tasks, name, OBJ_SEARCH_KEY);
 if (!schtd) {
 return -1;
 }
 
- return ast_sip_sched_task_get_next_run(schtd);
+ next_run = ast_sip_sched_task_get_next_run(schtd);
+ ao2_ref(schtd, -1);
+ return next_run;
 }
 
 int ast_sip_sched_is_task_running(struct ast_sip_sched_task *schtd)
 {
- if (!schtd) {
- return 0;
- }
-
- return schtd->is_running;
+ return schtd ? schtd->is_running : 0;
 }
 
 int ast_sip_sched_is_task_running_by_name(const char *name)
 {
- RAII_VAR(struct ast_sip_sched_task *, schtd, NULL, ao2_cleanup);
+ int is_running;
+ struct ast_sip_sched_task *schtd;
 
 if (ast_strlen_zero(name)) {
 return 0;
 }
 
- schtd = ao2_find(tasks, name, OBJ_SEARCH_KEY | OBJ_NOLOCK);
+ schtd = ao2_find(tasks, name, OBJ_SEARCH_KEY);
 if (!schtd) {
 return 0;
 }
 
- return schtd->is_running;
+ is_running = schtd->is_running;
+ ao2_ref(schtd, -1);
+ return is_running;
 }
 
 static void schtd_destructor(void *data)
@@ -323,7 +321,8 @@
 }
 
 struct ast_sip_sched_task *ast_sip_schedule_task(struct ast_taskprocessor *serializer,
- int interval, ast_sip_task sip_task, char *name, void *task_data, enum ast_sip_scheduler_task_flags flags)
+ int interval, ast_sip_task sip_task, const char *name, void *task_data,
+ enum ast_sip_scheduler_task_flags flags)
 {
 #define ID_LEN 13 /* task_deadbeef */
 struct ast_sip_sched_task *schtd;
@@ -354,7 +353,7 @@
 if (flags & AST_SIP_SCHED_TASK_DATA_AO2) {
 ao2_ref(task_data, +1);
 }
- res = ast_sched_add(scheduler_context, interval, push_to_serializer, (const void *)schtd);
+ res = ast_sched_add(scheduler_context, interval, push_to_serializer, schtd);
 if (res < 0) {
 ao2_ref(schtd, -1);
 return NULL;
@@ -371,14 +370,14 @@
 {
 struct ao2_iterator i;
 struct ast_sip_sched_task *schtd;
- const char *log_format = ast_logger_get_dateformat();
+ const char *log_format;
 struct ast_tm tm;
 char queued[32];
 char last_start[32];
 char next_start[32];
 int datelen;
- struct timeval now = ast_tvnow();
- const char *separator = "======================================";
+ struct timeval now;
+ static const char separator[] = "======================================";
 
 switch (cmd) {
 case CLI_INIT:
@@ -394,6 +393,9 @@
 return CLI_SHOWUSAGE;
 }
 
+ now = ast_tvnow();
+ log_format = ast_logger_get_dateformat();
+
 ast_localtime(&now, &tm, NULL);
 datelen = ast_strftime(queued, sizeof(queued), log_format, &tm);
 
@@ -408,12 +410,16 @@
 datelen, separator, separator, datelen + 8, separator);
 
 
- ao2_ref(tasks, +1);
 ao2_rdlock(tasks);
- i = ao2_iterator_init(tasks, 0);
+ i = ao2_iterator_init(tasks, AO2_ITERATOR_DONTLOCK);
 while ((schtd = ao2_iterator_next(&i))) {
- int next_run_sec = ast_sip_sched_task_get_next_run(schtd) / 1000;
- struct timeval next = ast_tvadd(now, (struct timeval) {next_run_sec, 0});
+ int next_run_sec;
+ struct timeval next;
+
+ ao2_lock(schtd);
+
+ next_run_sec = ast_sip_sched_task_get_next_run(schtd) / 1000;
+ next = ast_tvadd(now, (struct timeval) {next_run_sec, 0});
 
 ast_localtime(&schtd->when_queued, &tm, NULL);
 ast_strftime(queued, sizeof(queued), log_format, &tm);
@@ -436,11 +442,12 @@
 datelen, queued, last_start,
 next_start,
 next_run_sec);
+ ao2_unlock(schtd);
+
 ao2_cleanup(schtd);
 }
 ao2_iterator_destroy(&i);
 ao2_unlock(tasks);
- ao2_ref(tasks, -1);
 ast_cli(a->fd, "\n");
 
 return CLI_SUCCESS;
@@ -463,8 +470,9 @@
 return -1;
 }
 
- tasks = ao2_container_alloc_hash(AO2_ALLOC_OPT_LOCK_RWLOCK, AO2_CONTAINER_ALLOC_OPT_DUPS_REJECT,
- TASK_BUCKETS, ast_sip_sched_task_hash_fn, ast_sip_sched_task_sort_fn, ast_sip_sched_task_cmp_fn);
+ tasks = ao2_container_alloc_hash(AO2_ALLOC_OPT_LOCK_RWLOCK,
+ AO2_CONTAINER_ALLOC_OPT_DUPS_REJECT, TASK_BUCKETS, ast_sip_sched_task_hash_fn,
+ ast_sip_sched_task_sort_fn, ast_sip_sched_task_cmp_fn);
 if (!tasks) {
 ast_log(LOG_ERROR, "Failed to allocate task container. Aborting load\n");
 ast_sched_context_destroy(scheduler_context);
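For readers less familiar with astobj2, the lookup shape the *_by_name() helpers converge on above is worth spelling out. The sketch below is illustrative only: the struct, container, and function names are hypothetical, not from the tree. It assumes only the documented astobj2 behaviors that ao2_find() locks the container itself unless OBJ_NOLOCK is passed, and that it returns the matched object with a reference already taken on the caller's behalf.

/*
 * Minimal sketch of the corrected by-name lookup (hypothetical names).
 * Assumes 'things' is an ao2 hash container keyed on the object's name,
 * allocated elsewhere with AO2_ALLOC_OPT_LOCK_RWLOCK like 'tasks'.
 */
#include "asterisk.h"
#include "asterisk/astobj2.h"
#include "asterisk/strings.h"

struct thing {
	int is_running;
	char name[0];
};

static struct ao2_container *things;

static int thing_is_running_by_name(const char *name)
{
	struct thing *obj;
	int is_running;

	if (ast_strlen_zero(name)) {
		return 0;
	}

	/* No OBJ_NOLOCK: let ao2_find() take the container's own lock for
	 * the search.  The object it returns already holds a reference for us. */
	obj = ao2_find(things, name, OBJ_SEARCH_KEY);
	if (!obj) {
		return 0;
	}

	/* Copy out what is needed, then drop the reference ao2_find() gave us. */
	is_running = obj->is_running;
	ao2_ref(obj, -1);

	return is_running;
}

This is the shape ast_sip_sched_task_cancel_by_name(), ast_sip_sched_task_get_times_by_name(), ast_sip_sched_task_get_next_run_by_name() and ast_sip_sched_is_task_running_by_name() now follow; the RAII_VAR form the patch removes would release the reference just as well, but the explicit ao2_ref(schtd, -1) keeps the lifetime obvious where the macro buys nothing.

The CLI handler change applies the same reasoning to iteration: it now holds the container's read lock explicitly for the whole walk, so the iterator is created with AO2_ITERATOR_DONTLOCK to avoid locking the container a second time. Another hedged sketch with the same hypothetical names, assuming only that ao2_iterator_next() returns each object with a reference added:

/* Requires "asterisk/cli.h" for ast_cli(). */
static void show_things(int fd)
{
	struct ao2_iterator i;
	struct thing *obj;

	/* Hold the container read lock ourselves for the duration of the walk... */
	ao2_rdlock(things);
	/* ...and tell the iterator not to lock the container again. */
	i = ao2_iterator_init(things, AO2_ITERATOR_DONTLOCK);
	while ((obj = ao2_iterator_next(&i))) {
		/* Lock the individual object while reading its fields. */
		ao2_lock(obj);
		ast_cli(fd, "%s: %s\n", obj->name, obj->is_running ? "running" : "idle");
		ao2_unlock(obj);

		/* Release the reference ao2_iterator_next() added. */
		ao2_ref(obj, -1);
	}
	ao2_iterator_destroy(&i);
	ao2_unlock(things);
}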
<div style="display:none"> Gerrit-Project: asterisk </div>
<div style="display:none"> Gerrit-Branch: 13 </div>
<div style="display:none"> Gerrit-MessageType: merged </div>
<div style="display:none"> Gerrit-Change-Id: I5c62488e651314e2a1dbc01f5b078a15512d73db </div>
<div style="display:none"> Gerrit-Change-Number: 8741 </div>
<div style="display:none"> Gerrit-PatchSet: 3 </div>
<div style="display:none"> Gerrit-Owner: Richard Mudgett <rmudgett@digium.com> </div>
<div style="display:none"> Gerrit-Reviewer: George Joseph <gjoseph@digium.com> </div>
<div style="display:none"> Gerrit-Reviewer: Jenkins2 </div>
<div style="display:none"> Gerrit-Reviewer: Joshua Colp <jcolp@digium.com> </div>