Joshua Colp merged this change.

View change: https://gerrit.asterisk.org/6507

Approvals:
  Joshua Colp: Looks good to me, but someone else must approve; Approved for Submit
  Richard Mudgett: Looks good to me, but someone else must approve
  George Joseph: Looks good to me, approved

res_calendar: On reload, update all configuration

This changes the behavior of res_calendar to drop all existing calendars
and re-create them whenever a reload is done. The Calendar API provides
no way for configuration information to be pushed down to calendar
'techs' so updated settings would not take effect until a module
unload/load was done or Asterisk was restarted.

Asterisk 15+ already has a configuration option 'fetch_again_at_reload'
that performs a similar function.

Also fix a tiny memory leak in res_calendar_caldav while we're at it.

ASTERISK-25524 #close
Reported by: Jesper

Change-Id: Ib0f8057642e9d471960f1a79fd42e5a3ce587d3b
---
M include/asterisk/calendar.h
M res/res_calendar.c
M res/res_calendar_caldav.c
3 files changed, 29 insertions(+), 56 deletions(-)

diff --git a/include/asterisk/calendar.h b/include/asterisk/calendar.h
index da4af01..57140ba 100644
--- a/include/asterisk/calendar.h
+++ b/include/asterisk/calendar.h
@@ -133,7 +133,7 @@
 	pthread_t thread;    /*!< The thread that the calendar is loaded/updated in */
 	ast_cond_t unload;
 	int unloading:1;
-	int pending_deletion:1;
+	int pending_deletion:1; /*!< No longer used */
 	struct ao2_container *events;  /*!< The events that are known at this time */
 };
 
diff --git a/res/res_calendar.c b/res/res_calendar.c
index 048f5e0..16a3265 100644
--- a/res/res_calendar.c
+++ b/res/res_calendar.c
@@ -341,10 +341,7 @@
 	}
 	ast_calendar_clear_events(cal);
 	ast_string_field_free_memory(cal);
-	if (cal->vars) {
-		ast_variables_destroy(cal->vars);
-		cal->vars = NULL;
-	}
+	ast_variables_destroy(cal->vars);
 	ao2_ref(cal->events, -1);
 	ao2_unlock(cal);
 }
@@ -406,28 +403,22 @@
 {
 	struct ast_calendar *cal;
 	struct ast_variable *v, *last = NULL;
-	int new_calendar = 0;
 
-	if (!(cal = find_calendar(cat))) {
-		new_calendar = 1;
-		if (!(cal = ao2_alloc(sizeof(*cal), calendar_destructor))) {
-			ast_log(LOG_ERROR, "Could not allocate calendar structure. Stopping.\n");
-			return NULL;
-		}
+	if (!(cal = ao2_alloc(sizeof(*cal), calendar_destructor))) {
+		ast_log(LOG_ERROR, "Could not allocate calendar structure. Stopping.\n");
+		return NULL;
+	}
 
-		if (!(cal->events = ao2_container_alloc(CALENDAR_BUCKETS, event_hash_fn, event_cmp_fn))) {
-			ast_log(LOG_ERROR, "Could not allocate events container for %s\n", cat);
-			cal = unref_calendar(cal);
-			return NULL;
-		}
+	if (!(cal->events = ao2_container_alloc(CALENDAR_BUCKETS, event_hash_fn, event_cmp_fn))) {
+		ast_log(LOG_ERROR, "Could not allocate events container for %s\n", cat);
+		cal = unref_calendar(cal);
+		return NULL;
+	}
 
-		if (ast_string_field_init(cal, 32)) {
-			ast_log(LOG_ERROR, "Couldn't create string fields for %s\n", cat);
-			cal = unref_calendar(cal);
-			return NULL;
-		}
-	} else {
-		cal->pending_deletion = 0;
+	if (ast_string_field_init(cal, 32)) {
+		ast_log(LOG_ERROR, "Couldn't create string fields for %s\n", cat);
+		cal = unref_calendar(cal);
+		return NULL;
 	}
 
 	ast_string_field_set(cal, name, cat);
@@ -489,17 +480,15 @@
 			cal->name);
 	}
 
-	if (new_calendar) {
-		cal->thread = AST_PTHREADT_NULL;
-		ast_cond_init(&cal->unload, NULL);
-		ao2_link(calendars, cal);
-		if (ast_pthread_create(&cal->thread, NULL, cal->tech->load_calendar, cal)) {
-			/* If we start failing to create threads, go ahead and return NULL
-			 * and the tech module will be unregistered
-			 */
-			ao2_unlink(calendars, cal);
-			cal = unref_calendar(cal);
-		}
+	cal->thread = AST_PTHREADT_NULL;
+	ast_cond_init(&cal->unload, NULL);
+	ao2_link(calendars, cal);
+	if (ast_pthread_create(&cal->thread, NULL, cal->tech->load_calendar, cal)) {
+		/* If we start failing to create threads, go ahead and return NULL
+		 * and the tech module will be unregistered
+		 */
+		ao2_unlink(calendars, cal);
+		cal = unref_calendar(cal);
 	}
 
 	return cal;
@@ -1770,30 +1759,16 @@
 	.read = calendar_event_read,
 };
 
-static int cb_pending_deletion(void *user_data, void *arg, int flags)
-{
-	struct ast_calendar *cal = user_data;
-
-	cal->pending_deletion = 1;
-
-	return CMP_MATCH;
-}
-
-static int cb_rm_pending_deletion(void *user_data, void *arg, int flags)
-{
-	struct ast_calendar *cal = user_data;
-
-	return cal->pending_deletion ? CMP_MATCH : 0;
-}
-
 static int reload(void)
 {
 	struct ast_calendar_tech *iter;
 
 	ast_mutex_lock(&reloadlock);
 
-	/* Mark existing calendars for deletion */
-	ao2_callback(calendars, OBJ_NODATA | OBJ_MULTIPLE, cb_pending_deletion, NULL);
+	/* Delete all of the calendars */
+	ao2_callback(calendars, OBJ_UNLINK | OBJ_NODATA | OBJ_MULTIPLE, NULL, NULL);
+
+	/* Load configuration */
 	load_config(1);
 
 	AST_LIST_LOCK(&techs);
@@ -1803,9 +1778,6 @@
 		}
 	}
 	AST_LIST_UNLOCK(&techs);
-
-	/* Delete calendars that no longer show up in the config */
-	ao2_callback(calendars, OBJ_UNLINK | OBJ_NODATA | OBJ_MULTIPLE, cb_rm_pending_deletion, NULL);
 
 	ast_mutex_unlock(&reloadlock);
 
diff --git a/res/res_calendar_caldav.c b/res/res_calendar_caldav.c
index 355dd1f..02a44c7 100644
--- a/res/res_calendar_caldav.c
+++ b/res/res_calendar_caldav.c
@@ -80,6 +80,7 @@
 	if (pvt->session) {
 		ne_session_destroy(pvt->session);
 	}
+	ne_uri_free(&pvt->uri);
 	ast_string_field_free_memory(pvt);
 
 	ao2_callback(pvt->events, OBJ_UNLINK | OBJ_NODATA | OBJ_MULTIPLE, NULL, NULL);
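A note on the calendar.h hunk: pending_deletion is no longer read or written anywhere, yet the bitfield stays in the public struct ast_calendar and is merely marked /*!< No longer used */. This is presumably deliberate, since removing the field would change the structure's layout and break ABI compatibility for modules built against the 13 branch.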
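For readers unfamiliar with the astobj2 idiom the new reload() relies on: a NULL match callback combined with OBJ_MULTIPLE matches every object in the container, OBJ_UNLINK removes each match and releases the container's reference, and OBJ_NODATA suppresses any returned results. A minimal sketch of the pattern, assuming only the astobj2 API from asterisk/astobj2.h (drop_all_calendars and the file-scope container are illustrative names, not code from this change):

/* Illustrative sketch, not the module's actual code. */
#include "asterisk.h"
#include "asterisk/astobj2.h"

static struct ao2_container *calendars; /* assumed allocated at module load */

static void drop_all_calendars(void)
{
	/* NULL callback + OBJ_MULTIPLE: match every object.
	 * OBJ_UNLINK: remove each match and release the container's reference,
	 * so an object's destructor runs once its last reference is gone.
	 * OBJ_NODATA: do not hand the matches back to the caller. */
	ao2_callback(calendars, OBJ_UNLINK | OBJ_NODATA | OBJ_MULTIPLE, NULL, NULL);
}

After that call the container is empty, and the subsequent load_config(1) builds fresh calendar objects, which is how updated settings finally reach each calendar tech.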
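The res_calendar_caldav hunk pairs the ne_uri_parse() performed when the calendar loads with the ne_uri_free() the destructor had been missing. A standalone sketch of that neon lifecycle, assuming neon's ne_uri.h API (the URL is a placeholder; link with -lneon):

#include <stdio.h>
#include <ne_uri.h>

int main(void)
{
	ne_uri uri = { 0 };

	/* ne_uri_parse() heap-allocates the component strings held inside
	 * 'uri' and returns non-zero on malformed input. */
	if (ne_uri_parse("https://calendar.example.com/caldav/", &uri)) {
		fprintf(stderr, "could not parse URI\n");
		return 1;
	}

	printf("host=%s path=%s\n", uri.host, uri.path);

	/* Each successful parse must be matched by ne_uri_free(); skipping it
	 * leaks the component strings, which is the small leak fixed above. */
	ne_uri_free(&uri);
	return 0;
}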

<div style="display:none"> Gerrit-Project: asterisk </div>
<div style="display:none"> Gerrit-Branch: 13 </div>
<div style="display:none"> Gerrit-MessageType: merged </div>
<div style="display:none"> Gerrit-Change-Id: Ib0f8057642e9d471960f1a79fd42e5a3ce587d3b </div>
<div style="display:none"> Gerrit-Change-Number: 6507 </div>
<div style="display:none"> Gerrit-PatchSet: 2 </div>
<div style="display:none"> Gerrit-Owner: Sean Bright <sean.bright@gmail.com> </div>
<div style="display:none"> Gerrit-Reviewer: George Joseph <gjoseph@digium.com> </div>
<div style="display:none"> Gerrit-Reviewer: Jenkins2 </div>
<div style="display:none"> Gerrit-Reviewer: Joshua Colp <jcolp@digium.com> </div>
<div style="display:none"> Gerrit-Reviewer: Richard Mudgett <rmudgett@digium.com> </div>