[Asterisk-code-review] sched.c: Ensure oldest expiring entry runs first. (asterisk[master])

Richard Mudgett asteriskteam at digium.com
Tue Mar 15 14:59:35 CDT 2016


Richard Mudgett has uploaded a new change for review.

  https://gerrit.asterisk.org/2404

Change subject: sched.c: Ensure oldest expiring entry runs first.
......................................................................

sched.c: Ensure oldest expiring entry runs first.

This patch is part of a series to resolve deadlocks in chan_sip.c.

* Updated sched unit test to check new behavior.

ASTERISK-25023

Change-Id: Ib69437327b3cda5e14c4238d9ff91b2531b34ef3
---
M main/sched.c
M tests/test_sched.c
2 files changed, 144 insertions(+), 5 deletions(-)


  git pull ssh://gerrit.asterisk.org:29418/asterisk refs/changes/04/2404/1

diff --git a/main/sched.c b/main/sched.c
index 7b7021d..9fee5b9 100644
--- a/main/sched.c
+++ b/main/sched.c
@@ -83,6 +83,14 @@
 	/*! The ID that has been popped off the scheduler context's queue */
 	struct sched_id *sched_id;
 	struct timeval when;          /*!< Absolute time event should take place */
+	/*!
+	 * \brief Tie breaker in case the when value is the same for multiple entries.
+	 *
+	 * \note The oldest expiring entry in the scheduler heap must run first.
+	 * Identical expiration times are possible when multiple events are
+	 * scheduled to expire at the same time by internal code.
+	 */
+	unsigned int tie_breaker;
 	int resched;                  /*!< When to reschedule */
 	int variable;                 /*!< Use return value from callback to reschedule */
 	const void *data;             /*!< Data */
@@ -107,6 +115,8 @@
 	ast_mutex_t lock;
 	unsigned int eventcnt;                  /*!< Number of events processed */
 	unsigned int highwater;					/*!< highest count so far */
+	/*! Next tie breaker in case events expire at the same time. */
+	unsigned int tie_breaker;
 	struct ast_heap *sched_heap;
 	struct sched_thread *sched_thread;
 	/*! The scheduled task that is currently executing */
@@ -213,9 +223,17 @@
 	return 0;
 }
 
-static int sched_time_cmp(void *a, void *b)
+static int sched_time_cmp(void *va, void *vb)
 {
-	return ast_tvcmp(((struct sched *) b)->when, ((struct sched *) a)->when);
+	struct sched *a = va;
+	struct sched *b = vb;
+	int cmp;
+
+	cmp = ast_tvcmp(b->when, a->when);
+	if (!cmp) {
+		cmp = b->tie_breaker - a->tie_breaker;
+	}
+	return cmp;
 }
 
 struct ast_sched_context *ast_sched_context_create(void)
@@ -442,11 +460,28 @@
  */
 static void schedule(struct ast_sched_context *con, struct sched *s)
 {
-	ast_heap_push(con->sched_heap, s);
+	size_t size;
 
-	if (ast_heap_size(con->sched_heap) > con->highwater) {
-		con->highwater = ast_heap_size(con->sched_heap);
+	size = ast_heap_size(con->sched_heap);
+
+	/* Record the largest the scheduler heap became for reporting purposes. */
+	if (con->highwater <= size) {
+		con->highwater = size + 1;
 	}
+
+	/* Determine the tie breaker value for the new entry. */
+	if (size) {
+		++con->tie_breaker;
+	} else {
+		/*
+		 * Restart the sequence when the heap is empty to make
+		 * integer rollover less likely.
+		 */
+		con->tie_breaker = 0;
+	}
+	s->tie_breaker = con->tie_breaker;
+
+	ast_heap_push(con->sched_heap, s);
 }
 
 /*! \brief
diff --git a/tests/test_sched.c b/tests/test_sched.c
index ec53ce8..23a5e3d 100644
--- a/tests/test_sched.c
+++ b/tests/test_sched.c
@@ -45,6 +45,67 @@
 	return 0;
 }
 
+static int order_check;
+static int order_check_failed;
+
+static void sched_order_check(struct ast_test *test, int order)
+{
+	++order_check;
+	if (order_check != order) {
+		ast_test_status_update(test, "Unexpected execution order: expected:%d got:%d\n",
+			order, order_check);
+		order_check_failed = 1;
+	}
+}
+
+static int sched_order_1_cb(const void *data)
+{
+	sched_order_check((void *) data, 1);
+	return 0;
+}
+
+static int sched_order_2_cb(const void *data)
+{
+	sched_order_check((void *) data, 2);
+	return 0;
+}
+
+static int sched_order_3_cb(const void *data)
+{
+	sched_order_check((void *) data, 3);
+	return 0;
+}
+
+static int sched_order_4_cb(const void *data)
+{
+	sched_order_check((void *) data, 4);
+	return 0;
+}
+
+static int sched_order_5_cb(const void *data)
+{
+	sched_order_check((void *) data, 5);
+	return 0;
+}
+
+static int sched_order_6_cb(const void *data)
+{
+	sched_order_check((void *) data, 6);
+	return 0;
+}
+
+static int sched_order_7_cb(const void *data)
+{
+	sched_order_check((void *) data, 7);
+	return 0;
+}
+
+static int sched_order_8_cb(const void *data)
+{
+	sched_order_check((void *) data, 8);
+	return 0;
+}
+
 AST_TEST_DEFINE(sched_test_order)
 {
 	struct ast_sched_context *con;
@@ -152,6 +213,49 @@
 		goto return_cleanup;
 	}
 
+	/*
+	 * Schedule immediate and delayed entries to check the order
+	 * in which they get executed.  They must be executed at the
+	 * time they expire, in the order they were added.
+	 */
+#define DELAYED_SAME_EXPIRE		300 /* ms */
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, DELAYED_SAME_EXPIRE, sched_order_1_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, 0, sched_order_1_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, DELAYED_SAME_EXPIRE, sched_order_2_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, 0, sched_order_2_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, DELAYED_SAME_EXPIRE, sched_order_3_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, 0, sched_order_3_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, DELAYED_SAME_EXPIRE, sched_order_4_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, 0, sched_order_4_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, DELAYED_SAME_EXPIRE, sched_order_5_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, 0, sched_order_5_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, DELAYED_SAME_EXPIRE, sched_order_6_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, 0, sched_order_6_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, DELAYED_SAME_EXPIRE, sched_order_7_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, 0, sched_order_7_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, DELAYED_SAME_EXPIRE, sched_order_8_cb, test), res, return_cleanup);
+
+	/* Check order of scheduled immediate entries. */
+	order_check = 0;
+	order_check_failed = 0;
+	usleep(50 * 1000);/* Ensure that all the immediate entries are ready to expire */
+	ast_test_validate_cleanup(test, 7 == ast_sched_runq(con), res, return_cleanup);
+	ast_test_validate_cleanup(test, !order_check_failed, res, return_cleanup);
+
+	/* Check order of scheduled entries expiring at the same time. */
+	order_check = 0;
+	order_check_failed = 0;
+	usleep((DELAYED_SAME_EXPIRE + 50) * 1000);/* Ensure that all the delayed entries are ready to expire */
+	ast_test_validate_cleanup(test, 8 == ast_sched_runq(con), res, return_cleanup);
+	ast_test_validate_cleanup(test, !order_check_failed, res, return_cleanup);
+
+	if ((wait = ast_sched_wait(con)) != -1) {
+		ast_test_status_update(test,
+				"ast_sched_wait() should have returned -1, returned '%d'\n",
+				wait);
+		goto return_cleanup;
+	}
+
 	res = AST_TEST_PASS;
 
 return_cleanup:

-- 
To view, visit https://gerrit.asterisk.org/2404
To unsubscribe, visit https://gerrit.asterisk.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: Ib69437327b3cda5e14c4238d9ff91b2531b34ef3
Gerrit-PatchSet: 1
Gerrit-Project: asterisk
Gerrit-Branch: master
Gerrit-Owner: Richard Mudgett <rmudgett at digium.com>



More information about the asterisk-code-review mailing list