sched.c: Ensure oldest expiring entry runs first.
This patch is part of a series to resolve deadlocks in chan_sip.c.

* Updated sched unit test to check new behavior.

ASTERISK-25023

Change-Id: Ib69437327b3cda5e14c4238d9ff91b2531b34ef3
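Why a tie breaker is needed: a binary heap orders entries only through its comparator, and heap operations are not stable, so two entries with an identical `when` may pop in either order. The sketch below is a standalone illustration, not Asterisk code; qsort() stands in for the heap purely to show the comparator's effect. A monotonically increasing sequence number assigned at insert time restores first-scheduled, first-run order.

#include <stdio.h>
#include <stdlib.h>

struct entry {
	long when_ms;      /* absolute expiration time */
	unsigned int seq;  /* insertion order: the tie breaker */
	const char *name;
};

static int entry_cmp(const void *va, const void *vb)
{
	const struct entry *a = va;
	const struct entry *b = vb;

	if (a->when_ms != b->when_ms) {
		return a->when_ms < b->when_ms ? -1 : 1;
	}
	/* Same deadline: the entry scheduled first runs first.  The
	 * unsigned subtraction mirrors the style used by the patch. */
	return (int) (a->seq - b->seq);
}

int main(void)
{
	struct entry q[] = {
		{ 300, 0, "first" },
		{ 300, 1, "second" },
		{ 300, 2, "third" },
	};

	qsort(q, 3, sizeof(*q), entry_cmp);
	for (int i = 0; i < 3; ++i) {
		printf("%s\n", q[i].name);  /* first, second, third */
	}
	return 0;
}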
main/sched.c | 45
--- a/main/sched.c
+++ b/main/sched.c
@@ -83,6 +83,14 @@ struct sched {
 	/*! The ID that has been popped off the scheduler context's queue */
 	struct sched_id *sched_id;
 	struct timeval when;	/*!< Absolute time event should take place */
+	/*!
+	 * \brief Tie breaker in case the when is the same for multiple entries.
+	 *
+	 * \note The oldest expiring entry in the scheduler heap goes first.
+	 * This is possible when multiple events are scheduled to expire at
+	 * the same time by internal coding.
+	 */
+	unsigned int tie_breaker;
 	int resched;	/*!< When to reschedule */
 	int variable;	/*!< Use return value from callback to reschedule */
 	const void *data;	/*!< Data */
@@ -107,6 +115,8 @@ struct ast_sched_context {
 	ast_mutex_t lock;
 	unsigned int eventcnt;	/*!< Number of events processed */
 	unsigned int highwater;	/*!< highest count so far */
+	/*! Next tie breaker in case events expire at the same time. */
+	unsigned int tie_breaker;
 	struct ast_heap *sched_heap;
 	struct sched_thread *sched_thread;
 	/*! The scheduled task that is currently executing */
@@ -213,9 +223,17 @@ int ast_sched_start_thread(struct ast_sched_context *con)
 	return 0;
 }
 
-static int sched_time_cmp(void *a, void *b)
+static int sched_time_cmp(void *va, void *vb)
 {
-	return ast_tvcmp(((struct sched *) b)->when, ((struct sched *) a)->when);
+	struct sched *a = va;
+	struct sched *b = vb;
+	int cmp;
+
+	cmp = ast_tvcmp(b->when, a->when);
+	if (!cmp) {
+		cmp = b->tie_breaker - a->tie_breaker;
+	}
+	return cmp;
 }
 
 struct ast_sched_context *ast_sched_context_create(void)
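Note on the comparator hunk above: ast_tvcmp() is called with the arguments inverted (b before a), which is how the soonest `when` comes to rank highest in the heap the scheduler pops from; the tie-breaker fallback follows the same inversion. Below is a standalone sketch of that behavior with simplified stand-ins for struct sched and ast_tvcmp (not the real headers), showing that with equal deadlines the entry holding the smaller tie_breaker outranks the later one.

#include <stdio.h>
#include <sys/time.h>

struct sched {
	struct timeval when;
	unsigned int tie_breaker;
};

/* Same contract as ast_tvcmp(): -1, 0, or 1 comparing two timevals. */
static int tvcmp(struct timeval a, struct timeval b)
{
	if (a.tv_sec != b.tv_sec) {
		return a.tv_sec < b.tv_sec ? -1 : 1;
	}
	if (a.tv_usec != b.tv_usec) {
		return a.tv_usec < b.tv_usec ? -1 : 1;
	}
	return 0;
}

/* Mirrors the patched sched_time_cmp(): deadline first, tie breaker
 * second, both inverted so the entry to run next compares greatest. */
static int sched_time_cmp(void *va, void *vb)
{
	struct sched *a = va;
	struct sched *b = vb;
	int cmp;

	cmp = tvcmp(b->when, a->when);
	if (!cmp) {
		cmp = b->tie_breaker - a->tie_breaker;
	}
	return cmp;
}

int main(void)
{
	struct sched first = { { 100, 0 }, 0 };   /* scheduled earlier */
	struct sched second = { { 100, 0 }, 1 };  /* same expiry, later */

	/* Prints a positive value: "first" outranks "second". */
	printf("%d\n", sched_time_cmp(&first, &second));
	return 0;
}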
@@ -442,11 +460,28 @@ int ast_sched_wait(struct ast_sched_context *con)
  */
 static void schedule(struct ast_sched_context *con, struct sched *s)
 {
-	ast_heap_push(con->sched_heap, s);
+	size_t size;
 
-	if (ast_heap_size(con->sched_heap) > con->highwater) {
-		con->highwater = ast_heap_size(con->sched_heap);
+	size = ast_heap_size(con->sched_heap);
+
+	/* Record the largest the scheduler heap became for reporting purposes. */
+	if (con->highwater <= size) {
+		con->highwater = size + 1;
 	}
+
+	/* Determine the tie breaker value for the new entry. */
+	if (size) {
+		++con->tie_breaker;
+	} else {
+		/*
+		 * Restart the sequence for the first entry to make integer
+		 * roll over more unlikely.
+		 */
+		con->tie_breaker = 0;
+	}
+	s->tie_breaker = con->tie_breaker;
+
+	ast_heap_push(con->sched_heap, s);
 }
 
 /*! \brief
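The schedule() hunk above does two things besides the push: the high-water mark now counts the entry about to be pushed (size + 1), and the tie-breaker sequence restarts at zero whenever the heap is empty, which keeps the unsigned counter far from rolling over in normal operation. A standalone model of that bookkeeping, with the heap reduced to a plain counter (illustrative only, not the Asterisk implementation):

#include <assert.h>
#include <stddef.h>

struct ctx {
	size_t heap_size;        /* stands in for ast_heap_size() */
	unsigned int highwater;
	unsigned int tie_breaker;
};

static unsigned int next_tie_breaker(struct ctx *con)
{
	size_t size = con->heap_size;

	/* Record the largest the heap is about to become. */
	if (con->highwater <= size) {
		con->highwater = size + 1;
	}
	if (size) {
		++con->tie_breaker;
	} else {
		con->tie_breaker = 0;  /* empty heap: restart the sequence */
	}
	++con->heap_size;          /* the push itself */
	return con->tie_breaker;
}

int main(void)
{
	struct ctx con = { 0, 0, 0 };

	assert(next_tie_breaker(&con) == 0);  /* first entry */
	assert(next_tie_breaker(&con) == 1);
	assert(next_tie_breaker(&con) == 2);
	con.heap_size = 0;                    /* heap drained */
	assert(next_tie_breaker(&con) == 0);  /* sequence restarts */
	return 0;
}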
--- a/tests/test_sched.c
+++ b/tests/test_sched.c
@@ -45,6 +45,67 @@ static int sched_cb(const void *data)
 	return 0;
 }
 
+static int order_check;
+static int order_check_failed;
+
+static void sched_order_check(struct ast_test *test, int order)
+{
+	++order_check;
+	if (order_check != order) {
+		ast_test_status_update(test, "Unexpected execution order: expected:%d got:%d\n",
+			order, order_check);
+		order_check_failed = 1;
+	}
+}
+
+static int sched_order_1_cb(const void *data)
+{
+	sched_order_check((void *) data, 1);
+	return 0;
+}
+
+static int sched_order_2_cb(const void *data)
+{
+	sched_order_check((void *) data, 2);
+	return 0;
+}
+
+static int sched_order_3_cb(const void *data)
+{
+	sched_order_check((void *) data, 3);
+	return 0;
+}
+
+static int sched_order_4_cb(const void *data)
+{
+	sched_order_check((void *) data, 4);
+	return 0;
+}
+
+static int sched_order_5_cb(const void *data)
+{
+	sched_order_check((void *) data, 5);
+	return 0;
+}
+
+static int sched_order_6_cb(const void *data)
+{
+	sched_order_check((void *) data, 6);
+	return 0;
+}
+
+static int sched_order_7_cb(const void *data)
+{
+	sched_order_check((void *) data, 7);
+	return 0;
+}
+
+static int sched_order_8_cb(const void *data)
+{
+	sched_order_check((void *) data, 8);
+	return 0;
+}
 AST_TEST_DEFINE(sched_test_order)
 {
 	struct ast_sched_context *con;
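The eight near-identical callbacks above exist because ast_sched_cb hands a callback a single data pointer, which this test already uses for the ast_test handle; the expected position therefore has to be baked into each function. A hypothetical alternative (not what this commit does, and relying on the sched_order_check() helper above) would bundle both into one struct passed as the data pointer:

struct order_data {
	struct ast_test *test;
	int order;
};

static int sched_order_cb(const void *data)
{
	const struct order_data *od = data;

	sched_order_check(od->test, od->order);
	return 0;
}

The trade-off is lifetime management: each order_data instance would have to stay valid until its scheduler entry fires, whereas the per-position callbacks need no extra allocation.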
@@ -152,6 +213,49 @@ AST_TEST_DEFINE(sched_test_order)
 		goto return_cleanup;
 	}
 
+	/*
+	 * Schedule immediate and delayed entries to check the order
+	 * that they get executed.  They must get executed at the
+	 * time they expire in the order they were added.
+	 */
+#define DELAYED_SAME_EXPIRE 300	/* ms */
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, DELAYED_SAME_EXPIRE, sched_order_1_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, 0, sched_order_1_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, DELAYED_SAME_EXPIRE, sched_order_2_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, 0, sched_order_2_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, DELAYED_SAME_EXPIRE, sched_order_3_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, 0, sched_order_3_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, DELAYED_SAME_EXPIRE, sched_order_4_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, 0, sched_order_4_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, DELAYED_SAME_EXPIRE, sched_order_5_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, 0, sched_order_5_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, DELAYED_SAME_EXPIRE, sched_order_6_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, 0, sched_order_6_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, DELAYED_SAME_EXPIRE, sched_order_7_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, 0, sched_order_7_cb, test), res, return_cleanup);
+	ast_test_validate_cleanup(test, -1 < ast_sched_add(con, DELAYED_SAME_EXPIRE, sched_order_8_cb, test), res, return_cleanup);
+
+	/* Check order of scheduled immediate entries. */
+	order_check = 0;
+	order_check_failed = 0;
+	usleep(50 * 1000);/* Ensure that all the immediate entries are ready to expire */
+	ast_test_validate_cleanup(test, 7 == ast_sched_runq(con), res, return_cleanup);
+	ast_test_validate_cleanup(test, !order_check_failed, res, return_cleanup);
+
+	/* Check order of scheduled entries expiring at the same time. */
+	order_check = 0;
+	order_check_failed = 0;
+	usleep((DELAYED_SAME_EXPIRE + 50) * 1000);/* Ensure that all the delayed entries are ready to expire */
+	ast_test_validate_cleanup(test, 8 == ast_sched_runq(con), res, return_cleanup);
+	ast_test_validate_cleanup(test, !order_check_failed, res, return_cleanup);
+
+	if ((wait = ast_sched_wait(con)) != -1) {
+		ast_test_status_update(test,
+			"ast_sched_wait() should have returned -1, returned '%d'\n",
+			wait);
+		goto return_cleanup;
+	}
+
 	res = AST_TEST_PASS;
 
 return_cleanup:
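For callers, the visible guarantee is exactly what the test checks: entries given the same expiry now run in the order they were added. A hedged usage sketch follows; it compiles only inside the Asterisk tree, and everything other than the sched API calls (the callback names and log text) is illustrative.

#include "asterisk.h"

#include <unistd.h>

#include "asterisk/sched.h"
#include "asterisk/logger.h"

static int step_one(const void *data)
{
	ast_log(LOG_NOTICE, "step one\n");
	return 0;	/* 0: do not reschedule */
}

static int step_two(const void *data)
{
	ast_log(LOG_NOTICE, "step two\n");
	return 0;
}

static void demo(void)
{
	struct ast_sched_context *con = ast_sched_context_create();

	if (!con) {
		return;
	}
	/* Same 100ms expiry: before this patch the run order was
	 * heap-dependent; with it, the add order is preserved. */
	ast_sched_add(con, 100, step_one, NULL);
	ast_sched_add(con, 100, step_two, NULL);

	usleep(150 * 1000);	/* let both entries expire */
	ast_sched_runq(con);	/* runs step_one, then step_two */

	ast_sched_context_destroy(con);
}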