@@ -260,7 +260,7 @@ static void test_cycle_work(struct work_struct *work)
 {
 	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
 	struct ww_acquire_ctx ctx;
-	int err;
+	int err, erra = 0;
 
 	ww_acquire_init(&ctx, &ww_class);
 	ww_mutex_lock(&cycle->a_mutex, &ctx);
@@ -270,17 +270,19 @@ static void test_cycle_work(struct work_struct *work)
 
 	err = ww_mutex_lock(cycle->b_mutex, &ctx);
 	if (err == -EDEADLK) {
+		err = 0;
 		ww_mutex_unlock(&cycle->a_mutex);
 		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
-		err = ww_mutex_lock(&cycle->a_mutex, &ctx);
+		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
 	}
 
 	if (!err)
 		ww_mutex_unlock(cycle->b_mutex);
-	ww_mutex_unlock(&cycle->a_mutex);
+	if (!erra)
+		ww_mutex_unlock(&cycle->a_mutex);
 	ww_acquire_fini(&ctx);
 
-	cycle->result = err;
+	cycle->result = err ?: erra;
 }
 
 static int __test_cycle(unsigned int nthreads)