[AIO] add cancellation support to aio_down()

Now that kiocbs serialise the cancel and retry paths, add support for
cancelling an aio_down() operation that is still blocked, by undoing
the aio_down() steps that were already completed.

Signed-off-by: Benjamin LaHaise

diff -purN --exclude=description 40_lock_kiocb/lib/semaphore-sleepers.c 50_aio_down_cancel/lib/semaphore-sleepers.c
--- 40_lock_kiocb/lib/semaphore-sleepers.c	2005-06-28 13:33:35.000000000 -0400
+++ 50_aio_down_cancel/lib/semaphore-sleepers.c	2005-06-28 13:33:36.000000000 -0400
@@ -91,7 +91,7 @@ fastcall void __sched __down(struct sema
 	tsk->state = TASK_RUNNING;
 }
 
-int aio_down_wait(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int aio_down_wait(wait_queue_t *wait, unsigned mode, int sync, void *key)
 {
 	struct kiocb *iocb = io_wait_to_kiocb(wait);
 	struct semaphore *sem = wait->private;
@@ -103,6 +103,7 @@ int aio_down_wait(wait_queue_t *wait, un
 	 * the wait_queue_head.
 	 */
 	if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+		iocb->ki_cancel = NULL;
 		sem->sleepers = 0;
 		sem->aio_owner = iocb;
 		list_del_init(&wait->task_list);
@@ -115,6 +116,28 @@ int aio_down_wait(wait_queue_t *wait, un
 	return 1;
 }
 
+static void fixup_down_trylock_locked(struct semaphore *sem);
+static int cancel_aio_down(struct kiocb *iocb, struct io_event *res)
+{
+	/* At this point, the kiocb is locked and even if we have kicked
+	 * it, the pointer to the semaphore is still valid.
+	 */
+	struct semaphore *sem = iocb->ki_wait.private;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	if (!list_empty(&iocb->ki_wait.task_list)) {
+		/* Ensure aio_down_wait() can no longer be called. */
+		list_del_init(&iocb->ki_wait.task_list);
+		fixup_down_trylock_locked(sem);
+	} else
+		ret = -EAGAIN;	/* we lost the race with aio_down_wait(). */
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+	return ret;
+}
+
 fastcall long __sched __aio_down(struct kiocb *iocb, struct semaphore * sem)
 {
 	unsigned long flags;
@@ -131,6 +154,8 @@ fastcall long __sched __aio_down(struct
 
 	sem->sleepers++;
 
+	iocb->ki_cancel = cancel_aio_down;
+
 	aio_down_wait(&iocb->ki_wait, 0, 0, NULL);
 	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	return -EIOCBRETRY;
@@ -199,12 +224,9 @@ fastcall int __sched __down_interruptibl
  * single "cmpxchg" without failure cases,
  * but then it wouldn't work on a 386.
  */
-fastcall int __down_trylock(struct semaphore * sem)
+static void fixup_down_trylock_locked(struct semaphore *sem)
 {
 	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sem->wait.lock, flags);
 	sleepers = sem->sleepers + 1;
 	sem->sleepers = 0;
 
@@ -213,10 +235,16 @@ fastcall int __down_trylock(struct semap
 	 * playing, because we own the spinlock in the
 	 * wait_queue_head.
	 */
-	if (!atomic_add_negative(sleepers, &sem->count)) {
+	if (!atomic_add_negative(sleepers, &sem->count))
 		wake_up_locked(&sem->wait);
-	}
+}
 
+fastcall int __down_trylock(struct semaphore * sem)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	fixup_down_trylock_locked(sem);
 	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	return 1;
 }
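
For readers following along outside the kernel tree: the heart of
cancel_aio_down() is an unlink-under-the-waker's-lock pattern.  Both the
wake path (aio_down_wait()) and the cancel path take sem->wait.lock, and
whether the wait entry is still on the queue decides who won the race; a
cancel that finds the entry already unlinked must report -EAGAIN, because
the semaphore was in fact acquired.  Below is a minimal userspace sketch
of that pattern, assuming only POSIX threads; every name in it (struct
waiter, wake_one(), cancel_waiter(), ...) is illustrative and not part of
this patch or of any kernel API.

/*
 * Minimal userspace sketch (NOT kernel code) of the race resolution
 * cancel_aio_down() relies on: the wake path and the cancel path
 * serialise on the same lock, and list membership of the wait entry
 * decides which side won.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct waiter {
	struct waiter *next, **pprev;	/* intrusive list, like list_head */
	int granted;
};

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static struct waiter *wait_list;

static void enqueue_waiter(struct waiter *w)
{
	w->granted = 0;
	w->next = wait_list;
	w->pprev = &wait_list;
	if (wait_list)
		wait_list->pprev = &w->next;
	wait_list = w;
}

/* Unlink and mark the entry "no longer queued"; pprev == NULL plays
 * the role of list_empty() after list_del_init(). */
static void unlink_waiter(struct waiter *w)
{
	*w->pprev = w->next;
	if (w->next)
		w->next->pprev = w->pprev;
	w->next = NULL;
	w->pprev = NULL;
}

/* Wake path: grants ownership to the head waiter under wait_lock,
 * the analogue of aio_down_wait() taking the semaphore. */
static void wake_one(void)
{
	pthread_mutex_lock(&wait_lock);
	if (wait_list) {
		struct waiter *w = wait_list;
		unlink_waiter(w);
		w->granted = 1;
	}
	pthread_mutex_unlock(&wait_lock);
}

/* Cancel path: only succeeds while the waiter is still queued.  If
 * the entry is already off the list, the wake path won the race and
 * the operation must be treated as completed (-EAGAIN, exactly as
 * cancel_aio_down() returns). */
static int cancel_waiter(struct waiter *w)
{
	int ret = 0;

	pthread_mutex_lock(&wait_lock);
	if (w->pprev)
		unlink_waiter(w);	/* undo the enqueue */
	else
		ret = -EAGAIN;		/* lost the race with wake_one() */
	pthread_mutex_unlock(&wait_lock);
	return ret;
}

int main(void)
{
	struct waiter a, b;

	enqueue_waiter(&a);
	enqueue_waiter(&b);

	wake_one();			/* grants to b, the head of the list */
	printf("cancel b: %d (lost the race)\n", cancel_waiter(&b));
	printf("cancel a: %d (cancelled)\n", cancel_waiter(&a));
	return 0;
}

Note how pprev doubling as a "still queued" flag mirrors list_del_init()
plus list_empty() in the patch: the state that resolves the race lives in
the wait entry itself and is only ever touched under the lock.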