[AIO] add cancellation support to aio_down()

Now that kiocbs serialise the cancel and retry paths, add support for
cancelling an aio_down() operation that is still blocked, by undoing the
aio_down() steps that were already completed.

Signed-off-by: Benjamin LaHaise

diff -purN --exclude=description 40_lock_kiocb/lib/semaphore-sleepers.c 50_aio_down_cancel/lib/semaphore-sleepers.c
--- 40_lock_kiocb/lib/semaphore-sleepers.c	2005-07-27 16:20:15.000000000 -0400
+++ 50_aio_down_cancel/lib/semaphore-sleepers.c	2005-07-27 16:24:10.000000000 -0400
@@ -91,7 +91,7 @@ fastcall void __sched __down(struct sema
 	tsk->state = TASK_RUNNING;
 }
 
-int aio_down_wait(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int aio_down_wait(wait_queue_t *wait, unsigned mode, int sync, void *key)
 {
 	struct kiocb *iocb = io_wait_to_kiocb(wait);
 	struct semaphore *sem = wait->private;
@@ -103,6 +103,7 @@ int aio_down_wait(wait_queue_t *wait, un
 	 * the wait_queue_head.
 	 */
 	if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+		iocb->ki_cancel = NULL;
 		sem->sleepers = 0;
 		sem->aio_owner = iocb;
 		list_del_init(&wait->task_list);
@@ -115,6 +116,29 @@ int aio_down_wait(wait_queue_t *wait, un
 	return 1;
 }
 
+static void fixup_down_trylock_locked(struct semaphore *sem);
+static int cancel_aio_down(struct kiocb *iocb, struct io_event *event)
+{
+	/* At this point, the kiocb is locked and even if we have kicked
+	 * it, the pointer to the semaphore is still valid.
+	 */
+	struct semaphore *sem = iocb->ki_wait.private;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	if (!list_empty(&iocb->ki_wait.task_list)) {
+		/* Ensure aio_down_wait() can no longer be called. */
+		list_del_init(&iocb->ki_wait.task_list);
+		fixup_down_trylock_locked(sem);
+		event->res = is_sync_kiocb(iocb) ? -ERESTARTSYS : -EINTR;
+	} else
+		ret = -EAGAIN;	/* we lost the race with aio_down_wait(). */
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+	return ret;
+}
+
 fastcall long __sched __aio_down(struct kiocb *iocb, struct semaphore * sem)
 {
 	unsigned long flags;
@@ -124,13 +148,15 @@ fastcall long __sched __aio_down(struct
 		return 0;
 	}
 
-	spin_lock_irqsave(&sem->wait.lock, flags);
-
 	iocb->ki_wait.private = sem;
 	iocb->ki_wait.func = aio_down_wait;
+	spin_lock_irqsave(&sem->wait.lock, flags);
 
 	add_wait_queue_exclusive_locked(&sem->wait, &iocb->ki_wait);
 	sem->sleepers++;
+	iocb->ki_cancel = cancel_aio_down;
+
+	aio_down_wait(&iocb->ki_wait, 0, 0, NULL);
 	spin_unlock_irqrestore(&sem->wait.lock, flags);
 
 	return -EIOCBRETRY;
@@ -199,12 +225,9 @@ fastcall int __sched __down_interruptibl
  * single "cmpxchg" without failure cases,
  * but then it wouldn't work on a 386.
  */
-fastcall int __down_trylock(struct semaphore * sem)
+static void fixup_down_trylock_locked(struct semaphore *sem)
 {
 	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sem->wait.lock, flags);
 	sleepers = sem->sleepers + 1;
 	sem->sleepers = 0;
 
@@ -213,10 +236,16 @@ fastcall int __down_trylock(struct semap
 	 * playing, because we own the spinlock in the
 	 * wait_queue_head.
 	 */
-	if (!atomic_add_negative(sleepers, &sem->count)) {
+	if (!atomic_add_negative(sleepers, &sem->count))
 		wake_up_locked(&sem->wait);
-	}
+}
 
+fastcall int __down_trylock(struct semaphore * sem)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	fixup_down_trylock_locked(sem);
 	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	return 1;
 }
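
For readers who have not followed the kiocb cancellation series, the pattern
in cancel_aio_down() above is easier to see outside the kernel.  What follows
is a minimal userspace sketch, not kernel code: the names (fake_sem, waiter,
cancel_waiter) are invented for illustration, the pthread mutex stands in for
sem->wait.lock, and the count bump is only a crude stand-in for
fixup_down_trylock_locked().  It demonstrates just the shape of the cancel
path: undo the enqueue under the wait-queue lock if the waiter is still
queued, otherwise report -EAGAIN because the wakeup path won the race.

/*
 * Illustrative userspace sketch (NOT kernel code).  Only the locking
 * pattern mirrors cancel_aio_down(); all names here are made up.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct waiter {
	struct waiter *next, *prev;	/* doubly linked; self-linked when idle */
};

struct fake_sem {
	pthread_mutex_t lock;		/* stands in for sem->wait.lock */
	struct waiter head;		/* stands in for the wait queue */
	int count;
};

static void waiter_init(struct waiter *w) { w->next = w->prev = w; }
static int waiter_queued(struct waiter *w) { return w->next != w; }

static void waiter_del(struct waiter *w)
{
	w->prev->next = w->next;
	w->next->prev = w->prev;
	waiter_init(w);
}

/* Analogue of the blocking path: register the waiter on the queue. */
static void enqueue_waiter(struct fake_sem *s, struct waiter *w)
{
	pthread_mutex_lock(&s->lock);
	w->next = &s->head;
	w->prev = s->head.prev;
	s->head.prev->next = w;
	s->head.prev = w;
	pthread_mutex_unlock(&s->lock);
}

/* Analogue of the cancel path: dequeue under the lock, or report the race. */
static int cancel_waiter(struct fake_sem *s, struct waiter *w)
{
	int ret = 0;

	pthread_mutex_lock(&s->lock);
	if (waiter_queued(w)) {
		/* Still blocked: undo the enqueue. */
		waiter_del(w);
		s->count++;	/* crude stand-in for fixup_down_trylock_locked() */
	} else {
		/* Lost the race: the wakeup path already granted the sem. */
		ret = -EAGAIN;
	}
	pthread_mutex_unlock(&s->lock);
	return ret;
}

int main(void)
{
	struct fake_sem s;
	struct waiter w;

	pthread_mutex_init(&s.lock, NULL);
	waiter_init(&s.head);
	s.count = 0;
	waiter_init(&w);

	enqueue_waiter(&s, &w);
	printf("cancel: %d\n", cancel_waiter(&s, &w));		/* 0: undone */
	printf("cancel again: %d\n", cancel_waiter(&s, &w));	/* -EAGAIN */
	return 0;
}

The race check is the important part: once aio_down_wait() has handed the
semaphore to the kiocb and emptied its wait entry, cancellation must not undo
anything, which is why cancel_aio_down() returns -EAGAIN in that case.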