[AIO] add cancellation support to i386 aio_down()

diff -purN 40_lock_kiocb/arch/i386/kernel/semaphore.c 50_aio_down_cancel/arch/i386/kernel/semaphore.c
--- 40_lock_kiocb/arch/i386/kernel/semaphore.c	2005-06-20 15:55:07.044745688 -0400
+++ 50_aio_down_cancel/arch/i386/kernel/semaphore.c	2005-06-20 15:48:41.534352176 -0400
@@ -104,6 +104,7 @@ static int aio_down_wait(wait_queue_t *w
 	 * the wait_queue_head.
 	 */
 	if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+		iocb->ki_cancel = NULL;
 		sem->sleepers = 0;
 		sem->aio_owner = iocb;
 		list_del_init(&wait->task_list);
@@ -116,6 +117,28 @@ static int aio_down_wait(wait_queue_t *w
 	return 1;
 }
 
+static void fixup_down_trylock_locked(struct semaphore *sem);
+static int cancel_aio_down(struct kiocb *iocb, struct io_event *res)
+{
+	/* At this point, the kiocb is locked and even if we have kicked
+	 * it, the pointer to the semaphore is still valid.
+	 */
+	struct semaphore *sem = iocb->ki_wait.private;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	if (!list_empty(&iocb->ki_wait.task_list)) {
+		/* Ensure aio_down_wait() can no longer be called. */
+		list_del_init(&iocb->ki_wait.task_list);
+		fixup_down_trylock_locked(sem);
+	} else
+		ret = -EAGAIN;	/* we lost the race with aio_down_wait(). */
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+	return ret;
+}
+
 static fastcall int __attribute_used__ __sched __aio_down(struct kiocb *iocb, struct semaphore * sem)
 {
 	unsigned long flags;
@@ -132,6 +155,8 @@ static fastcall int __attribute_used__ _
 
 	sem->sleepers++;
 
+	iocb->ki_cancel = cancel_aio_down;
+
 	aio_down_wait(&iocb->ki_wait, 0, 0, NULL);
 	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	return -EIOCBRETRY;
@@ -200,12 +225,9 @@ static fastcall int __attribute_used__ _
  * single "cmpxchg" without failure cases,
  * but then it wouldn't work on a 386.
  */
-static fastcall int __attribute_used__ __down_trylock(struct semaphore * sem)
+static void fixup_down_trylock_locked(struct semaphore *sem)
 {
 	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sem->wait.lock, flags);
 	sleepers = sem->sleepers + 1;
 	sem->sleepers = 0;
 
@@ -214,15 +236,20 @@ static fastcall int __attribute_used__ _
 	 * playing, because we own the spinlock in the
 	 * wait_queue_head.
 	 */
-	if (!atomic_add_negative(sleepers, &sem->count)) {
+	if (!atomic_add_negative(sleepers, &sem->count))
 		wake_up_locked(&sem->wait);
-	}
+}
 
+static fastcall int __attribute_used__ __down_trylock(struct semaphore * sem)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	fixup_down_trylock_locked(sem);
 	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	return 1;
 }
 
-
 /*
  * The semaphore operations have a special calling sequence that
  * allow us to do a simpler in-line version of them. These routines
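
A note on how the new hook is expected to be driven: __aio_down() installs cancel_aio_down() as the iocb's ki_cancel method only while the iocb is queued on the semaphore's wait queue, and aio_down_wait() clears it again the moment the iocb becomes the semaphore owner, so cancellation and ownership hand-off cannot both win. The fragment below is an illustrative sketch of that caller-side contract only, assuming kernel context; lock_kiocb()/unlock_kiocb() and aio_try_cancel() are assumed names standing in for whatever serialises the kiocb (cf. the 40_lock_kiocb stage), not the actual fs/aio.c code.

/*
 * Illustrative sketch only: drive ki_cancel for a kiocb that may be
 * blocked in __aio_down().  lock_kiocb()/unlock_kiocb() and
 * aio_try_cancel() are hypothetical names, not the real aio core.
 */
static int aio_try_cancel(struct kiocb *iocb, struct io_event *res)
{
	int (*cancel)(struct kiocb *, struct io_event *);
	int ret = -EAGAIN;

	lock_kiocb(iocb);
	cancel = iocb->ki_cancel;	/* NULL once aio_down_wait() granted the semaphore */
	if (cancel)
		ret = cancel(iocb, res);	/* cancel_aio_down() while still queued */
	unlock_kiocb(iocb);

	/* -EAGAIN: cancellation lost the race; the iocb owns (or will own)
	 * the semaphore and the operation completes normally. */
	return ret;
}

A return of 0 from cancel_aio_down() means the iocb was unlinked from sem->wait and the count was repaired via fixup_down_trylock_locked(), the same fixup a failed __down_trylock() performs; -EAGAIN means aio_down_wait() already dequeued the iocb, so the down() proceeds as if no cancel had been issued.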