Diffstat (limited to 'drivers/ieee1394/raw1394.c')
-rw-r--r-- | drivers/ieee1394/raw1394.c | 91
1 file changed, 57 insertions, 34 deletions
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 46c88e8a24f..840b705fd5d 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -133,10 +133,9 @@ static void free_pending_request(struct pending_request *req)
 static void __queue_complete_req(struct pending_request *req)
 {
 	struct file_info *fi = req->file_info;
-	list_move_tail(&req->list, &fi->req_complete);
 
-	up(&fi->complete_sem);
-	wake_up_interruptible(&fi->poll_wait_complete);
+	list_move_tail(&req->list, &fi->req_complete);
+	wake_up(&fi->wait_complete);
 }
 
 static void queue_complete_req(struct pending_request *req)
@@ -464,13 +463,36 @@ raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
 
 #endif
 
+/* get next completed request (caller must hold fi->reqlists_lock) */
+static inline struct pending_request *__next_complete_req(struct file_info *fi)
+{
+	struct list_head *lh;
+	struct pending_request *req = NULL;
+
+	if (!list_empty(&fi->req_complete)) {
+		lh = fi->req_complete.next;
+		list_del(lh);
+		req = list_entry(lh, struct pending_request, list);
+	}
+	return req;
+}
+
+/* atomically get next completed request */
+static struct pending_request *next_complete_req(struct file_info *fi)
+{
+	unsigned long flags;
+	struct pending_request *req;
+
+	spin_lock_irqsave(&fi->reqlists_lock, flags);
+	req = __next_complete_req(fi);
+	spin_unlock_irqrestore(&fi->reqlists_lock, flags);
+	return req;
+}
+
 static ssize_t raw1394_read(struct file *file, char __user * buffer,
 			    size_t count, loff_t * offset_is_ignored)
 {
-	unsigned long flags;
 	struct file_info *fi = (struct file_info *)file->private_data;
-	struct list_head *lh;
 	struct pending_request *req;
 	ssize_t ret;
 
@@ -488,22 +510,21 @@ static ssize_t raw1394_read(struct file *file, char __user * buffer,
 	}
 
 	if (file->f_flags & O_NONBLOCK) {
-		if (down_trylock(&fi->complete_sem)) {
+		if (!(req = next_complete_req(fi)))
 			return -EAGAIN;
-		}
 	} else {
-		if (down_interruptible(&fi->complete_sem)) {
+		/*
+		 * NB: We call the macro wait_event_interruptible() with a
+		 * condition argument with side effect.  This is only possible
+		 * because the side effect does not occur until the condition
+		 * became true, and wait_event_interruptible() won't evaluate
+		 * the condition again after that.
+		 */
+		if (wait_event_interruptible(fi->wait_complete,
+					     (req = next_complete_req(fi))))
 			return -ERESTARTSYS;
-		}
 	}
 
-	spin_lock_irqsave(&fi->reqlists_lock, flags);
-	lh = fi->req_complete.next;
-	list_del(lh);
-	spin_unlock_irqrestore(&fi->reqlists_lock, flags);
-
-	req = list_entry(lh, struct pending_request, list);
-
 	if (req->req.length) {
 		if (copy_to_user(int2ptr(req->req.recvb), req->data,
 				 req->req.length)) {
@@ -2745,7 +2766,7 @@ static unsigned int raw1394_poll(struct file *file, poll_table * pt)
 	unsigned int mask = POLLOUT | POLLWRNORM;
 	unsigned long flags;
 
-	poll_wait(file, &fi->poll_wait_complete, pt);
+	poll_wait(file, &fi->wait_complete, pt);
 
 	spin_lock_irqsave(&fi->reqlists_lock, flags);
 	if (!list_empty(&fi->req_complete)) {
@@ -2770,9 +2791,8 @@ static int raw1394_open(struct inode *inode, struct file *file)
 	fi->state = opened;
 	INIT_LIST_HEAD(&fi->req_pending);
 	INIT_LIST_HEAD(&fi->req_complete);
-	sema_init(&fi->complete_sem, 0);
 	spin_lock_init(&fi->reqlists_lock);
-	init_waitqueue_head(&fi->poll_wait_complete);
+	init_waitqueue_head(&fi->wait_complete);
 	INIT_LIST_HEAD(&fi->addr_list);
 
 	file->private_data = fi;
@@ -2785,7 +2805,7 @@ static int raw1394_release(struct inode *inode, struct file *file)
 	struct file_info *fi = file->private_data;
 	struct list_head *lh;
 	struct pending_request *req;
-	int done = 0, i, fail = 0;
+	int i, fail;
 	int retval = 0;
 	struct list_head *entry;
 	struct arm_addr *addr = NULL;
@@ -2865,25 +2885,28 @@ static int raw1394_release(struct inode *inode, struct file *file)
 			       "error(s) occurred \n");
 	}
 
-	while (!done) {
+	for (;;) {
+		/* This locked section guarantees that neither
+		 * complete nor pending requests exist once i!=0 */
 		spin_lock_irqsave(&fi->reqlists_lock, flags);
-
-		while (!list_empty(&fi->req_complete)) {
-			lh = fi->req_complete.next;
-			list_del(lh);
-
-			req = list_entry(lh, struct pending_request, list);
-
+		while ((req = __next_complete_req(fi)))
 			free_pending_request(req);
-		}
-
-		if (list_empty(&fi->req_pending))
-			done = 1;
+		i = list_empty(&fi->req_pending);
 		spin_unlock_irqrestore(&fi->reqlists_lock, flags);
 
-		if (!done)
-			down_interruptible(&fi->complete_sem);
+		if (i)
+			break;
+		/*
+		 * Sleep until more requests can be freed.
+		 *
+		 * NB: We call the macro wait_event() with a condition argument
+		 * with side effect.  This is only possible because the side
+		 * effect does not occur until the condition became true, and
+		 * wait_event() won't evaluate the condition again after that.
+		 */
+		wait_event(fi->wait_complete, (req = next_complete_req(fi)));
+		free_pending_request(req);
 	}
 
 	/* Remove any sub-trees left by user space programs */
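
The conversion above follows a general pattern: a counting semaphore that shadows a list ("one up() per queued item") is replaced by a wait queue whose condition is "an item can be dequeued atomically". The list becomes the single source of truth, and the dequeue happens under the same lock that protects the list, so the count can never drift out of sync with the list (the old release path, for instance, emptied req_complete without adjusting complete_sem). What follows is a minimal userspace sketch of the same pattern using POSIX threads; it is an illustration only, not raw1394 code, and every name in it (complete_request, read_request, and so on) is invented for the example. One difference worth noting: the kernel comments stress that wait_event() stops evaluating its side-effecting condition once it becomes true, whereas pthread_cond_wait() must loop and re-check, which is safe here because a failed dequeue simply returns NULL.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	struct request *next;
	int id;
};

/* singly linked "complete" list; stands in for fi->req_complete,
 * with the mutex playing the role of fi->reqlists_lock */
static struct request *complete_head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_complete = PTHREAD_COND_INITIALIZER;

/* analogue of __next_complete_req(): caller must hold the lock;
 * returns NULL if nothing is ready */
static struct request *__next_complete(void)
{
	struct request *req = complete_head;

	if (req)
		complete_head = req->next;
	return req;
}

/* analogue of __queue_complete_req(): publish a finished request
 * and wake any sleeper */
static void complete_request(struct request *req)
{
	pthread_mutex_lock(&lock);
	req->next = complete_head;
	complete_head = req;
	pthread_cond_signal(&wait_complete);
	pthread_mutex_unlock(&lock);
}

/* analogue of the blocking path in raw1394_read(): sleep until a
 * request can be dequeued atomically, then hand it to the caller */
static struct request *read_request(void)
{
	struct request *req;

	pthread_mutex_lock(&lock);
	while (!(req = __next_complete()))
		pthread_cond_wait(&wait_complete, &lock);
	pthread_mutex_unlock(&lock);
	return req;
}

static void *producer(void *arg)
{
	struct request *req = malloc(sizeof(*req));

	(void)arg;
	req->id = 1;
	complete_request(req);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct request *req;

	pthread_create(&t, NULL, producer, NULL);
	req = read_request();	/* blocks until the producer queues one */
	printf("dequeued request %d\n", req->id);
	free(req);
	pthread_join(t, NULL);
	return 0;
}

Build with "cc -pthread". The patch's O_NONBLOCK branch maps onto a lock/__next_complete()/unlock sequence that returns immediately when the list is empty, just as next_complete_req() does.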