diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index b182b025db86..dc2e2ce7e943 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -432,7 +432,6 @@ static ssize_t orangefs_devreq_writev(struct file *file,
 			return -EIO;
 		}
 	} else {
-		/* Change downcall status */
 		gossip_err("writev: could not vmalloc for trailer!\n");
 		dev_req_release(buffer);
 		put_op(op);
@@ -453,7 +452,7 @@ no_trailer:
 	 */
 	if (op->upcall.type == ORANGEFS_VFS_OP_FILE_IO) {
 		int timed_out = 0;
-		DECLARE_WAITQUEUE(wait_entry, current);
+		DEFINE_WAIT(wait_entry);
 
 		/*
 		 * tell the vfs op waiting on a waitqueue
@@ -463,14 +462,14 @@ no_trailer:
 		set_op_state_serviced(op);
 		spin_unlock(&op->lock);
 
-		add_wait_queue_exclusive(&op->io_completion_waitq,
-					 &wait_entry);
 		wake_up_interruptible(&op->waitq);
 
 		while (1) {
-			set_current_state(TASK_INTERRUPTIBLE);
-
 			spin_lock(&op->lock);
+			prepare_to_wait_exclusive(
+				&op->io_completion_waitq,
+				&wait_entry,
+				TASK_INTERRUPTIBLE);
 			if (op->io_completed) {
 				spin_unlock(&op->lock);
 				break;
@@ -497,9 +496,9 @@ no_trailer:
 			break;
 		}
 
-		set_current_state(TASK_RUNNING);
-		remove_wait_queue(&op->io_completion_waitq,
-				  &wait_entry);
+		spin_lock(&op->lock);
+		finish_wait(&op->io_completion_waitq, &wait_entry);
+		spin_unlock(&op->lock);
 
 		/* NOTE: for I/O operations we handle releasing the op
 		 * object except in the case of timeout. the reason we
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
index f7cd18a2a73b..863c6fc8e192 100644
--- a/fs/orangefs/orangefs-bufmap.c
+++ b/fs/orangefs/orangefs-bufmap.c
@@ -333,19 +333,17 @@ static int wait_for_a_slot(struct slot_args *slargs, int *buffer_index)
 {
 	int ret = -1;
 	int i = 0;
-	DECLARE_WAITQUEUE(my_wait, current);
-
-
-	add_wait_queue_exclusive(slargs->slot_wq, &my_wait);
+	DEFINE_WAIT(wait_entry);
 
 	while (1) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
 		/*
 		 * check for available desc, slot_lock is the appropriate
 		 * index_lock
 		 */
 		spin_lock(slargs->slot_lock);
+		prepare_to_wait_exclusive(slargs->slot_wq,
+					  &wait_entry,
+					  TASK_INTERRUPTIBLE);
 		for (i = 0; i < slargs->slot_count; i++)
 			if (slargs->slot_array[i] == 0) {
 				slargs->slot_array[i] = 1;
@@ -383,8 +381,9 @@ static int wait_for_a_slot(struct slot_args *slargs, int *buffer_index)
 		break;
 	}
 
-	set_current_state(TASK_RUNNING);
-	remove_wait_queue(slargs->slot_wq, &my_wait);
+	spin_lock(slargs->slot_lock);
+	finish_wait(slargs->slot_wq, &wait_entry);
+	spin_unlock(slargs->slot_lock);
 	return ret;
 }
 
diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c
index c731cbdd5fbd..856a4b48fe23 100644
--- a/fs/orangefs/waitqueue.c
+++ b/fs/orangefs/waitqueue.c
@@ -62,7 +62,7 @@ int service_operation(struct orangefs_kernel_op_s *op,
 
 	/* irqflags and wait_entry are only used IF the client-core aborts */
 	unsigned long irqflags;
-	DECLARE_WAITQUEUE(wait_entry, current);
+	DEFINE_WAIT(wait_entry);
 
 	op->upcall.tgid = current->tgid;
 	op->upcall.pid = current->pid;
@@ -204,11 +204,11 @@ retry_servicing:
 			 * memory system can be initialized.
 			 */
 			spin_lock_irqsave(&op->lock, irqflags);
-			add_wait_queue(&orangefs_bufmap_init_waitq, &wait_entry);
+			prepare_to_wait(&orangefs_bufmap_init_waitq,
+					&wait_entry,
+					TASK_INTERRUPTIBLE);
 			spin_unlock_irqrestore(&op->lock, irqflags);
 
-			set_current_state(TASK_INTERRUPTIBLE);
-
 			/*
 			 * Wait for orangefs_bufmap_initialize() to wake me up
 			 * within the allotted time.
@@ -225,8 +225,7 @@ retry_servicing:
 					   get_bufmap_init());
 
 			spin_lock_irqsave(&op->lock, irqflags);
-			remove_wait_queue(&orangefs_bufmap_init_waitq,
-					  &wait_entry);
+			finish_wait(&orangefs_bufmap_init_waitq, &wait_entry);
 			spin_unlock_irqrestore(&op->lock, irqflags);
 
 			if (get_bufmap_init() == 0) {
@@ -342,16 +341,11 @@ void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
 int wait_for_matching_downcall(struct orangefs_kernel_op_s *op)
 {
 	int ret = -EINVAL;
-	DECLARE_WAITQUEUE(wait_entry, current);
-
-	spin_lock(&op->lock);
-	add_wait_queue(&op->waitq, &wait_entry);
-	spin_unlock(&op->lock);
+	DEFINE_WAIT(wait_entry);
 
 	while (1) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
 		spin_lock(&op->lock);
+		prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
 		if (op_state_serviced(op)) {
 			spin_unlock(&op->lock);
 			ret = 0;
@@ -434,10 +428,8 @@ int wait_for_matching_downcall(struct orangefs_kernel_op_s *op)
 		break;
 	}
 
-	set_current_state(TASK_RUNNING);
-
 	spin_lock(&op->lock);
-	remove_wait_queue(&op->waitq, &wait_entry);
+	finish_wait(&op->waitq, &wait_entry);
 	spin_unlock(&op->lock);
 
 	return ret;
@@ -455,16 +447,11 @@ int wait_for_matching_downcall(struct orangefs_kernel_op_s *op)
 int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *op)
 {
 	int ret = -EINVAL;
-	DECLARE_WAITQUEUE(wait_entry, current);
-
-	spin_lock(&op->lock);
-	add_wait_queue(&op->waitq, &wait_entry);
-	spin_unlock(&op->lock);
+	DEFINE_WAIT(wait_entry);
 
 	while (1) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
 		spin_lock(&op->lock);
+		prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
 		if (op_state_serviced(op)) {
 			gossip_debug(GOSSIP_WAIT_DEBUG,
 				     "%s:op-state is SERVICED.\n",
@@ -514,10 +501,8 @@ int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *op)
 		break;
 	}
 
-	set_current_state(TASK_RUNNING);
-
 	spin_lock(&op->lock);
-	remove_wait_queue(&op->waitq, &wait_entry);
+	finish_wait(&op->waitq, &wait_entry);
 	spin_unlock(&op->lock);
 
 	gossip_debug(GOSSIP_WAIT_DEBUG,
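
For reference, every hunk above performs the same conversion: the open-coded
DECLARE_WAITQUEUE() / add_wait_queue*() / set_current_state() /
remove_wait_queue() sequence is replaced with DEFINE_WAIT(),
prepare_to_wait*() and finish_wait().  The sketch below is not OrangeFS code;
the names wait_for_condition, wq, lock and condition are made up for
illustration, and the locking around prepare_to_wait()/finish_wait() simply
mirrors the pattern used in the hunks.

/* Minimal sketch of the prepare_to_wait()/finish_wait() idiom; all names
 * here (wait_for_condition, wq, lock, condition) are hypothetical.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static int wait_for_condition(wait_queue_head_t *wq, spinlock_t *lock,
			      int *condition)
{
	int ret = 0;
	DEFINE_WAIT(wait_entry);	/* wait entry bound to current */

	while (1) {
		spin_lock(lock);
		/*
		 * Queue current on wq and set TASK_INTERRUPTIBLE in one
		 * step, before the condition is checked under the lock,
		 * so a wake_up() cannot get lost between check and sleep.
		 */
		prepare_to_wait(wq, &wait_entry, TASK_INTERRUPTIBLE);
		if (*condition) {
			spin_unlock(lock);
			break;
		}
		spin_unlock(lock);

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		schedule();		/* sleep until woken or signalled */
	}

	/*
	 * finish_wait() restores TASK_RUNNING and removes the entry from
	 * wq, replacing the old set_current_state()/remove_wait_queue()
	 * pair; taking the caller's lock around it mirrors the hunks above.
	 */
	spin_lock(lock);
	finish_wait(wq, &wait_entry);
	spin_unlock(lock);

	return ret;
}

A waker would set the condition under the same lock and then call wake_up()
or wake_up_interruptible() on wq.  prepare_to_wait_exclusive(), as used for
the I/O-completion and slot waits above, is the same idiom but queues the
task as an exclusive waiter so that a wake_up() wakes only one of them.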