9P: Fix race between p9_write_work() and p9_fd_request()
author Simon Derr <simon.derr@bull.net>
Mon, 17 Sep 2012 13:16:31 +0000 (15:16 +0200)
committer Eric Van Hensbergen <ericvh@gmail.com>
Thu, 11 Oct 2012 17:03:31 +0000 (12:03 -0500)
Race scenario:

thread A                                thread B

p9_write_work()                         p9_fd_request()

if (list_empty(&m->unsent_req_list))
        ...
                                        spin_lock(&client->lock);
                                        req->status = REQ_STATUS_UNSENT;
                                        list_add_tail(..., &m->unsent_req_list);
                                        spin_unlock(&client->lock);
                                        ...
                                        if (n & POLLOUT &&
                                            !test_and_set_bit(Wworksched, &m->wsched))
                                                schedule_work(&m->wq);
                                        --> not done because Wworksched is set

clear_bit(Wworksched, &m->wsched);
return;

--> nobody will take care of sending the new request.

This race is unlikely in practice, because p9_write_work() is rarely
called with an empty unsent_req_list. For the same reason, taking the
lock before the emptiness check costs almost nothing.
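
To make the pattern concrete, here is a minimal userspace sketch
(illustration only, not 9P code) of the same discipline: the worker must
test "is the queue empty?" and clear its "scheduled" flag under the same
lock that submitters hold while queueing work and deciding whether to
(re)start the worker. It uses pthreads in place of the kernel spinlock
and workqueue, and all names (write_worker, submit_request,
worker_scheduled, unsent_head) are invented for this example.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	int id;
	struct req *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *unsent_head;		/* analogue of m->unsent_req_list */
static bool worker_scheduled;		/* analogue of Wworksched in m->wsched */

/* Analogue of p9_write_work(): drain the queue, then stop. */
static void *write_worker(void *arg)
{
	(void)arg;
	for (;;) {
		struct req *req;

		pthread_mutex_lock(&lock);	/* taken BEFORE the emptiness check */
		if (!unsent_head) {
			/*
			 * Clear the flag while still holding the lock, so a
			 * concurrent submitter either sees its request on the
			 * list or sees the flag already cleared.
			 */
			worker_scheduled = false;
			pthread_mutex_unlock(&lock);
			return NULL;
		}
		req = unsent_head;
		unsent_head = req->next;
		pthread_mutex_unlock(&lock);

		printf("sent request %d\n", req->id);
		free(req);
	}
}

/* Analogue of p9_fd_request(): queue a request, start a worker if none runs.
 * (Ordering simplified: LIFO here, the kernel uses a FIFO list.) */
static void submit_request(int id)
{
	struct req *req = malloc(sizeof(*req));
	bool need_worker = false;

	req->id = id;

	pthread_mutex_lock(&lock);
	req->next = unsent_head;
	unsent_head = req;
	if (!worker_scheduled) {
		worker_scheduled = true;
		need_worker = true;
	}
	pthread_mutex_unlock(&lock);

	if (need_worker) {
		pthread_t t;

		pthread_create(&t, NULL, write_worker, NULL);
		pthread_detach(t);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++)
		submit_request(i);

	/* Let any detached worker threads finish before the process exits. */
	pthread_exit(NULL);
}

With the lock taken before the emptiness check, a submitter that finds
the flag still set is guaranteed that the worker has not yet made its
final emptiness check, so the new request will be seen. Moving
spin_lock(&m->client->lock) above the list_empty() test in the hunk
below achieves the same thing.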

Signed-off-by: Simon Derr <simon.derr@bull.net>
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
net/9p/trans_fd.c

index b2c308fffb8a543be8d1cc284a2961478769a381..0031a8cf145d34fb1a9d2990c8f7a3ede412747a 100644
@@ -453,12 +453,13 @@ static void p9_write_work(struct work_struct *work)
        }
 
        if (!m->wsize) {
+               spin_lock(&m->client->lock);
                if (list_empty(&m->unsent_req_list)) {
                        clear_bit(Wworksched, &m->wsched);
+                       spin_unlock(&m->client->lock);
                        return;
                }
 
-               spin_lock(&m->client->lock);
                req = list_entry(m->unsent_req_list.next, struct p9_req_t,
                               req_list);
                req->status = REQ_STATUS_SENT;