drivers/s390/block/scm_blk_cluster.c
/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

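/*
 * Writes that are smaller than one cluster are handled as
 * read-modify-write:  the affected cluster(s) are first read into a
 * per-request page buffer and then written back as a whole.
 * write_cluster_size selects how many pages make up one cluster;
 * a value of 0 disables write clustering.
 */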
static unsigned int write_cluster_size = 64;
module_param(write_cluster_size, uint, S_IRUGO);
MODULE_PARM_DESC(write_cluster_size,
                 "Number of pages used for contiguous writes.");

#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)

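/* Release the per-request buffer covering up to two clusters. */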
void __scm_free_rq_cluster(struct scm_request *scmrq)
{
        int i;

        if (!scmrq->cluster.buf)
                return;

        for (i = 0; i < 2 * write_cluster_size; i++)
                free_page((unsigned long) scmrq->cluster.buf[i]);

        kfree(scmrq->cluster.buf);
}

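/*
 * Allocate a page buffer covering two clusters, the maximum area a
 * single request can span.  On failure the partially filled buffer is
 * left in place for __scm_free_rq_cluster, which copes with the
 * remaining NULL entries.
 */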
int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
        int i;

        scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
                                     GFP_KERNEL);
        if (!scmrq->cluster.buf)
                return -ENOMEM;

        for (i = 0; i < 2 * write_cluster_size; i++) {
                scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
                if (!scmrq->cluster.buf[i])
                        return -ENOMEM;
        }
        INIT_LIST_HEAD(&scmrq->cluster.list);
        return 0;
}

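/* Reset the cluster state of a request. */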
void scm_request_cluster_init(struct scm_request *scmrq)
{
        scmrq->cluster.state = CLUSTER_NONE;
}

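/*
 * Check whether the cluster ranges of two requests overlap.  Request
 * positions are converted from 512-byte sectors to byte offsets and
 * mapped to (inclusive) first/last cluster numbers.
 */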
static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
{
        unsigned long firstA, lastA, firstB, lastB;

        firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
        lastA = (((u64) blk_rq_pos(A->request) << 9) +
                    blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;

        firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
        lastB = (((u64) blk_rq_pos(B->request) << 9) +
                    blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;

        return (firstB <= lastA && firstA <= lastB);
}

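/*
 * Add a request to the cluster list unless it conflicts with a request
 * already on the list:  two requests conflict if their clusters overlap
 * and at least one of them is a write.  Returns false if the caller has
 * to defer the request.
 */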
bool scm_reserve_cluster(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        struct scm_request *iter;

        if (write_cluster_size == 0)
                return true;

        spin_lock(&bdev->lock);
        list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
                if (clusters_intersect(scmrq, iter) &&
                    (rq_data_dir(scmrq->request) == WRITE ||
                     rq_data_dir(iter->request) == WRITE)) {
                        spin_unlock(&bdev->lock);
                        return false;
                }
        }
        list_add(&scmrq->cluster.list, &bdev->cluster_list);
        spin_unlock(&bdev->lock);

        return true;
}

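/* Drop the cluster reservation taken by scm_reserve_cluster. */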
void scm_release_cluster(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        unsigned long flags;

        if (write_cluster_size == 0)
                return;

        spin_lock_irqsave(&bdev->lock, flags);
        list_del(&scmrq->cluster.list);
        spin_unlock_irqrestore(&bdev->lock, flags);
}

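/*
 * Per-device setup:  initialize the cluster list and report the cluster
 * size as optimal I/O size of the request queue.
 */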
void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
{
        INIT_LIST_HEAD(&bdev->cluster_list);
        blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
}

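/*
 * Set up the AOB and msb for the two phases of a clustered write:
 *
 * CLUSTER_READ:  read the one or two clusters containing the request
 *                into the preallocated buffer pages.
 * CLUSTER_WRITE: write the cluster(s) back, taking the unmodified head
 *                and tail from the buffer and the new data from the
 *                request's own pages.
 */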
static void scm_prepare_cluster_request(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        struct scm_device *scmdev = bdev->gendisk->private_data;
        struct request *req = scmrq->request;
        struct aidaw *aidaw = scmrq->aidaw;
        struct msb *msb = &scmrq->aob->msb[0];
        struct req_iterator iter;
        struct bio_vec *bv;
        int i = 0;
        u64 addr;

        switch (scmrq->cluster.state) {
        case CLUSTER_NONE:
                scmrq->cluster.state = CLUSTER_READ;
                /* fall through */
        case CLUSTER_READ:
                scmrq->aob->request.msb_count = 1;
                msb->bs = MSB_BS_4K;
                msb->oc = MSB_OC_READ;
                msb->flags = MSB_FLAG_IDA;
                msb->data_addr = (u64) aidaw;
                msb->blk_count = write_cluster_size;

                addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
                msb->scm_addr = round_down(addr, CLUSTER_SIZE);

                if (msb->scm_addr !=
                    round_down(addr + (u64) blk_rq_bytes(req) - 1,
                               CLUSTER_SIZE))
                        msb->blk_count = 2 * write_cluster_size;

                for (i = 0; i < msb->blk_count; i++) {
                        aidaw->data_addr = (u64) scmrq->cluster.buf[i];
                        aidaw++;
                }

                break;
        case CLUSTER_WRITE:
                msb->oc = MSB_OC_WRITE;

                for (addr = msb->scm_addr;
                     addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
                     addr += PAGE_SIZE) {
                        aidaw->data_addr = (u64) scmrq->cluster.buf[i];
                        aidaw++;
                        i++;
                }
                rq_for_each_segment(bv, req, iter) {
                        aidaw->data_addr = (u64) page_address(bv->bv_page);
                        aidaw++;
                        i++;
                }
                for (; i < msb->blk_count; i++) {
                        aidaw->data_addr = (u64) scmrq->cluster.buf[i];
                        aidaw++;
                }
                break;
        }
}

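/*
 * Writes smaller than one cluster need the read-modify-write treatment;
 * reads never do.
 */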
bool scm_need_cluster_request(struct scm_request *scmrq)
{
        if (rq_data_dir(scmrq->request) == READ)
                return false;

        return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE;
}

/* Called with queue lock held. */
void scm_initiate_cluster_request(struct scm_request *scmrq)
{
        scm_prepare_cluster_request(scmrq);
        if (eadm_start_aob(scmrq->aob))
                scm_request_requeue(scmrq);
}

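/* Check whether a request is currently part of a clustered operation. */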
bool scm_test_cluster_request(struct scm_request *scmrq)
{
        return scmrq->cluster.state != CLUSTER_NONE;
}

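/*
 * Interrupt handling for clustered requests:  after a successful read
 * phase the write phase is started; the request is only finished after
 * the write phase completes or an error occurs.
 */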
void scm_cluster_request_irq(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        unsigned long flags;

        switch (scmrq->cluster.state) {
        case CLUSTER_NONE:
                BUG();
                break;
        case CLUSTER_READ:
                if (scmrq->error) {
                        scm_request_finish(scmrq);
                        break;
                }
                scmrq->cluster.state = CLUSTER_WRITE;
                spin_lock_irqsave(&bdev->rq_lock, flags);
                scm_initiate_cluster_request(scmrq);
                spin_unlock_irqrestore(&bdev->rq_lock, flags);
                break;
        case CLUSTER_WRITE:
                scm_request_finish(scmrq);
                break;
        }
}

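/*
 * write_cluster_size must be 0 (clustering disabled) or a power of two
 * between 2 and 128 pages.
 */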
bool scm_cluster_size_valid(void)
{
        if (write_cluster_size == 1 || write_cluster_size > 128)
                return false;

        return !(write_cluster_size & (write_cluster_size - 1));
}