/*
 * arch/s390/pci/pci_clp.c
 *
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>

static inline void zpci_err_clp(unsigned int rsp, int rc)
{
        struct {
                unsigned int rsp;
                int rc;
        } __packed data = {rsp, rc};

        zpci_err_hex(&data, sizeof(data));
}

/*
 * Call Logical Processor
 * Retry logic is handled by the caller.
 */
static inline u8 clp_instr(void *data)
{
        struct { u8 _[CLP_BLK_SIZE]; } *req = data;
        u64 ignored;
        u8 cc;

        /*
         * Issue the CLP instruction (opcode 0xb9a0) on the request block and
         * extract the condition code from the program mask via ipm/srl.
         */
        asm volatile (
                "       .insn   rrf,0xb9a00000,%[ign],%[req],0x0,0x2\n"
                "       ipm     %[cc]\n"
                "       srl     %[cc],28\n"
                : [cc] "=d" (cc), [ign] "=d" (ignored), "+m" (*req)
                : [req] "a" (req)
                : "cc");
        return cc;
}

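/*
 * CLP request and response data share one block of CLP_BLK_SIZE bytes,
 * allocated and freed as whole pages.
 */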
static void *clp_alloc_block(gfp_t gfp_mask)
{
        return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
}

static void clp_free_block(void *ptr)
{
        free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}

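/*
 * Copy the function group properties (TLB refresh, DMA mask, MSI address,
 * FMB update interval, maximum bus speed) from a Query PCI Function Group
 * response into the zpci device.
 */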
static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
                                      struct clp_rsp_query_pci_grp *response)
{
        zdev->tlb_refresh = response->refresh;
        zdev->dma_mask = response->dasm;
        zdev->msi_addr = response->msia;
        zdev->fmb_update = response->mui;

        switch (response->version) {
        case 1:
                zdev->max_bus_speed = PCIE_SPEED_5_0GT;
                break;
        default:
                zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
                break;
        }
}

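/* Query the PCI function group identified by pfgid and store the result in zdev */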
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
        struct clp_req_rsp_query_pci_grp *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        memset(rrb, 0, sizeof(*rrb));
        rrb->request.hdr.len = sizeof(rrb->request);
        rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
        rrb->response.hdr.len = sizeof(rrb->response);
        rrb->request.pfgid = pfgid;

        rc = clp_instr(rrb);
        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
                clp_store_query_pci_fngrp(zdev, &rrb->response);
        else {
                zpci_err("Q PCI FGRP:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
        clp_free_block(rrb);
        return rc;
}

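/*
 * Copy the BARs, the DMA range and the pchid/pfgid from a Query PCI
 * Function response into the zpci device.
 */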
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
                                  struct clp_rsp_query_pci *response)
{
        int i;

        for (i = 0; i < PCI_BAR_COUNT; i++) {
                zdev->bars[i].val = le32_to_cpu(response->bar[i]);
                zdev->bars[i].size = response->bar_size[i];
        }
        zdev->start_dma = response->sdma;
        zdev->end_dma = response->edma;
        zdev->pchid = response->pchid;
        zdev->pfgid = response->pfgid;
        return 0;
}

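/*
 * Query a PCI function by its handle and, if it belongs to a function
 * group, also query the group properties.
 */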
static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
{
        struct clp_req_rsp_query_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        memset(rrb, 0, sizeof(*rrb));
        rrb->request.hdr.len = sizeof(rrb->request);
        rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
        rrb->response.hdr.len = sizeof(rrb->response);
        rrb->request.fh = fh;

        rc = clp_instr(rrb);
        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
                rc = clp_store_query_pci_fn(zdev, &rrb->response);
                if (rc)
                        goto out;
                if (rrb->response.pfgid)
                        rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
        } else {
                zpci_err("Q PCI FN:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
out:
        clp_free_block(rrb);
        return rc;
}

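/*
 * Allocate a zpci device for the function identified by fid/fh, query its
 * properties and register it in the configured or standby state.
 */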
int clp_add_pci_device(u32 fid, u32 fh, int configured)
{
        struct zpci_dev *zdev;
        int rc;

        zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
        zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
        if (!zdev)
                return -ENOMEM;

        zdev->fh = fh;
        zdev->fid = fid;

        /* Query function properties and update zdev */
        rc = clp_query_pci_fn(zdev, fh);
        if (rc)
                goto error;

        if (configured)
                zdev->state = ZPCI_FN_STATE_CONFIGURED;
        else
                zdev->state = ZPCI_FN_STATE_STANDBY;

        rc = zpci_create_device(zdev);
        if (rc)
                goto error;
        return 0;

error:
        kfree(zdev);
        return rc;
}

/*
 * Enable/Disable a given PCI function defined by its function handle.
 * On success the updated function handle is returned in *fh.
 */
static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
{
        struct clp_req_rsp_set_pci *rrb;
        int rc, retries = 100;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        /* Retry while the firmware reports busy, waiting 20 ms between attempts */
        do {
                memset(rrb, 0, sizeof(*rrb));
                rrb->request.hdr.len = sizeof(rrb->request);
                rrb->request.hdr.cmd = CLP_SET_PCI_FN;
                rrb->response.hdr.len = sizeof(rrb->response);
                rrb->request.fh = *fh;
                rrb->request.oc = command;
                rrb->request.ndas = nr_dma_as;

                rc = clp_instr(rrb);
                if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
                        retries--;
                        if (retries < 0)
                                break;
                        msleep(20);
                }
        } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
                *fh = rrb->response.fh;
        else {
                zpci_err("Set PCI FN:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
        clp_free_block(rrb);
        return rc;
}

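/* Enable a PCI function with nr_dma_as DMA address spaces and store the new handle */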
int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
{
        u32 fh = zdev->fh;
        int rc;

        rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
        if (!rc)
                /* Success -> store enabled handle in zdev */
                zdev->fh = fh;

        zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
        return rc;
}

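/* Disable a PCI function and store the new handle; a no-op if it is not enabled */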
int clp_disable_fh(struct zpci_dev *zdev)
{
        u32 fh = zdev->fh;
        int rc;

        if (!zdev_enabled(zdev))
                return 0;

        rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
        if (!rc)
                /* Success -> store disabled handle in zdev */
                zdev->fh = fh;

        zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
        return rc;
}

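/*
 * Walk the complete PCI function list. The firmware returns the list in
 * chunks; a non-zero resume token means more entries follow, so the request
 * is repeated until the token is zero. The callback is invoked for every
 * entry returned.
 */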
static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
                        void (*cb)(struct clp_fh_list_entry *entry))
{
        u64 resume_token = 0;
        int entries, i, rc;

        do {
                memset(rrb, 0, sizeof(*rrb));
                rrb->request.hdr.len = sizeof(rrb->request);
                rrb->request.hdr.cmd = CLP_LIST_PCI;
                /* store as many entries as possible */
                rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
                rrb->request.resume_token = resume_token;

                /* Get PCI function handle list */
                rc = clp_instr(rrb);
                if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
                        zpci_err("List PCI FN:\n");
                        zpci_err_clp(rrb->response.hdr.rsp, rc);
                        rc = -EIO;
                        goto out;
                }

                WARN_ON_ONCE(rrb->response.entry_size !=
                        sizeof(struct clp_fh_list_entry));

                entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
                        rrb->response.entry_size;

                resume_token = rrb->response.resume_token;
                for (i = 0; i < entries; i++)
                        cb(&rrb->response.fh_list[i]);
        } while (resume_token);
out:
        return rc;
}

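/* List callback for the initial scan: add every function that reports a vendor id */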
static void __clp_add(struct clp_fh_list_entry *entry)
{
        if (!entry->vendor_id)
                return;

        clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
}

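/*
 * List callback for a full rescan: add functions not known yet and stop
 * devices whose function is no longer configured.
 */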
static void __clp_rescan(struct clp_fh_list_entry *entry)
{
        struct zpci_dev *zdev;

        if (!entry->vendor_id)
                return;

        zdev = get_zdev_by_fid(entry->fid);
        if (!zdev) {
                clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
                return;
        }

        if (!entry->config_state) {
                /*
                 * The handle is already disabled, which means iota/irq
                 * resources can no longer be freed via the firmware
                 * interfaces. Free the remaining resources manually
                 * (DMA memory, debug, sysfs)...
                 */
                zpci_stop_device(zdev);
        }
}

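/* List callback for the simple rescan: only refresh the stored function handle */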
static void __clp_update(struct clp_fh_list_entry *entry)
{
        struct zpci_dev *zdev;

        if (!entry->vendor_id)
                return;

        zdev = get_zdev_by_fid(entry->fid);
        if (!zdev)
                return;

        zdev->fh = entry->fh;
}

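/* Scan the complete function list and add every function found */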
int clp_scan_pci_devices(void)
{
        struct clp_req_rsp_list_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, __clp_add);

        clp_free_block(rrb);
        return rc;
}

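/* Rescan the function list: add new functions, stop deconfigured ones */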
int clp_rescan_pci_devices(void)
{
        struct clp_req_rsp_list_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, __clp_rescan);

        clp_free_block(rrb);
        return rc;
}

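/*
 * Like clp_rescan_pci_devices(), but only function handles are updated.
 * The list block is allocated with GFP_NOWAIT, so the allocation itself
 * does not block.
 */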
int clp_rescan_pci_devices_simple(void)
{
        struct clp_req_rsp_list_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_NOWAIT);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, __clp_update);

        clp_free_block(rrb);
        return rc;
}