1 /*
2  * Copyright (c) 2009, Microsoft Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Authors:
18  *   Haiyang Zhang <haiyangz@microsoft.com>
19  *   Hank Janssen  <hjanssen@microsoft.com>
20  *   K. Y. Srinivasan <kys@microsoft.com>
21  *
22  */
23 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
25 #include <linux/init.h>
26 #include <linux/module.h>
27 #include <linux/device.h>
28 #include <linux/interrupt.h>
29 #include <linux/sysctl.h>
30 #include <linux/slab.h>
31 #include <linux/acpi.h>
32 #include <linux/completion.h>
33 #include <linux/hyperv.h>
34 #include <linux/kernel_stat.h>
35 #include <asm/hyperv.h>
36 #include <asm/hypervisor.h>
37 #include <asm/mshyperv.h>
38 #include "hyperv_vmbus.h"
39
40 static struct acpi_device  *hv_acpi_dev;
41
42 static struct tasklet_struct msg_dpc;
43 static struct completion probe_event;
44 static int irq;
45
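/*
 * MMIO range advertised by the VMBus ACPI device; filled in by
 * vmbus_walk_resources() and claimed against iomem_resource in
 * vmbus_acpi_add().
 */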
46 struct resource hyperv_mmio = {
47         .name  = "hyperv mmio",
48         .flags = IORESOURCE_MEM,
49 };
50 EXPORT_SYMBOL_GPL(hyperv_mmio);
51
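/* Return 0 if the VMBus ACPI device has been discovered, -ENODEV otherwise. */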
52 static int vmbus_exists(void)
53 {
54         if (hv_acpi_dev == NULL)
55                 return -ENODEV;
56
57         return 0;
58 }
59
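/*
 * The modalias is "vmbus:" followed by the device type GUID rendered as
 * 32 hex characters (two per byte).
 */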
60 #define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
61 static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
62 {
63         int i;
64         for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
65                 sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
66 }
67
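/*
 * Helpers for locating a channel's state in the shared monitor pages:
 * the monitor ID selects a trigger group (monitorid / 32) and a slot
 * within that group (monitorid % 32).
 */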
68 static u8 channel_monitor_group(struct vmbus_channel *channel)
69 {
70         return (u8)channel->offermsg.monitorid / 32;
71 }
72
73 static u8 channel_monitor_offset(struct vmbus_channel *channel)
74 {
75         return (u8)channel->offermsg.monitorid % 32;
76 }
77
78 static u32 channel_pending(struct vmbus_channel *channel,
79                            struct hv_monitor_page *monitor_page)
80 {
81         u8 monitor_group = channel_monitor_group(channel);
82         return monitor_page->trigger_group[monitor_group].pending;
83 }
84
85 static u32 channel_latency(struct vmbus_channel *channel,
86                            struct hv_monitor_page *monitor_page)
87 {
88         u8 monitor_group = channel_monitor_group(channel);
89         u8 monitor_offset = channel_monitor_offset(channel);
90         return monitor_page->latency[monitor_group][monitor_offset];
91 }
92
93 static u32 channel_conn_id(struct vmbus_channel *channel,
94                            struct hv_monitor_page *monitor_page)
95 {
96         u8 monitor_group = channel_monitor_group(channel);
97         u8 monitor_offset = channel_monitor_offset(channel);
98         return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
99 }
100
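/*
 * sysfs show routines: each one exposes a single field of the channel's
 * offer message, monitor page state or ring-buffer debug info.
 */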
101 static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
102                        char *buf)
103 {
104         struct hv_device *hv_dev = device_to_hv_device(dev);
105
106         if (!hv_dev->channel)
107                 return -ENODEV;
108         return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
109 }
110 static DEVICE_ATTR_RO(id);
111
112 static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
113                           char *buf)
114 {
115         struct hv_device *hv_dev = device_to_hv_device(dev);
116
117         if (!hv_dev->channel)
118                 return -ENODEV;
119         return sprintf(buf, "%d\n", hv_dev->channel->state);
120 }
121 static DEVICE_ATTR_RO(state);
122
123 static ssize_t monitor_id_show(struct device *dev,
124                                struct device_attribute *dev_attr, char *buf)
125 {
126         struct hv_device *hv_dev = device_to_hv_device(dev);
127
128         if (!hv_dev->channel)
129                 return -ENODEV;
130         return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
131 }
132 static DEVICE_ATTR_RO(monitor_id);
133
134 static ssize_t class_id_show(struct device *dev,
135                                struct device_attribute *dev_attr, char *buf)
136 {
137         struct hv_device *hv_dev = device_to_hv_device(dev);
138
139         if (!hv_dev->channel)
140                 return -ENODEV;
141         return sprintf(buf, "{%pUl}\n",
142                        hv_dev->channel->offermsg.offer.if_type.b);
143 }
144 static DEVICE_ATTR_RO(class_id);
145
146 static ssize_t device_id_show(struct device *dev,
147                               struct device_attribute *dev_attr, char *buf)
148 {
149         struct hv_device *hv_dev = device_to_hv_device(dev);
150
151         if (!hv_dev->channel)
152                 return -ENODEV;
153         return sprintf(buf, "{%pUl}\n",
154                        hv_dev->channel->offermsg.offer.if_instance.b);
155 }
156 static DEVICE_ATTR_RO(device_id);
157
158 static ssize_t modalias_show(struct device *dev,
159                              struct device_attribute *dev_attr, char *buf)
160 {
161         struct hv_device *hv_dev = device_to_hv_device(dev);
162         char alias_name[VMBUS_ALIAS_LEN + 1];
163
164         print_alias_name(hv_dev, alias_name);
165         return sprintf(buf, "vmbus:%s\n", alias_name);
166 }
167 static DEVICE_ATTR_RO(modalias);
168
169 static ssize_t server_monitor_pending_show(struct device *dev,
170                                            struct device_attribute *dev_attr,
171                                            char *buf)
172 {
173         struct hv_device *hv_dev = device_to_hv_device(dev);
174
175         if (!hv_dev->channel)
176                 return -ENODEV;
177         return sprintf(buf, "%d\n",
178                        channel_pending(hv_dev->channel,
179                                        vmbus_connection.monitor_pages[1]));
180 }
181 static DEVICE_ATTR_RO(server_monitor_pending);
182
183 static ssize_t client_monitor_pending_show(struct device *dev,
184                                            struct device_attribute *dev_attr,
185                                            char *buf)
186 {
187         struct hv_device *hv_dev = device_to_hv_device(dev);
188
189         if (!hv_dev->channel)
190                 return -ENODEV;
191         return sprintf(buf, "%d\n",
192                        channel_pending(hv_dev->channel,
193                                        vmbus_connection.monitor_pages[1]));
194 }
195 static DEVICE_ATTR_RO(client_monitor_pending);
196
197 static ssize_t server_monitor_latency_show(struct device *dev,
198                                            struct device_attribute *dev_attr,
199                                            char *buf)
200 {
201         struct hv_device *hv_dev = device_to_hv_device(dev);
202
203         if (!hv_dev->channel)
204                 return -ENODEV;
205         return sprintf(buf, "%d\n",
206                        channel_latency(hv_dev->channel,
207                                        vmbus_connection.monitor_pages[0]));
208 }
209 static DEVICE_ATTR_RO(server_monitor_latency);
210
211 static ssize_t client_monitor_latency_show(struct device *dev,
212                                            struct device_attribute *dev_attr,
213                                            char *buf)
214 {
215         struct hv_device *hv_dev = device_to_hv_device(dev);
216
217         if (!hv_dev->channel)
218                 return -ENODEV;
219         return sprintf(buf, "%d\n",
220                        channel_latency(hv_dev->channel,
221                                        vmbus_connection.monitor_pages[1]));
222 }
223 static DEVICE_ATTR_RO(client_monitor_latency);
224
225 static ssize_t server_monitor_conn_id_show(struct device *dev,
226                                            struct device_attribute *dev_attr,
227                                            char *buf)
228 {
229         struct hv_device *hv_dev = device_to_hv_device(dev);
230
231         if (!hv_dev->channel)
232                 return -ENODEV;
233         return sprintf(buf, "%d\n",
234                        channel_conn_id(hv_dev->channel,
235                                        vmbus_connection.monitor_pages[0]));
236 }
237 static DEVICE_ATTR_RO(server_monitor_conn_id);
238
239 static ssize_t client_monitor_conn_id_show(struct device *dev,
240                                            struct device_attribute *dev_attr,
241                                            char *buf)
242 {
243         struct hv_device *hv_dev = device_to_hv_device(dev);
244
245         if (!hv_dev->channel)
246                 return -ENODEV;
247         return sprintf(buf, "%d\n",
248                        channel_conn_id(hv_dev->channel,
249                                        vmbus_connection.monitor_pages[1]));
250 }
251 static DEVICE_ATTR_RO(client_monitor_conn_id);
252
253 static ssize_t out_intr_mask_show(struct device *dev,
254                                   struct device_attribute *dev_attr, char *buf)
255 {
256         struct hv_device *hv_dev = device_to_hv_device(dev);
257         struct hv_ring_buffer_debug_info outbound;
258
259         if (!hv_dev->channel)
260                 return -ENODEV;
261         hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
262         return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
263 }
264 static DEVICE_ATTR_RO(out_intr_mask);
265
266 static ssize_t out_read_index_show(struct device *dev,
267                                    struct device_attribute *dev_attr, char *buf)
268 {
269         struct hv_device *hv_dev = device_to_hv_device(dev);
270         struct hv_ring_buffer_debug_info outbound;
271
272         if (!hv_dev->channel)
273                 return -ENODEV;
274         hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
275         return sprintf(buf, "%d\n", outbound.current_read_index);
276 }
277 static DEVICE_ATTR_RO(out_read_index);
278
279 static ssize_t out_write_index_show(struct device *dev,
280                                     struct device_attribute *dev_attr,
281                                     char *buf)
282 {
283         struct hv_device *hv_dev = device_to_hv_device(dev);
284         struct hv_ring_buffer_debug_info outbound;
285
286         if (!hv_dev->channel)
287                 return -ENODEV;
288         hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
289         return sprintf(buf, "%d\n", outbound.current_write_index);
290 }
291 static DEVICE_ATTR_RO(out_write_index);
292
293 static ssize_t out_read_bytes_avail_show(struct device *dev,
294                                          struct device_attribute *dev_attr,
295                                          char *buf)
296 {
297         struct hv_device *hv_dev = device_to_hv_device(dev);
298         struct hv_ring_buffer_debug_info outbound;
299
300         if (!hv_dev->channel)
301                 return -ENODEV;
302         hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
303         return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
304 }
305 static DEVICE_ATTR_RO(out_read_bytes_avail);
306
307 static ssize_t out_write_bytes_avail_show(struct device *dev,
308                                           struct device_attribute *dev_attr,
309                                           char *buf)
310 {
311         struct hv_device *hv_dev = device_to_hv_device(dev);
312         struct hv_ring_buffer_debug_info outbound;
313
314         if (!hv_dev->channel)
315                 return -ENODEV;
316         hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
317         return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
318 }
319 static DEVICE_ATTR_RO(out_write_bytes_avail);
320
321 static ssize_t in_intr_mask_show(struct device *dev,
322                                  struct device_attribute *dev_attr, char *buf)
323 {
324         struct hv_device *hv_dev = device_to_hv_device(dev);
325         struct hv_ring_buffer_debug_info inbound;
326
327         if (!hv_dev->channel)
328                 return -ENODEV;
329         hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
330         return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
331 }
332 static DEVICE_ATTR_RO(in_intr_mask);
333
334 static ssize_t in_read_index_show(struct device *dev,
335                                   struct device_attribute *dev_attr, char *buf)
336 {
337         struct hv_device *hv_dev = device_to_hv_device(dev);
338         struct hv_ring_buffer_debug_info inbound;
339
340         if (!hv_dev->channel)
341                 return -ENODEV;
342         hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
343         return sprintf(buf, "%d\n", inbound.current_read_index);
344 }
345 static DEVICE_ATTR_RO(in_read_index);
346
347 static ssize_t in_write_index_show(struct device *dev,
348                                    struct device_attribute *dev_attr, char *buf)
349 {
350         struct hv_device *hv_dev = device_to_hv_device(dev);
351         struct hv_ring_buffer_debug_info inbound;
352
353         if (!hv_dev->channel)
354                 return -ENODEV;
355         hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
356         return sprintf(buf, "%d\n", inbound.current_write_index);
357 }
358 static DEVICE_ATTR_RO(in_write_index);
359
360 static ssize_t in_read_bytes_avail_show(struct device *dev,
361                                         struct device_attribute *dev_attr,
362                                         char *buf)
363 {
364         struct hv_device *hv_dev = device_to_hv_device(dev);
365         struct hv_ring_buffer_debug_info inbound;
366
367         if (!hv_dev->channel)
368                 return -ENODEV;
369         hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
370         return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
371 }
372 static DEVICE_ATTR_RO(in_read_bytes_avail);
373
374 static ssize_t in_write_bytes_avail_show(struct device *dev,
375                                          struct device_attribute *dev_attr,
376                                          char *buf)
377 {
378         struct hv_device *hv_dev = device_to_hv_device(dev);
379         struct hv_ring_buffer_debug_info inbound;
380
381         if (!hv_dev->channel)
382                 return -ENODEV;
383         hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
384         return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
385 }
386 static DEVICE_ATTR_RO(in_write_bytes_avail);
387
388 /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
389 static struct attribute *vmbus_attrs[] = {
390         &dev_attr_id.attr,
391         &dev_attr_state.attr,
392         &dev_attr_monitor_id.attr,
393         &dev_attr_class_id.attr,
394         &dev_attr_device_id.attr,
395         &dev_attr_modalias.attr,
396         &dev_attr_server_monitor_pending.attr,
397         &dev_attr_client_monitor_pending.attr,
398         &dev_attr_server_monitor_latency.attr,
399         &dev_attr_client_monitor_latency.attr,
400         &dev_attr_server_monitor_conn_id.attr,
401         &dev_attr_client_monitor_conn_id.attr,
402         &dev_attr_out_intr_mask.attr,
403         &dev_attr_out_read_index.attr,
404         &dev_attr_out_write_index.attr,
405         &dev_attr_out_read_bytes_avail.attr,
406         &dev_attr_out_write_bytes_avail.attr,
407         &dev_attr_in_intr_mask.attr,
408         &dev_attr_in_read_index.attr,
409         &dev_attr_in_write_index.attr,
410         &dev_attr_in_read_bytes_avail.attr,
411         &dev_attr_in_write_bytes_avail.attr,
412         NULL,
413 };
414 ATTRIBUTE_GROUPS(vmbus);
415
416 /*
417  * vmbus_uevent - add uevent for our device
418  *
419  * This routine is invoked when a device is added or removed on the vmbus to
420  * generate a uevent for udev in userspace. udev will then match the uevent
421  * generated here against its rules to load the appropriate driver.
422  *
423  * The alias string will be of the form vmbus:guid where guid is the string
424  * representation of the device guid (each byte of the guid is represented
425  * with two hex characters).
426  */
427 static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
428 {
429         struct hv_device *dev = device_to_hv_device(device);
430         int ret;
431         char alias_name[VMBUS_ALIAS_LEN + 1];
432
433         print_alias_name(dev, alias_name);
434         ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
435         return ret;
436 }
437
438 static const uuid_le null_guid;
439
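/* A driver's id_table is terminated by an all-zero GUID entry. */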
440 static inline bool is_null_guid(const __u8 *guid)
441 {
442         if (memcmp(guid, &null_guid, sizeof(uuid_le)))
443                 return false;
444         return true;
445 }
446
447 /*
448  * Return a matching hv_vmbus_device_id pointer.
449  * If there is no match, return NULL.
450  */
451 static const struct hv_vmbus_device_id *hv_vmbus_get_id(
452                                         const struct hv_vmbus_device_id *id,
453                                         const __u8 *guid)
454 {
455         for (; !is_null_guid(id->guid); id++)
456                 if (!memcmp(&id->guid, guid, sizeof(uuid_le)))
457                         return id;
458
459         return NULL;
460 }
461
462
463
464 /*
465  * vmbus_match - Attempt to match the specified device to the specified driver
466  */
467 static int vmbus_match(struct device *device, struct device_driver *driver)
468 {
469         struct hv_driver *drv = drv_to_hv_drv(driver);
470         struct hv_device *hv_dev = device_to_hv_device(device);
471
472         if (hv_vmbus_get_id(drv->id_table, hv_dev->dev_type.b))
473                 return 1;
474
475         return 0;
476 }
477
478 /*
479  * vmbus_probe - Add the new vmbus's child device
480  */
481 static int vmbus_probe(struct device *child_device)
482 {
483         int ret = 0;
484         struct hv_driver *drv =
485                         drv_to_hv_drv(child_device->driver);
486         struct hv_device *dev = device_to_hv_device(child_device);
487         const struct hv_vmbus_device_id *dev_id;
488
489         dev_id = hv_vmbus_get_id(drv->id_table, dev->dev_type.b);
490         if (drv->probe) {
491                 ret = drv->probe(dev, dev_id);
492                 if (ret != 0)
493                         pr_err("probe failed for device %s (%d)\n",
494                                dev_name(child_device), ret);
495
496         } else {
497                 pr_err("probe not set for driver %s\n",
498                        dev_name(child_device));
499                 ret = -ENODEV;
500         }
501         return ret;
502 }
503
504 /*
505  * vmbus_remove - Remove a vmbus device
506  */
507 static int vmbus_remove(struct device *child_device)
508 {
509         struct hv_driver *drv = drv_to_hv_drv(child_device->driver);
510         struct hv_device *dev = device_to_hv_device(child_device);
511
512         if (drv->remove)
513                 drv->remove(dev);
514         else
515                 pr_err("remove not set for driver %s\n",
516                         dev_name(child_device));
517
518         return 0;
519 }
520
521
522 /*
523  * vmbus_shutdown - Shutdown a vmbus device
524  */
525 static void vmbus_shutdown(struct device *child_device)
526 {
527         struct hv_driver *drv;
528         struct hv_device *dev = device_to_hv_device(child_device);
529
530
531         /* The device may not be attached yet */
532         if (!child_device->driver)
533                 return;
534
535         drv = drv_to_hv_drv(child_device->driver);
536
537         if (drv->shutdown)
538                 drv->shutdown(dev);
539
540         return;
541 }
542
543
544 /*
545  * vmbus_device_release - Final callback release of the vmbus child device
546  */
547 static void vmbus_device_release(struct device *device)
548 {
549         struct hv_device *hv_dev = device_to_hv_device(device);
550
551         kfree(hv_dev);
552
553 }
554
555 /* The one and only one */
556 static struct bus_type  hv_bus = {
557         .name =         "vmbus",
558         .match =                vmbus_match,
559         .shutdown =             vmbus_shutdown,
560         .remove =               vmbus_remove,
561         .probe =                vmbus_probe,
562         .uevent =               vmbus_uevent,
563         .dev_groups =           vmbus_groups,
564 };
565
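/*
 * Context used to hand a copy of a host message from the message DPC
 * over to a workqueue for processing in process context.
 */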
566 struct onmessage_work_context {
567         struct work_struct work;
568         struct hv_message msg;
569 };
570
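/* Workqueue handler: process one copied host message and free the context. */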
571 static void vmbus_onmessage_work(struct work_struct *work)
572 {
573         struct onmessage_work_context *ctx;
574
575         ctx = container_of(work, struct onmessage_work_context,
576                            work);
577         vmbus_onmessage(&ctx->msg);
578         kfree(ctx);
579 }
580
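/*
 * Tasklet that drains this CPU's SynIC message page: each pending message
 * is copied and queued to the connection work queue, the slot is marked
 * HVMSG_NONE, and the EOM MSR is written if the host flagged further
 * messages as pending.
 */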
581 static void vmbus_on_msg_dpc(unsigned long data)
582 {
583         int cpu = smp_processor_id();
584         void *page_addr = hv_context.synic_message_page[cpu];
585         struct hv_message *msg = (struct hv_message *)page_addr +
586                                   VMBUS_MESSAGE_SINT;
587         struct onmessage_work_context *ctx;
588
589         while (1) {
590                 if (msg->header.message_type == HVMSG_NONE) {
591                         /* no msg */
592                         break;
593                 } else {
594                         ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
595                         if (ctx == NULL)
596                                 continue;
597                         INIT_WORK(&ctx->work, vmbus_onmessage_work);
598                         memcpy(&ctx->msg, msg, sizeof(*msg));
599                         queue_work(vmbus_connection.work_queue, &ctx->work);
600                 }
601
602                 msg->header.message_type = HVMSG_NONE;
603
604                 /*
605                  * Make sure the write to MessageType (i.e. set to
606                  * HVMSG_NONE) happens before we read the
607                  * MessagePending and EOMing. Otherwise, the EOMing
608                  * will not deliver any more messages since there is
609                  * no empty slot.
610                  */
611                 mb();
612
613                 if (msg->header.message_flags.msg_pending) {
614                         /*
615                          * This will cause message queue rescan to
616                          * possibly deliver another msg from the
617                          * hypervisor
618                          */
619                         wrmsrl(HV_X64_MSR_EOM, 0);
620                 }
621         }
622 }
623
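/*
 * Top-level VMBus interrupt handler: check the SynIC event page and
 * message page for this CPU and schedule the event and/or message
 * tasklets accordingly.
 */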
624 static void vmbus_isr(void)
625 {
626         int cpu = smp_processor_id();
627         void *page_addr;
628         struct hv_message *msg;
629         union hv_synic_event_flags *event;
630         bool handled = false;
631
632         page_addr = hv_context.synic_event_page[cpu];
633         if (page_addr == NULL)
634                 return;
635
636         event = (union hv_synic_event_flags *)page_addr +
637                                          VMBUS_MESSAGE_SINT;
638         /*
639          * Check for events before checking for messages. This is the order
640          * in which events and messages are checked in Windows guests on
641          * Hyper-V, and the Windows team suggested we do the same.
642          */
643
644         if ((vmbus_proto_version == VERSION_WS2008) ||
645                 (vmbus_proto_version == VERSION_WIN7)) {
646
647                 /* Since we are a child, we only need to check bit 0 */
648                 if (sync_test_and_clear_bit(0,
649                         (unsigned long *) &event->flags32[0])) {
650                         handled = true;
651                 }
652         } else {
653                 /*
654                  * Our host is win8 or above. The signaling mechanism
655                  * has changed and we can directly look at the event page.
656                  * If bit n is set then we have an interrupt on the channel
657                  * whose id is n.
658                  */
659                 handled = true;
660         }
661
662         if (handled)
663                 tasklet_schedule(hv_context.event_dpc[cpu]);
664
665
666         page_addr = hv_context.synic_message_page[cpu];
667         msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
668
669         /* Check if there are actual msgs to be processed */
670         if (msg->header.message_type != HVMSG_NONE)
671                 tasklet_schedule(&msg_dpc);
672 }
673
674 /*
675  * vmbus_bus_init - Main vmbus driver initialization routine.
676  *
677  * Here, we
678  *      - initialize the vmbus driver context
679  *      - invoke the vmbus hv main init routine
680  *      - get the irq resource
681  *      - retrieve the channel offers
682  */
683 static int vmbus_bus_init(int irq)
684 {
685         int ret;
686
687         /* Hypervisor initialization...setup hypercall page..etc */
688         ret = hv_init();
689         if (ret != 0) {
690                 pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
691                 return ret;
692         }
693
694         tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
695
696         ret = bus_register(&hv_bus);
697         if (ret)
698                 goto err_cleanup;
699
700         hv_setup_vmbus_irq(vmbus_isr);
701
702         ret = hv_synic_alloc();
703         if (ret)
704                 goto err_alloc;
705         /*
706          * Initialize the per-cpu interrupt state and
707          * connect to the host.
708          */
709         on_each_cpu(hv_synic_init, NULL, 1);
710         ret = vmbus_connect();
711         if (ret)
712                 goto err_alloc;
713
714         vmbus_request_offers();
715
716         return 0;
717
718 err_alloc:
719         hv_synic_free();
720         hv_remove_vmbus_irq();
721
722         bus_unregister(&hv_bus);
723
724 err_cleanup:
725         hv_cleanup();
726
727         return ret;
728 }
729
730 /**
731  * __vmbus_driver_register() - Register a vmbus driver
732  * @hv_driver: Pointer to driver structure you want to register
733  * @owner: owner module of the driver
734  * @mod_name: module name string
735  *
736  * Registers the given driver with Linux through the 'driver_register()' call
737  * and sets up the hyper-v vmbus handling for this driver.
738  * It will return the state of the 'driver_register()' call.
739  *
740  */
741 int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
742 {
743         int ret;
744
745         pr_info("registering driver %s\n", hv_driver->name);
746
747         ret = vmbus_exists();
748         if (ret < 0)
749                 return ret;
750
751         hv_driver->driver.name = hv_driver->name;
752         hv_driver->driver.owner = owner;
753         hv_driver->driver.mod_name = mod_name;
754         hv_driver->driver.bus = &hv_bus;
755
756         ret = driver_register(&hv_driver->driver);
757
758         return ret;
759 }
760 EXPORT_SYMBOL_GPL(__vmbus_driver_register);
761
762 /**
763  * vmbus_driver_unregister() - Unregister a vmbus driver
764  * @hv_driver: Pointer to driver structure you want to un-register
765  *
766  * Un-register the given driver that was previously registered with a call to
767  * vmbus_driver_register().
768  */
769 void vmbus_driver_unregister(struct hv_driver *hv_driver)
770 {
771         pr_info("unregistering driver %s\n", hv_driver->name);
772
773         if (!vmbus_exists())
774                 driver_unregister(&hv_driver->driver);
775 }
776 EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
777
778 /*
779  * vmbus_device_create - Allocate and initialize a new child device object
780  * for the vmbus; it is registered later via vmbus_device_register().
781  */
782 struct hv_device *vmbus_device_create(const uuid_le *type,
783                                       const uuid_le *instance,
784                                       struct vmbus_channel *channel)
785 {
786         struct hv_device *child_device_obj;
787
788         child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
789         if (!child_device_obj) {
790                 pr_err("Unable to allocate device object for child device\n");
791                 return NULL;
792         }
793
794         child_device_obj->channel = channel;
795         memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
796         memcpy(&child_device_obj->dev_instance, instance,
797                sizeof(uuid_le));
798
799
800         return child_device_obj;
801 }
802
803 /*
804  * vmbus_device_register - Register the child device
805  */
806 int vmbus_device_register(struct hv_device *child_device_obj)
807 {
808         int ret = 0;
809
810         static atomic_t device_num = ATOMIC_INIT(0);
811
812         dev_set_name(&child_device_obj->device, "vmbus_0_%d",
813                      atomic_inc_return(&device_num));
814
815         child_device_obj->device.bus = &hv_bus;
816         child_device_obj->device.parent = &hv_acpi_dev->dev;
817         child_device_obj->device.release = vmbus_device_release;
818
819         /*
820          * Register with the LDM. This will kick off the driver/device
821          * binding...which will eventually call vmbus_match() and vmbus_probe()
822          */
823         ret = device_register(&child_device_obj->device);
824
825         if (ret)
826                 pr_err("Unable to register child device\n");
827         else
828                 pr_debug("child device %s registered\n",
829                         dev_name(&child_device_obj->device));
830
831         return ret;
832 }
833
834 /*
835  * vmbus_device_unregister - Remove the specified child device
836  * from the vmbus.
837  */
838 void vmbus_device_unregister(struct hv_device *device_obj)
839 {
840         pr_debug("child device %s unregistered\n",
841                 dev_name(&device_obj->device));
842
843         /*
844          * Kick off the process of unregistering the device.
845          * This will call vmbus_remove() and eventually vmbus_device_release()
846          */
847         device_unregister(&device_obj->device);
848 }
849
850
851 /*
852  * VMBUS is an ACPI-enumerated device. Get the information we
853  * need from the DSDT.
854  */
855
856 static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
857 {
858         switch (res->type) {
859         case ACPI_RESOURCE_TYPE_IRQ:
860                 irq = res->data.irq.interrupts[0];
861                 break;
862
863         case ACPI_RESOURCE_TYPE_ADDRESS64:
864                 hyperv_mmio.start = res->data.address64.address.minimum;
865                 hyperv_mmio.end = res->data.address64.address.maximum;
866                 break;
867         }
868
869         return AE_OK;
870 }
871
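/*
 * Bind to the VMBus ACPI device: walk _CRS of the device (and of its
 * parent, if any) to pick up the interrupt and MMIO resources, then
 * signal hv_acpi_init() via probe_event.
 */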
872 static int vmbus_acpi_add(struct acpi_device *device)
873 {
874         acpi_status result;
875         int ret_val = -ENODEV;
876
877         hv_acpi_dev = device;
878
879         result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
880                                         vmbus_walk_resources, NULL);
881
882         if (ACPI_FAILURE(result))
883                 goto acpi_walk_err;
884         /*
885          * The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that
886          * has the mmio ranges. Get that.
887          */
888         if (device->parent) {
889                 result = acpi_walk_resources(device->parent->handle,
890                                         METHOD_NAME__CRS,
891                                         vmbus_walk_resources, NULL);
892
893                 if (ACPI_FAILURE(result))
894                         goto acpi_walk_err;
895                 if (hyperv_mmio.start && hyperv_mmio.end)
896                         request_resource(&iomem_resource, &hyperv_mmio);
897         }
898         ret_val = 0;
899
900 acpi_walk_err:
901         complete(&probe_event);
902         return ret_val;
903 }
904
905 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
906         {"VMBUS", 0},
907         {"VMBus", 0},
908         {"", 0},
909 };
910 MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
911
912 static struct acpi_driver vmbus_acpi_driver = {
913         .name = "vmbus",
914         .ids = vmbus_acpi_device_ids,
915         .ops = {
916                 .add = vmbus_acpi_add,
917         },
918 };
919
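/*
 * Module entry point: register the ACPI driver, wait for vmbus_acpi_add()
 * to report the interrupt resource, then bring up the bus via
 * vmbus_bus_init().
 */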
920 static int __init hv_acpi_init(void)
921 {
922         int ret, t;
923
924         if (x86_hyper != &x86_hyper_ms_hyperv)
925                 return -ENODEV;
926
927         init_completion(&probe_event);
928
929         /*
930          * Get irq resources first.
931          */
932         ret = acpi_bus_register_driver(&vmbus_acpi_driver);
933
934         if (ret)
935                 return ret;
936
937         t = wait_for_completion_timeout(&probe_event, 5*HZ);
938         if (t == 0) {
939                 ret = -ETIMEDOUT;
940                 goto cleanup;
941         }
942
943         if (irq <= 0) {
944                 ret = -ENODEV;
945                 goto cleanup;
946         }
947
948         ret = vmbus_bus_init(irq);
949         if (ret)
950                 goto cleanup;
951
952         return 0;
953
954 cleanup:
955         acpi_bus_unregister_driver(&vmbus_acpi_driver);
956         hv_acpi_dev = NULL;
957         return ret;
958 }
959
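/* Module unload: tear down the bus in roughly the reverse order of init. */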
960 static void __exit vmbus_exit(void)
961 {
962         hv_remove_vmbus_irq();
963         vmbus_free_channels();
964         bus_unregister(&hv_bus);
965         hv_cleanup();
966         acpi_bus_unregister_driver(&vmbus_acpi_driver);
967 }
968
969
970 MODULE_LICENSE("GPL");
971
972 subsys_initcall(hv_acpi_init);
973 module_exit(vmbus_exit);