/* visorchipset_main.c
 *
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include "version.h"
#include "visorchipset.h"
#include "procobjecttree.h"
#include "visorchannel.h"
#include "periodic_work.h"
#include "file.h"
#include "parser.h"
#include "uisutils.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"

#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
#define TEST_VNIC_PHYSITF "eth0"        /* physical network itf for
                                         * vnic loopback test */
#define TEST_VNIC_SWITCHNO 1
#define TEST_VNIC_BUSNO 9

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE   50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST   1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

/*
 * Module parameters
 */
static int visorchipset_testvnic;
static int visorchipset_testvnicclient;
static int visorchipset_testmsg;
static int visorchipset_major;
static int visorchipset_serverregwait;
static int visorchipset_clientregwait = 1;      /* default is on */
static int visorchipset_testteardown;
static int visorchipset_disable_controlvm;
static int visorchipset_holdchipsetready;

/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
static unsigned long most_recent_message_jiffies;       /* when we got our last
                                                         * controlvm message */
static int serverregistered;
static int clientregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct controlvm_message_header g_diag_msg_hdr;
static struct controlvm_message_header g_chipset_msg_hdr;
static struct controlvm_message_header g_del_dump_msg_hdr;
static const uuid_le spar_diag_pool_channel_protocol_uuid =
        SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
/* 0xffffff is an invalid Bus/Device number */
static u32 g_diagpool_bus_no = 0xffffff;
static u32 g_diagpool_dev_no = 0xffffff;
static struct controlvm_message_packet g_devicechangestate_packet;

/* Only VNIC and VHBA channels are sent to visorclientbus (aka
 * "visorhackbus")
 */
#define FOR_VISORHACKBUS(channel_type_guid) \
        (((uuid_le_cmp(channel_type_guid,\
                       spar_vnic_channel_protocol_uuid) == 0) ||\
        (uuid_le_cmp(channel_type_guid,\
                        spar_vhba_channel_protocol_uuid) == 0)))
#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))

#define is_diagpool_channel(channel_type_guid) \
        (uuid_le_cmp(channel_type_guid,\
                     spar_diag_pool_channel_protocol_uuid) == 0)

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
        u8 __iomem *ptr;        /* pointer to base address of payload pool */
        u64 offset;             /* offset from beginning of controlvm
                                 * channel to beginning of payload pool */
        u32 bytes;              /* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
 */
struct visor_livedump_info {
        struct controlvm_message_header dumpcapture_header;
        struct controlvm_message_header gettextdump_header;
        struct controlvm_message_header dumpcomplete_header;
        bool gettextdump_outstanding;
        u32 crc32;
        unsigned long length;
        atomic_t buffers_in_use;
        unsigned long destination;
};

static struct visor_livedump_info livedump_info;

/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements.  In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid = false;

/* This identifies a data buffer that has been received via a controlvm message
 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
        struct list_head next;  /* putfile_buffer_entry list */
        struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
        /* a payload from a controlvm message, containing a file data buffer */
        struct parser_context *parser_ctx;
        /* points within data area of parser_ctx to next byte of data */
        u8 *pnext;
        /* # bytes left from <pnext> to the end of this data buffer */
        size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <putfile_request_list>.
 */
struct putfile_request {
        u64 sig;                /* PUTFILE_REQUEST_SIG */

        /* header from original TransmitFile request */
        struct controlvm_message_header controlvm_header;
        u64 file_request_number;        /* from original TransmitFile request */

        /* link to next struct putfile_request */
        struct list_head next_putfile_request;

        /* most-recent sequence number supplied via a controlvm message */
        u64 data_sequence_number;

        /* head of putfile_buffer_entry list, which describes the data to be
         * supplied as putfile data;
         * - this list is added to when controlvm messages come in that supply
         * file data
         * - this list is removed from via the hotplug program that is actually
         * consuming these buffers to write as file data */
        struct list_head input_buffer_list;
        spinlock_t req_list_lock;       /* lock for input_buffer_list */

        /* waiters for input_buffer_list to go non-empty */
        wait_queue_head_t input_buffer_wq;

        /* data not yet read within current putfile_buffer_entry */
        struct putfile_active_buffer active_buf;

        /* <0 = failed, 0 = in-progress, >0 = successful;
         * note that this must be set with req_list_lock, and if you set <0,
         * it is your responsibility to also free up all of the other objects
         * in this struct (like input_buffer_list, active_buf.parser_ctx)
         * before releasing the lock
         */
        int completion_status;
};

struct parahotplug_request {
        struct list_head list;
        int id;
        unsigned long expiration;
        struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);  /* lock for above */
static void parahotplug_process_list(void);

/* Notifiers registered by the visorbus (server) and visorclientbus (client)
 * drivers, used to forward bus/device events to them.
 */
static struct visorchipset_busdev_notifiers busdev_server_notifiers;
static struct visorchipset_busdev_notifiers busdev_client_notifiers;

static void bus_create_response(u32 bus_no, int response);
static void bus_destroy_response(u32 bus_no, int response);
static void device_create_response(u32 bus_no, u32 dev_no, int response);
static void device_destroy_response(u32 bus_no, u32 dev_no, int response);
static void device_resume_response(u32 bus_no, u32 dev_no, int response);

static struct visorchipset_busdev_responders busdev_responders = {
        .bus_create = bus_create_response,
        .bus_destroy = bus_destroy_response,
        .device_create = device_create_response,
        .device_destroy = device_destroy_response,
        .device_pause = visorchipset_device_pause_response,
        .device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /* indicates major num for device */

/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
                               struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
                               struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
                                struct device_attribute *attr, const char *buf,
                                size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
                          char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
                           char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
                                    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
        &dev_attr_toolaction.attr,
        &dev_attr_boottotool.attr,
        &dev_attr_error.attr,
        &dev_attr_textid.attr,
        &dev_attr_remaining_steps.attr,
        NULL
};

static struct attribute_group visorchipset_install_group = {
        .name = "install",
        .attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
        &dev_attr_chipsetready.attr,
        NULL
};

static struct attribute_group visorchipset_guest_group = {
        .name = "guest",
        .attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
        &dev_attr_devicedisabled.attr,
        &dev_attr_deviceenabled.attr,
        NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
        .name = "parahotplug",
        .attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
        &visorchipset_install_group,
        &visorchipset_guest_group,
        &visorchipset_parahotplug_group,
        NULL
};

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
        .name = "visorchipset",
        .id = -1,
        .dev.groups = visorchipset_dev_groups,
};

/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
                              int response);
static void controlvm_respond_chipset_init(
                struct controlvm_message_header *msg_hdr, int response,
                enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
                struct controlvm_message_header *msg_hdr, int response,
                struct spar_segment_state state);

static ssize_t toolaction_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        u8 tool_action;

        visorchannel_read(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         tool_action), &tool_action, sizeof(u8));
        return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        u8 tool_action;
        int ret;

        if (kstrtou8(buf, 10, &tool_action) != 0)
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         tool_action),
                &tool_action, sizeof(u8));

        if (ret)
                return ret;
        return count;
}

static ssize_t boottotool_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct efi_spar_indication efi_spar_indication;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   efi_spar_ind), &efi_spar_indication,
                          sizeof(struct efi_spar_indication));
        return scnprintf(buf, PAGE_SIZE, "%u\n",
                         efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        int val, ret;
        struct efi_spar_indication efi_spar_indication;

        if (kstrtoint(buf, 10, &val) != 0)
                return -EINVAL;

        efi_spar_indication.boot_to_tool = val;
        ret = visorchannel_write(controlvm_channel,
                        offsetof(struct spar_controlvm_channel_protocol,
                                 efi_spar_ind), &(efi_spar_indication),
                                 sizeof(struct efi_spar_indication));

        if (ret)
                return ret;
        return count;
}

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        u32 error;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_error),
                          &error, sizeof(u32));
        return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        u32 error;
        int ret;

        if (kstrtou32(buf, 10, &error) != 0)
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_error),
                &error, sizeof(u32));
        if (ret)
                return ret;
        return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        u32 text_id;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_text_id),
                          &text_id, sizeof(u32));
        return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
{
        u32 text_id;
        int ret;

        if (kstrtou32(buf, 10, &text_id) != 0)
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_text_id),
                &text_id, sizeof(u32));
        if (ret)
                return ret;
        return count;
}

static ssize_t remaining_steps_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        u16 remaining_steps;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_remaining_steps),
                          &remaining_steps, sizeof(u16));
        return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        u16 remaining_steps;
        int ret;

        if (kstrtou16(buf, 10, &remaining_steps) != 0)
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_remaining_steps),
                &remaining_steps, sizeof(u16));
        if (ret)
                return ret;
        return count;
}

static void
bus_info_clear(void *v)
{
        struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) v;

        kfree(p->name);
        kfree(p->description);
        memset(p, 0, sizeof(struct visorchipset_bus_info));
}

static void
dev_info_clear(void *v)
{
        struct visorchipset_device_info *p =
                (struct visorchipset_device_info *) v;

        memset(p, 0, sizeof(struct visorchipset_device_info));
}

static struct visorchipset_bus_info *
bus_find(struct list_head *list, u32 bus_no)
{
        struct visorchipset_bus_info *p;

        list_for_each_entry(p, list, entry) {
                if (p->bus_no == bus_no)
                        return p;
        }

        return NULL;
}

static u8
check_chipset_events(void)
{
        int i;
        u8 send_msg = 1;
        /* Check events to determine if response should be sent */
        for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
                send_msg &= chipset_events[i];
        return send_msg;
}

static void
clear_chipset_events(void)
{
        int i;
        /* Clear chipset_events */
        for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
                chipset_events[i] = 0;
}

void
visorchipset_register_busdev_server(
                        struct visorchipset_busdev_notifiers *notifiers,
                        struct visorchipset_busdev_responders *responders,
                        struct ultra_vbus_deviceinfo *driver_info)
{
        down(&notifier_lock);
        if (!notifiers) {
                memset(&busdev_server_notifiers, 0,
                       sizeof(busdev_server_notifiers));
                serverregistered = 0;   /* clear flag */
        } else {
                busdev_server_notifiers = *notifiers;
                serverregistered = 1;   /* set flag */
        }
        if (responders)
                *responders = busdev_responders;
        if (driver_info)
                bus_device_info_init(driver_info, "chipset", "visorchipset",
                                     VERSION, NULL);

        up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);

void
visorchipset_register_busdev_client(
                        struct visorchipset_busdev_notifiers *notifiers,
                        struct visorchipset_busdev_responders *responders,
                        struct ultra_vbus_deviceinfo *driver_info)
{
        down(&notifier_lock);
        if (!notifiers) {
                memset(&busdev_client_notifiers, 0,
                       sizeof(busdev_client_notifiers));
                clientregistered = 0;   /* clear flag */
        } else {
                busdev_client_notifiers = *notifiers;
                clientregistered = 1;   /* set flag */
        }
        if (responders)
                *responders = busdev_responders;
        if (driver_info)
                bus_device_info_init(driver_info, "chipset(bolts)",
                                     "visorchipset", VERSION, NULL);
        up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);

static void
cleanup_controlvm_structures(void)
{
        struct visorchipset_bus_info *bi, *tmp_bi;
        struct visorchipset_device_info *di, *tmp_di;

        list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
                bus_info_clear(bi);
                list_del(&bi->entry);
                kfree(bi);
        }

        list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
                dev_info_clear(di);
                list_del(&di->entry);
                kfree(di);
        }
}

static void
chipset_init(struct controlvm_message *inmsg)
{
        static int chipset_inited;
        enum ultra_chipset_feature features = 0;
        int rc = CONTROLVM_RESP_SUCCESS;

        POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
        if (chipset_inited) {
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }
        chipset_inited = 1;
        POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

        /* Set features to indicate we support parahotplug (if Command
         * also supports it).
         */
        features = inmsg->cmd.init_chipset.features &
                   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

        /* Set the "reply" bit so Command knows this is a
         * features-aware driver.
         */
        features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
        if (rc < 0)
                cleanup_controlvm_structures();
        if (inmsg->hdr.flags.response_expected)
                controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

static void
controlvm_init_response(struct controlvm_message *msg,
                        struct controlvm_message_header *msg_hdr, int response)
{
        memset(msg, 0, sizeof(struct controlvm_message));
        memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
        msg->hdr.payload_bytes = 0;
        msg->hdr.payload_vm_offset = 0;
        msg->hdr.payload_max_bytes = 0;
        if (response < 0) {
                msg->hdr.flags.failed = 1;
                msg->hdr.completion_status = (u32) (-response);
        }
}

static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
         * back the deviceChangeState structure in the packet.
         */
        if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
            g_devicechangestate_packet.device_change_state.bus_no ==
            g_diagpool_bus_no &&
            g_devicechangestate_packet.device_change_state.dev_no ==
            g_diagpool_dev_no)
                outmsg.cmd = g_devicechangestate_packet;
        if (outmsg.hdr.flags.test_message == 1)
                return;

        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
                return;
        }
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
                               int response,
                               enum ultra_chipset_feature features)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.init_chipset.features = features;
        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
                return;
        }
}

static void controlvm_respond_physdev_changestate(
                struct controlvm_message_header *msg_hdr, int response,
                struct spar_segment_state state)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.device_change_state.state = state;
        outmsg.cmd.device_change_state.flags.phys_device = 1;
        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
                return;
        }
}

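/* Stash a bus or device controlvm message in the channel's saved-crash-message
 * area, presumably so it can be replayed when a crash (kdump) kernel boots.
 * The bus message is stored in the first slot at saved_crash_message_offset,
 * the device message in the second.  Errors are reported via POSTCODE only.
 */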
void
visorchipset_save_message(struct controlvm_message *msg,
                          enum crash_obj_type type)
{
        u32 crash_msg_offset;
        u16 crash_msg_count;

        /* get saved message count */
        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       saved_crash_message_count),
                              &crash_msg_count, sizeof(u16)) < 0) {
                POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }

        if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
                POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
                                 crash_msg_count,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }

        /* get saved crash message offset */
        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       saved_crash_message_offset),
                              &crash_msg_offset, sizeof(u32)) < 0) {
                POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }

        if (type == CRASH_BUS) {
                if (visorchannel_write(controlvm_channel,
                                       crash_msg_offset,
                                       msg,
                                       sizeof(struct controlvm_message)) < 0) {
                        POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
                                         POSTCODE_SEVERITY_ERR);
                        return;
                }
        } else {
                if (visorchannel_write(controlvm_channel,
                                       crash_msg_offset +
                                       sizeof(struct controlvm_message), msg,
                                       sizeof(struct controlvm_message)) < 0) {
                        POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
                                         POSTCODE_SEVERITY_ERR);
                        return;
                }
        }
}
EXPORT_SYMBOL_GPL(visorchipset_save_message);

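/* Complete an outstanding bus CREATE/DESTROY request: on a failed CREATE,
 * undo the device rows already added for this bus; on success, update the bus
 * state; then send any pending controlvm response and clear the bus info
 * after a successful DESTROY.
 */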
static void
bus_responder(enum controlvm_id cmd_id, u32 bus_no, int response)
{
        struct visorchipset_bus_info *p;
        bool need_clear = false;

        p = bus_find(&bus_info_list, bus_no);
        if (!p)
                return;

        if (response < 0) {
                if ((cmd_id == CONTROLVM_BUS_CREATE) &&
                    (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
                        /* undo the row we just created... */
                        delbusdevices(&dev_info_list, bus_no);
        } else {
                if (cmd_id == CONTROLVM_BUS_CREATE)
                        p->state.created = 1;
                if (cmd_id == CONTROLVM_BUS_DESTROY)
                        need_clear = true;
        }

        if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
                return;         /* no controlvm response needed */
        if (p->pending_msg_hdr.id != (u32)cmd_id)
                return;
        controlvm_respond(&p->pending_msg_hdr, response);
        p->pending_msg_hdr.id = CONTROLVM_INVALID;
        if (need_clear) {
                bus_info_clear(p);
                delbusdevices(&dev_info_list, bus_no);
        }
}

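/* Send the controlvm response for a DEVICE_CHANGESTATE request, echoing back
 * the bus/device numbers and the resulting segment state, then mark the
 * pending message as handled.
 */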
static void
device_changestate_responder(enum controlvm_id cmd_id,
                             u32 bus_no, u32 dev_no, int response,
                             struct spar_segment_state response_state)
{
        struct visorchipset_device_info *p;
        struct controlvm_message outmsg;

        p = finddevice(&dev_info_list, bus_no, dev_no);
        if (!p)
                return;
        if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
                return;         /* no controlvm response needed */
        if (p->pending_msg_hdr.id != cmd_id)
                return;

        controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);

        outmsg.cmd.device_change_state.bus_no = bus_no;
        outmsg.cmd.device_change_state.dev_no = dev_no;
        outmsg.cmd.device_change_state.state = response_state;

        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;

        p->pending_msg_hdr.id = CONTROLVM_INVALID;
}

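/* Complete an outstanding device CREATE/DESTROY request: update the device
 * state on success, send any pending controlvm response, and clear the device
 * info after a successful DESTROY.
 */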
static void
device_responder(enum controlvm_id cmd_id, u32 bus_no, u32 dev_no, int response)
{
        struct visorchipset_device_info *p;
        bool need_clear = false;

        p = finddevice(&dev_info_list, bus_no, dev_no);
        if (!p)
                return;
        if (response >= 0) {
                if (cmd_id == CONTROLVM_DEVICE_CREATE)
                        p->state.created = 1;
                if (cmd_id == CONTROLVM_DEVICE_DESTROY)
                        need_clear = true;
        }

        if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
                return;         /* no controlvm response needed */

        if (p->pending_msg_hdr.id != (u32)cmd_id)
                return;

        controlvm_respond(&p->pending_msg_hdr, response);
        p->pending_msg_hdr.id = CONTROLVM_INVALID;
        if (need_clear)
                dev_info_clear(p);
}

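/* Common tail for the bus message handlers: record any pending response
 * header, notify the registered visorbus/visorclientbus drivers of the bus
 * event, and fall back to responding directly when no notifier consumed it.
 */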
static void
bus_epilog(u32 bus_no,
           u32 cmd, struct controlvm_message_header *msg_hdr,
           int response, bool need_response)
{
        struct visorchipset_bus_info *bus_info;
        bool notified = false;

        bus_info = bus_find(&bus_info_list, bus_no);

        if (!bus_info)
                return;

        if (need_response) {
                memcpy(&bus_info->pending_msg_hdr, msg_hdr,
                       sizeof(struct controlvm_message_header));
        } else {
                bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
        }

        down(&notifier_lock);
        if (response == CONTROLVM_RESP_SUCCESS) {
                switch (cmd) {
                case CONTROLVM_BUS_CREATE:
                        /* We can't tell from the bus_create
                         * information which of our 2 bus flavors the
                         * devices on this bus will ultimately end up.
                         * FORTUNATELY, it turns out it is harmless to
                         * send the bus_create to both of them.  We can
                         * narrow things down a little bit, though,
                         * because we know: - BusDev_Server can handle
                         * either server or client devices
                         * - BusDev_Client can handle ONLY client
                         * devices */
                        if (busdev_server_notifiers.bus_create) {
                                (*busdev_server_notifiers.bus_create) (bus_no);
                                notified = true;
                        }
                        if ((!bus_info->flags.server) /*client */ &&
                            busdev_client_notifiers.bus_create) {
                                (*busdev_client_notifiers.bus_create) (bus_no);
                                notified = true;
                        }
                        break;
                case CONTROLVM_BUS_DESTROY:
                        if (busdev_server_notifiers.bus_destroy) {
                                (*busdev_server_notifiers.bus_destroy) (bus_no);
                                notified = true;
                        }
                        if ((!bus_info->flags.server) /*client */ &&
                            busdev_client_notifiers.bus_destroy) {
                                (*busdev_client_notifiers.bus_destroy) (bus_no);
                                notified = true;
                        }
                        break;
                }
        }
        if (notified)
                /* The callback function just called above is responsible
                 * for calling the appropriate visorchipset_busdev_responders
                 * function, which will call bus_responder()
                 */
                ;
        else
                bus_responder(cmd, bus_no, response);
        up(&notifier_lock);
}

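/* Common tail for the device message handlers: record any pending response
 * header, dispatch the create/changestate/destroy event to the appropriate
 * notifier set (server or client), and respond directly when no notifier
 * consumed the event.
 */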
static void
device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
              struct controlvm_message_header *msg_hdr, int response,
              bool need_response, bool for_visorbus)
{
        struct visorchipset_busdev_notifiers *notifiers;
        bool notified = false;

        struct visorchipset_device_info *dev_info =
                finddevice(&dev_info_list, bus_no, dev_no);
        char *envp[] = {
                "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
                NULL
        };

        if (!dev_info)
                return;

        if (for_visorbus)
                notifiers = &busdev_server_notifiers;
        else
                notifiers = &busdev_client_notifiers;
        if (need_response) {
                memcpy(&dev_info->pending_msg_hdr, msg_hdr,
                       sizeof(struct controlvm_message_header));
        } else {
                dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
        }

        down(&notifier_lock);
        if (response >= 0) {
                switch (cmd) {
                case CONTROLVM_DEVICE_CREATE:
                        if (notifiers->device_create) {
                                (*notifiers->device_create) (bus_no, dev_no);
                                notified = true;
                        }
                        break;
                case CONTROLVM_DEVICE_CHANGESTATE:
                        /* ServerReady / ServerRunning / SegmentStateRunning */
                        if (state.alive == segment_state_running.alive &&
                            state.operating ==
                                segment_state_running.operating) {
                                if (notifiers->device_resume) {
                                        (*notifiers->device_resume) (bus_no,
                                                                     dev_no);
                                        notified = true;
                                }
                        }
                        /* ServerNotReady / ServerLost / SegmentStateStandby */
                        else if (state.alive == segment_state_standby.alive &&
                                 state.operating ==
                                 segment_state_standby.operating) {
                                /* technically this is standby case
                                 * where server is lost
                                 */
                                if (notifiers->device_pause) {
                                        (*notifiers->device_pause) (bus_no,
                                                                    dev_no);
                                        notified = true;
                                }
                        } else if (state.alive == segment_state_paused.alive &&
                                   state.operating ==
                                   segment_state_paused.operating) {
                                /* this is lite pause where channel is
                                 * still valid just 'pause' of it
                                 */
                                if (bus_no == g_diagpool_bus_no &&
                                    dev_no == g_diagpool_dev_no) {
                                        /* this will trigger the
                                         * diag_shutdown.sh script in
                                         * the visorchipset hotplug */
                                        kobject_uevent_env
                                            (&visorchipset_platform_device.dev.
                                             kobj, KOBJ_ONLINE, envp);
                                }
                        }
                        break;
                case CONTROLVM_DEVICE_DESTROY:
                        if (notifiers->device_destroy) {
                                (*notifiers->device_destroy) (bus_no, dev_no);
                                notified = true;
                        }
                        break;
                }
        }
        if (notified)
                /* The callback function just called above is responsible
                 * for calling the appropriate visorchipset_busdev_responders
                 * function, which will call device_responder()
                 */
                ;
        else
                device_responder(cmd, bus_no, dev_no, response);
        up(&notifier_lock);
}

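/* Handle a CONTROLVM_BUS_CREATE message: allocate and populate a
 * visorchipset_bus_info entry for the new bus, add it to bus_info_list, and
 * let bus_epilog() notify the bus drivers and send any response.
 */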
static void
bus_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->create_bus.bus_no;
        int rc = CONTROLVM_RESP_SUCCESS;
        struct visorchipset_bus_info *bus_info;

        bus_info = bus_find(&bus_info_list, bus_no);
        if (bus_info && (bus_info->state.created == 1)) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }
        bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
        if (!bus_info) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                goto cleanup;
        }

        INIT_LIST_HEAD(&bus_info->entry);
        bus_info->bus_no = bus_no;

        POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

        if (inmsg->hdr.flags.test_message == 1)
                bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
        else
                bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;

        bus_info->flags.server = inmsg->hdr.flags.server;
        bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
        bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
        bus_info->chan_info.channel_type_uuid =
                        cmd->create_bus.bus_data_type_uuid;
        bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;

        list_add(&bus_info->entry, &bus_info_list);

        POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
        bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

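/* Handle a CONTROLVM_BUS_DESTROY message: validate that the bus exists and
 * was created, then let bus_epilog() notify the bus drivers and respond.
 */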
static void
bus_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->destroy_bus.bus_no;
        struct visorchipset_bus_info *bus_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        bus_info = bus_find(&bus_info_list, bus_no);
        if (!bus_info)
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        else if (bus_info->state.created == 0)
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

        bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

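/* Handle a CONTROLVM_BUS_CONFIGURE message: attach the guest partition
 * handle, partition uuid, and name (parsed from the message payload) to an
 * existing bus, then let bus_epilog() respond.
 */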
static void
bus_configure(struct controlvm_message *inmsg,
              struct parser_context *parser_ctx)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no;
        struct visorchipset_bus_info *bus_info;
        int rc = CONTROLVM_RESP_SUCCESS;
        char s[99];

        bus_no = cmd->configure_bus.bus_no;
        POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
                         POSTCODE_SEVERITY_INFO);

        bus_info = bus_find(&bus_info_list, bus_no);
        if (!bus_info) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        } else if (bus_info->state.created == 0) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        } else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
        } else {
                bus_info->partition_handle = cmd->configure_bus.guest_handle;
                bus_info->partition_uuid = parser_id_get(parser_ctx);
                parser_param_start(parser_ctx, PARSERSTRING_NAME);
                bus_info->name = parser_string_get(parser_ctx);

                visorchannel_uuid_id(&bus_info->partition_uuid, s);
                POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
                                 POSTCODE_SEVERITY_INFO);
        }
        bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

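/* Handle a CONTROLVM_DEVICE_CREATE message: validate the bus, allocate and
 * populate a visorchipset_device_info entry (channel address, size, type
 * uuid, and interrupt info), add it to dev_info_list, and let device_epilog()
 * notify the bus drivers and respond.
 */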
static void
my_device_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->create_device.bus_no;
        u32 dev_no = cmd->create_device.dev_no;
        struct visorchipset_device_info *dev_info;
        struct visorchipset_bus_info *bus_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = finddevice(&dev_info_list, bus_no, dev_no);
        if (dev_info && (dev_info->state.created == 1)) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }
        bus_info = bus_find(&bus_info_list, bus_no);
        if (!bus_info) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
                goto cleanup;
        }
        if (bus_info->state.created == 0) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
                goto cleanup;
        }
        dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
        if (!dev_info) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                goto cleanup;
        }

        INIT_LIST_HEAD(&dev_info->entry);
        dev_info->bus_no = bus_no;
        dev_info->dev_no = dev_no;
        dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
        POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
                         POSTCODE_SEVERITY_INFO);

        if (inmsg->hdr.flags.test_message == 1)
                dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
        else
                dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
        dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
        dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
        dev_info->chan_info.channel_type_uuid =
                        cmd->create_device.data_type_uuid;
        dev_info->chan_info.intr = cmd->create_device.intr;
        list_add(&dev_info->entry, &dev_info_list);
        POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
                         POSTCODE_SEVERITY_INFO);
cleanup:
        /* get the bus and devNo for DiagPool channel */
        if (dev_info &&
            is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
                g_diagpool_bus_no = bus_no;
                g_diagpool_dev_no = dev_no;
        }
        /* dev_info can still be NULL here (error paths above); avoid
         * dereferencing it when computing the for_visorbus flag
         */
        device_epilog(bus_no, dev_no, segment_state_running,
                      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
                      inmsg->hdr.flags.response_expected == 1,
                      dev_info ?
                      FOR_VISORBUS(dev_info->chan_info.channel_type_uuid) :
                      true);
}

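/* Handle a CONTROLVM_DEVICE_CHANGESTATE message: validate the device, then
 * pass the requested segment state to device_epilog(), which maps it onto a
 * pause/resume notification.
 */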
static void
my_device_changestate(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->device_change_state.bus_no;
        u32 dev_no = cmd->device_change_state.dev_no;
        struct spar_segment_state state = cmd->device_change_state.state;
        struct visorchipset_device_info *dev_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = finddevice(&dev_info_list, bus_no, dev_no);
        if (!dev_info) {
                POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        } else if (dev_info->state.created == 0) {
                POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        }
        if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
                device_epilog(bus_no, dev_no, state,
                              CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
                              inmsg->hdr.flags.response_expected == 1,
                              FOR_VISORBUS(
                                        dev_info->chan_info.channel_type_uuid));
}

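/* Handle a CONTROLVM_DEVICE_DESTROY message: validate the device, then let
 * device_epilog() notify the bus drivers and respond.
 */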
static void
my_device_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->destroy_device.bus_no;
        u32 dev_no = cmd->destroy_device.dev_no;
        struct visorchipset_device_info *dev_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = finddevice(&dev_info_list, bus_no, dev_no);
        if (!dev_info)
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        else if (dev_info->state.created == 0)
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

        if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
                device_epilog(bus_no, dev_no, segment_state_running,
                              CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
                              inmsg->hdr.flags.response_expected == 1,
                              FOR_VISORBUS(
                                        dev_info->chan_info.channel_type_uuid));
}

/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS for
 * success, or a negative CONTROLVM_RESP_ERROR_* code for failure.
 */
static int
initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
                                  struct visor_controlvm_payload_info *info)
{
        u8 __iomem *payload = NULL;
        int rc = CONTROLVM_RESP_SUCCESS;

        if (!info) {
                rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
                goto cleanup;
        }
        memset(info, 0, sizeof(struct visor_controlvm_payload_info));
        if ((offset == 0) || (bytes == 0)) {
                rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
                goto cleanup;
        }
        payload = ioremap_cache(phys_addr + offset, bytes);
        if (!payload) {
                rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
                goto cleanup;
        }

        info->offset = offset;
        info->bytes = bytes;
        info->ptr = payload;

cleanup:
        if (rc < 0) {
                if (payload) {
                        iounmap(payload);
                        payload = NULL;
                }
        }
        return rc;
}

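/* Undo initialize_controlvm_payload_info(): unmap the payload area and clear
 * the bookkeeping struct.
 */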
static void
destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
        if (info->ptr) {
                iounmap(info->ptr);
                info->ptr = NULL;
        }
        memset(info, 0, sizeof(struct visor_controlvm_payload_info));
}

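/* Read the request payload offset and size from the controlvm channel header
 * and map the payload area into controlvm_payload_info.
 */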
static void
initialize_controlvm_payload(void)
{
        HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
        u64 payload_offset = 0;
        u32 payload_bytes = 0;

        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       request_payload_offset),
                              &payload_offset, sizeof(payload_offset)) < 0) {
                POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }
        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       request_payload_bytes),
                              &payload_bytes, sizeof(payload_bytes)) < 0) {
                POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }
        initialize_controlvm_payload_info(phys_addr,
                                          payload_offset, payload_bytes,
                                          &controlvm_payload_info);
}

/*  Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 *  Returns CONTROLVM_RESP_xxx code.
 */
int
visorchipset_chipset_ready(void)
{
        kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
        return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);

1362 int
1363 visorchipset_chipset_selftest(void)
1364 {
1365         char env_selftest[20];
1366         char *envp[] = { env_selftest, NULL };
1367
1368         sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1369         kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1370                            envp);
1371         return CONTROLVM_RESP_SUCCESS;
1372 }
1373 EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1374
1375 /*  Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1376  *  Returns CONTROLVM_RESP_xxx code.
1377  */
1378 int
1379 visorchipset_chipset_notready(void)
1380 {
1381         kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1382         return CONTROLVM_RESP_SUCCESS;
1383 }
1384 EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1385
1386 static void
1387 chipset_ready(struct controlvm_message_header *msg_hdr)
1388 {
1389         int rc = visorchipset_chipset_ready();
1390
1391         if (rc != CONTROLVM_RESP_SUCCESS)
1392                 rc = -rc;
1393         if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1394                 controlvm_respond(msg_hdr, rc);
1395         if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
1396                 /* Send CHIPSET_READY response when all modules have been loaded
1397                  * and disks mounted for the partition
1398                  */
1399                 g_chipset_msg_hdr = *msg_hdr;
1400         }
1401 }
1402
1403 static void
1404 chipset_selftest(struct controlvm_message_header *msg_hdr)
1405 {
1406         int rc = visorchipset_chipset_selftest();
1407
1408         if (rc != CONTROLVM_RESP_SUCCESS)
1409                 rc = -rc;
1410         if (msg_hdr->flags.response_expected)
1411                 controlvm_respond(msg_hdr, rc);
1412 }
1413
1414 static void
1415 chipset_notready(struct controlvm_message_header *msg_hdr)
1416 {
1417         int rc = visorchipset_chipset_notready();
1418
1419         if (rc != CONTROLVM_RESP_SUCCESS)
1420                 rc = -rc;
1421         if (msg_hdr->flags.response_expected)
1422                 controlvm_respond(msg_hdr, rc);
1423 }
1424
1425 /* Fetch the next message from the CONTROLVM_QUEUE_EVENT queue in the
1426  * controlvm channel.  Returns true only when a real (non-test)
1427  * message was retrieved. */
1428 static bool
1429 read_controlvm_event(struct controlvm_message *msg)
1430 {
1431         if (visorchannel_signalremove(controlvm_channel,
1432                                       CONTROLVM_QUEUE_EVENT, msg)) {
1433                 /* got a message */
1434                 if (msg->hdr.flags.test_message == 1)
1435                         return false;
1436                 return true;
1437         }
1438         return false;
1439 }
1440
1441 /*
1442  * The general parahotplug flow works as follows.  The visorchipset
1443  * driver receives a DEVICE_CHANGESTATE message from Command
1444  * specifying a physical device to enable or disable.  The CONTROLVM
1445  * message handler calls parahotplug_process_message, which adds the
1446  * message to a global list and kicks off a udev event that causes a
1447  * user-level script to enable or disable the specified device.  The
1448  * udev script then writes the request id to the parahotplug/
1449  * deviceenabled (or devicedisabled) sysfs attribute, which causes
1450  * deviceenabled_store (or devicedisabled_store) to run; the matching
1451  * CONTROLVM message is then retrieved from the list and responded to.
1452  */
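     /* For example, a disable request for bus 1, dev_no 8 (PCI slot 1,
      * function 0) kicks off a KOBJ_CHANGE uevent carrying:
      *   SPAR_PARAHOTPLUG=1, SPAR_PARAHOTPLUG_ID=<id>,
      *   SPAR_PARAHOTPLUG_STATE=0, SPAR_PARAHOTPLUG_BUS=1,
      *   SPAR_PARAHOTPLUG_DEVICE=1, SPAR_PARAHOTPLUG_FUNCTION=0
      * and the script acknowledges completion by writing <id> back to
      * the parahotplug/devicedisabled attribute (see
      * devicedisabled_store()).
      */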
1453
1454 #define PARAHOTPLUG_TIMEOUT_MS 2000
1455
1456 /*
1457  * Generate a unique int to match an outstanding CONTROLVM message
1458  * with the udev script's sysfs response
1459  */
1460 static int
1461 parahotplug_next_id(void)
1462 {
1463         static atomic_t id = ATOMIC_INIT(0);
1464
1465         return atomic_inc_return(&id);
1466 }
1467
1468 /*
1469  * Returns the time (in jiffies) when a CONTROLVM message on the list
1470  * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1471  */
1472 static unsigned long
1473 parahotplug_next_expiration(void)
1474 {
1475         return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1476 }
1477
1478 /*
1479  * Create a parahotplug_request, which is basically a wrapper for a
1480  * CONTROLVM_MESSAGE that we can stick on a list
1481  */
1482 static struct parahotplug_request *
1483 parahotplug_request_create(struct controlvm_message *msg)
1484 {
1485         struct parahotplug_request *req;
1486
1487         req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
1488         if (!req)
1489                 return NULL;
1490
1491         req->id = parahotplug_next_id();
1492         req->expiration = parahotplug_next_expiration();
1493         req->msg = *msg;
1494
1495         return req;
1496 }
1497
1498 /*
1499  * Free a parahotplug_request.
1500  */
1501 static void
1502 parahotplug_request_destroy(struct parahotplug_request *req)
1503 {
1504         kfree(req);
1505 }
1506
1507 /*
1508  * Cause uevent to run the user level script to do the disable/enable
1509  * specified in (the CONTROLVM message in) the specified
1510  * parahotplug_request
1511  */
1512 static void
1513 parahotplug_request_kickoff(struct parahotplug_request *req)
1514 {
1515         struct controlvm_message_packet *cmd = &req->msg.cmd;
1516         char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1517             env_func[40];
1518         char *envp[] = {
1519                 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1520         };
1521
1522         sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1523         sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1524         sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1525                 cmd->device_change_state.state.active);
1526         sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1527                 cmd->device_change_state.bus_no);
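              /* dev_no packs the PCI slot and function: slot in the
               * upper bits, function in the low 3 bits, mirroring the
               * usual PCI devfn encoding (cf. PCI_SLOT()/PCI_FUNC())
               */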
1528         sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1529                 cmd->device_change_state.dev_no >> 3);
1530         sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1531                 cmd->device_change_state.dev_no & 0x7);
1532
1533         kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1534                            envp);
1535 }
1536
1537 /*
1538  * Remove any request from the list that's been on there too long and
1539  * respond with an error.
1540  */
1541 static void
1542 parahotplug_process_list(void)
1543 {
1544         struct list_head *pos;
1545         struct list_head *tmp;
1546
1547         spin_lock(&parahotplug_request_list_lock);
1548
1549         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1550                 struct parahotplug_request *req =
1551                     list_entry(pos, struct parahotplug_request, list);
1552
1553                 if (!time_after_eq(jiffies, req->expiration))
1554                         continue;
1555
1556                 list_del(pos);
1557                 if (req->msg.hdr.flags.response_expected)
1558                         controlvm_respond_physdev_changestate(
1559                                 &req->msg.hdr,
1560                                 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1561                                 req->msg.cmd.device_change_state.state);
1562                 parahotplug_request_destroy(req);
1563         }
1564
1565         spin_unlock(&parahotplug_request_list_lock);
1566 }
1567
1568 /*
1569  * Called from the sysfs store handlers, which means the user script
1570  * has finished the enable/disable.  Find the request matching the
1571  * given id, and respond to its CONTROLVM message with success.
1572  */
1573 static int
1574 parahotplug_request_complete(int id, u16 active)
1575 {
1576         struct list_head *pos;
1577         struct list_head *tmp;
1578
1579         spin_lock(&parahotplug_request_list_lock);
1580
1581         /* Look for a request matching "id". */
1582         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1583                 struct parahotplug_request *req =
1584                     list_entry(pos, struct parahotplug_request, list);
1585                 if (req->id == id) {
1586                         /* Found a match.  Remove it from the list and
1587                          * respond.
1588                          */
1589                         list_del(pos);
1590                         spin_unlock(&parahotplug_request_list_lock);
1591                         req->msg.cmd.device_change_state.state.active = active;
1592                         if (req->msg.hdr.flags.response_expected)
1593                                 controlvm_respond_physdev_changestate(
1594                                         &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1595                                         req->msg.cmd.device_change_state.state);
1596                         parahotplug_request_destroy(req);
1597                         return 0;
1598                 }
1599         }
1600
1601         spin_unlock(&parahotplug_request_list_lock);
1602         return -1;
1603 }
1604
1605 /*
1606  * Enables or disables a PCI device by kicking off a udev script
1607  */
1608 static void
1609 parahotplug_process_message(struct controlvm_message *inmsg)
1610 {
1611         struct parahotplug_request *req;
1612
1613         req = parahotplug_request_create(inmsg);
1614
1615         if (!req)
1616                 return;
1617
1618         if (inmsg->cmd.device_change_state.state.active) {
1619                 /* For enable messages, just respond with success
1620                  * right away.  This is a bit of a hack, but there are
1621                  * issues with the early enable messages we get (with
1622                  * either the udev script not detecting that the device
1623                  * is up, or not getting called at all).  Fortunately
1624                  * the messages that get lost don't matter anyway, as
1625                  * devices are automatically enabled at
1626                  * initialization.
1627                  */
1628                 parahotplug_request_kickoff(req);
1629                 controlvm_respond_physdev_changestate(&inmsg->hdr,
1630                         CONTROLVM_RESP_SUCCESS,
1631                         inmsg->cmd.device_change_state.state);
1632                 parahotplug_request_destroy(req);
1633         } else {
1634                 /* For disable messages, add the request to the
1635                  * request list before kicking off the udev script.  It
1636                  * won't get responded to until the script has
1637                  * indicated it's done.
1638                  */
1639                 spin_lock(&parahotplug_request_list_lock);
1640                 list_add_tail(&req->list, &parahotplug_request_list);
1641                 spin_unlock(&parahotplug_request_list_lock);
1642
1643                 parahotplug_request_kickoff(req);
1644         }
1645 }
1646
1647 /* Process a controlvm message.
1648  * Return result:
1649  *    false - the controlvm message was NOT processed and processing
1650  *            must be retried before reading the next controlvm
1651  *            message; this can happen, e.g., when we must throttle
1652  *            the allocation of memory into which to copy out
1653  *            controlvm payload data
1654  *    true  - processing of the controlvm message completed,
1655  *            either successfully or with an error
1656  */
1658 static bool
1659 handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
1660 {
1661         struct controlvm_message_packet *cmd = &inmsg.cmd;
1662         u64 parm_addr;
1663         u32 parm_bytes;
1664         struct parser_context *parser_ctx = NULL;
1665         bool local_addr;
1666         struct controlvm_message ackmsg;
1667
1668         /* create parsing context if necessary */
1669         local_addr = (inmsg.hdr.flags.test_message == 1);
1670         if (channel_addr == 0)
1671                 return true;
1672         parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1673         parm_bytes = inmsg.hdr.payload_bytes;
1674
1675         /* Parameter and channel addresses within test messages actually lie
1676          * within our OS-controlled memory.  We need to know that, because it
1677          * makes a difference in how we compute the virtual address.
1678          */
1679         if (parm_addr != 0 && parm_bytes != 0) {
1680                 bool retry = false;
1681
1682                 parser_ctx =
1683                     parser_init_byte_stream(parm_addr, parm_bytes,
1684                                             local_addr, &retry);
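                     /* a NULL context with retry set means memory for
                      * copying out the payload could not be obtained
                      * just now; bail out so the caller stashes this
                      * message and retries it on the next poll (see
                      * controlvm_periodic_work())
                      */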
1685                 if (!parser_ctx && retry)
1686                         return false;
1687         }
1688
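              /* for messages that arrived over the channel (i.e., not
               * test messages), immediately queue an ACK to confirm
               * receipt before acting on the message
               */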
1689         if (!local_addr) {
1690                 controlvm_init_response(&ackmsg, &inmsg.hdr,
1691                                         CONTROLVM_RESP_SUCCESS);
1692                 if (controlvm_channel)
1693                         visorchannel_signalinsert(controlvm_channel,
1694                                                   CONTROLVM_QUEUE_ACK,
1695                                                   &ackmsg);
1696         }
1697         switch (inmsg.hdr.id) {
1698         case CONTROLVM_CHIPSET_INIT:
1699                 chipset_init(&inmsg);
1700                 break;
1701         case CONTROLVM_BUS_CREATE:
1702                 bus_create(&inmsg);
1703                 break;
1704         case CONTROLVM_BUS_DESTROY:
1705                 bus_destroy(&inmsg);
1706                 break;
1707         case CONTROLVM_BUS_CONFIGURE:
1708                 bus_configure(&inmsg, parser_ctx);
1709                 break;
1710         case CONTROLVM_DEVICE_CREATE:
1711                 my_device_create(&inmsg);
1712                 break;
1713         case CONTROLVM_DEVICE_CHANGESTATE:
1714                 if (cmd->device_change_state.flags.phys_device) {
1715                         parahotplug_process_message(&inmsg);
1716                 } else {
1717                         /* save the hdr and cmd structures for later use
1718                          * when sending back the response to Command */
1719                         my_device_changestate(&inmsg);
1720                         g_diag_msg_hdr = inmsg.hdr;
1721                         g_devicechangestate_packet = inmsg.cmd;
1722                         break;
1723                 }
1724                 break;
1725         case CONTROLVM_DEVICE_DESTROY:
1726                 my_device_destroy(&inmsg);
1727                 break;
1728         case CONTROLVM_DEVICE_CONFIGURE:
1729                 /* no op for now; just respond that we succeeded */
1730                 if (inmsg.hdr.flags.response_expected)
1731                         controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1732                 break;
1733         case CONTROLVM_CHIPSET_READY:
1734                 chipset_ready(&inmsg.hdr);
1735                 break;
1736         case CONTROLVM_CHIPSET_SELFTEST:
1737                 chipset_selftest(&inmsg.hdr);
1738                 break;
1739         case CONTROLVM_CHIPSET_STOP:
1740                 chipset_notready(&inmsg.hdr);
1741                 break;
1742         default:
1743                 if (inmsg.hdr.flags.response_expected)
1744                         controlvm_respond(&inmsg.hdr,
1745                                 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1746                 break;
1747         }
1748
1749         if (parser_ctx) {
1750                 parser_done(parser_ctx);
1751                 parser_ctx = NULL;
1752         }
1753         return true;
1754 }
1755
1756 static HOSTADDRESS controlvm_get_channel_address(void)
1757 {
1758         u64 addr = 0;
1759         u32 size = 0;
1760
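              /* ask the hypervisor, via a vmcall, where the controlvm
               * channel lives; the size it reports is not used here
               */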
1761         if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1762                 return 0;
1763
1764         return addr;
1765 }
1766
1767 static void
1768 controlvm_periodic_work(struct work_struct *work)
1769 {
1770         struct controlvm_message inmsg;
1771         bool got_command = false;
1772         bool handle_command_failed = false;
1773         static u64 poll_count;
1774
1775         /* make sure visorbus server is registered for controlvm callbacks */
1776         if (visorchipset_serverregwait && !serverregistered)
1777                 goto cleanup;
1778         /* make sure visorclientbus server is registered for controlvm
1779          * callbacks
1780          */
1781         if (visorchipset_clientregwait && !clientregistered)
1782                 goto cleanup;
1783
1784         poll_count++;
1785         if (poll_count < 250)
1786                 goto cleanup;   /* skip the first few poll intervals */
1789
1790         /* Check events to determine if response to CHIPSET_READY
1791          * should be sent
1792          */
1793         if (visorchipset_holdchipsetready &&
1794             (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1795                 if (check_chipset_events() == 1) {
1796                         controlvm_respond(&g_chipset_msg_hdr, 0);
1797                         clear_chipset_events();
1798                         memset(&g_chipset_msg_hdr, 0,
1799                                sizeof(struct controlvm_message_header));
1800                 }
1801         }
1802
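              /* drain and discard everything on our response queue;
               * nothing here acts on those messages
               */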
1803         while (visorchannel_signalremove(controlvm_channel,
1804                                          CONTROLVM_QUEUE_RESPONSE,
1805                                          &inmsg))
1806                 ;
1807         if (!got_command) {
1808                 if (controlvm_pending_msg_valid) {
1809                         /* we throttled processing of a prior
1810                          * msg, so try to process it again
1811                          * rather than reading a new one
1812                          */
1813                         inmsg = controlvm_pending_msg;
1814                         controlvm_pending_msg_valid = false;
1815                         got_command = true;
1816                 } else {
1817                         got_command = read_controlvm_event(&inmsg);
1818                 }
1819         }
1820
1821         handle_command_failed = false;
1822         while (got_command && (!handle_command_failed)) {
1823                 most_recent_message_jiffies = jiffies;
1824                 if (handle_command(inmsg,
1825                                    visorchannel_get_physaddr
1826                                    (controlvm_channel)))
1827                         got_command = read_controlvm_event(&inmsg);
1828                 else {
1829                         /* this is a scenario where throttling
1830                          * is required, but probably NOT an
1831                          * error; we stash the current
1832                          * controlvm msg so we will attempt to
1833                          * reprocess it on our next loop
1834                          */
1835                         handle_command_failed = true;
1836                         controlvm_pending_msg = inmsg;
1837                         controlvm_pending_msg_valid = true;
1838                 }
1839         }
1840
1841         /* time out any expired parahotplug requests */
1842         parahotplug_process_list();
1843
1844 cleanup:
1845
1846         if (time_after(jiffies,
1847                        most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
1848                 /* it's been longer than MIN_IDLE_SECONDS since we
1849                  * processed our last controlvm message; slow down the
1850                  * polling
1851                  */
1852                 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1853                         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1854         } else {
1855                 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1856                         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1857         }
1858
1859         queue_delayed_work(periodic_controlvm_workqueue,
1860                            &periodic_controlvm_work, poll_jiffies);
1861 }
1862
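     /* One-shot work function used in place of controlvm_periodic_work()
      * when booting a kdump (crash) kernel (see visorchipset_init()):
      * instead of polling the controlvm channel, it replays the
      * bus-create and device-create messages saved in the channel,
      * presumably so the crash kernel can reach the storage device it
      * will dump to.
      */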
1863 static void
1864 setup_crash_devices_work_queue(struct work_struct *work)
1865 {
1866         struct controlvm_message local_crash_bus_msg;
1867         struct controlvm_message local_crash_dev_msg;
1868         struct controlvm_message msg;
1869         u32 local_crash_msg_offset;
1870         u16 local_crash_msg_count;
1871
1872         /* make sure visorbus server is registered for controlvm callbacks */
1873         if (visorchipset_serverregwait && !serverregistered)
1874                 goto cleanup;
1875
1876         /* make sure visorclientbus server is registered for controlvm
1877          * callbacks
1878          */
1879         if (visorchipset_clientregwait && !clientregistered)
1880                 goto cleanup;
1881
1882         POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1883
1884         /* send init chipset msg; zero it first so the header flags
1885          * start in a known state instead of as stack garbage
1886          */
1887         memset(&msg, 0, sizeof(msg));
1888         msg.hdr.id = CONTROLVM_CHIPSET_INIT;
1889         msg.cmd.init_chipset.bus_count = 23;
1890         msg.cmd.init_chipset.switch_count = 0;
1888
1889         chipset_init(&msg);
1890
1891         /* get saved message count */
1892         if (visorchannel_read(controlvm_channel,
1893                               offsetof(struct spar_controlvm_channel_protocol,
1894                                        saved_crash_message_count),
1895                               &local_crash_msg_count, sizeof(u16)) < 0) {
1896                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1897                                  POSTCODE_SEVERITY_ERR);
1898                 return;
1899         }
1900
1901         if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
1902                 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
1903                                  local_crash_msg_count,
1904                                  POSTCODE_SEVERITY_ERR);
1905                 return;
1906         }
1907
1908         /* get saved crash message offset */
1909         if (visorchannel_read(controlvm_channel,
1910                               offsetof(struct spar_controlvm_channel_protocol,
1911                                        saved_crash_message_offset),
1912                               &local_crash_msg_offset, sizeof(u32)) < 0) {
1913                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1914                                  POSTCODE_SEVERITY_ERR);
1915                 return;
1916         }
1917
1918         /* read create device message for storage bus offset */
1919         if (visorchannel_read(controlvm_channel,
1920                               local_crash_msg_offset,
1921                               &local_crash_bus_msg,
1922                               sizeof(struct controlvm_message)) < 0) {
1923                 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1924                                  POSTCODE_SEVERITY_ERR);
1925                 return;
1926         }
1927
1928         /* read create device message for storage device */
1929         if (visorchannel_read(controlvm_channel,
1930                               local_crash_msg_offset +
1931                               sizeof(struct controlvm_message),
1932                               &local_crash_dev_msg,
1933                               sizeof(struct controlvm_message)) < 0) {
1934                 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1935                                  POSTCODE_SEVERITY_ERR);
1936                 return;
1937         }
1938
1939         /* reuse IOVM create bus message */
1940         if (local_crash_bus_msg.cmd.create_bus.channel_addr != 0) {
1941                 bus_create(&local_crash_bus_msg);
1942         } else {
1943                 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1944                                  POSTCODE_SEVERITY_ERR);
1945                 return;
1946         }
1947
1948         /* reuse create device message for storage device */
1949         if (local_crash_dev_msg.cmd.create_device.channel_addr != 0) {
1950                 my_device_create(&local_crash_dev_msg);
1951         } else {
1952                 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1953                                  POSTCODE_SEVERITY_ERR);
1954                 return;
1955         }
1956         POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
1957         return;
1958
1959 cleanup:
1960
1961         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1962
1963         queue_delayed_work(periodic_controlvm_workqueue,
1964                            &periodic_controlvm_work, poll_jiffies);
1965 }
1966
1967 static void
1968 bus_create_response(u32 bus_no, int response)
1969 {
1970         bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
1971 }
1972
1973 static void
1974 bus_destroy_response(u32 bus_no, int response)
1975 {
1976         bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
1977 }
1978
1979 static void
1980 device_create_response(u32 bus_no, u32 dev_no, int response)
1981 {
1982         device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
1983 }
1984
1985 static void
1986 device_destroy_response(u32 bus_no, u32 dev_no, int response)
1987 {
1988         device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
1989 }
1990
1991 void
1992 visorchipset_device_pause_response(u32 bus_no, u32 dev_no, int response)
1993 {
1994         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1995                                      bus_no, dev_no, response,
1996                                      segment_state_standby);
1997 }
1998 EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
1999
2000 static void
2001 device_resume_response(u32 bus_no, u32 dev_no, int response)
2002 {
2003         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2004                                      bus_no, dev_no, response,
2005                                      segment_state_running);
2006 }
2007
2008 bool
2009 visorchipset_get_bus_info(u32 bus_no, struct visorchipset_bus_info *bus_info)
2010 {
2011         void *p = bus_find(&bus_info_list, bus_no);
2012
2013         if (!p)
2014                 return false;
2015         memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2016         return true;
2017 }
2018 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2019
2020 bool
2021 visorchipset_set_bus_context(u32 bus_no, void *context)
2022 {
2023         struct visorchipset_bus_info *p = bus_find(&bus_info_list, bus_no);
2024
2025         if (!p)
2026                 return false;
2027         p->bus_driver_context = context;
2028         return true;
2029 }
2030 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2031
2032 bool
2033 visorchipset_get_device_info(u32 bus_no, u32 dev_no,
2034                              struct visorchipset_device_info *dev_info)
2035 {
2036         void *p = finddevice(&dev_info_list, bus_no, dev_no);
2037
2038         if (!p)
2039                 return false;
2040         memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2041         return true;
2042 }
2043 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2044
2045 bool
2046 visorchipset_set_device_context(u32 bus_no, u32 dev_no, void *context)
2047 {
2048         struct visorchipset_device_info *p =
2049                         finddevice(&dev_info_list, bus_no, dev_no);
2050
2051         if (!p)
2052                 return false;
2053         p->bus_driver_context = context;
2054         return true;
2055 }
2056 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2057
2058 /* Generic wrapper function for allocating memory from a kmem_cache pool.
2059  */
2060 void *
2061 visorchipset_cache_alloc(struct kmem_cache *pool, bool ok_to_block,
2062                          char *fn, int ln)
2063 {
2064         gfp_t gfp;
2066
2067         if (ok_to_block)
2068                 gfp = GFP_KERNEL;
2069         else
2070                 gfp = GFP_ATOMIC;
2071         /* __GFP_NORETRY means "ok to fail", meaning
2072          * kmem_cache_alloc() can return NULL, implying the caller CAN
2073          * cope with failure.  If you do NOT specify __GFP_NORETRY,
2074          * Linux will go to extreme measures to get memory for you
2075          * (like, invoke oom killer), which will probably cripple the
2076          * system.
2077          */
2078         gfp |= __GFP_NORETRY;
2079         return kmem_cache_alloc(pool, gfp);
2084 }
2085
2086 /* Generic wrapper function for freeing memory from a kmem_cache pool.
2087  */
2088 void
2089 visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2090 {
2091         if (!p)
2092                 return;
2093
2094         kmem_cache_free(pool, p);
2095 }
2096
2097 static ssize_t chipsetready_store(struct device *dev,
2098                                   struct device_attribute *attr,
2099                                   const char *buf, size_t count)
2100 {
2101         char msgtype[64];
2102
2103         if (sscanf(buf, "%63s", msgtype) != 1)
2104                 return -EINVAL;
2105
2106         if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2107                 chipset_events[0] = 1;
2108                 return count;
2109         } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2110                 chipset_events[1] = 1;
2111                 return count;
2112         }
2113         return -EINVAL;
2114 }
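     /* For example, assuming this attribute surfaces under sysfs as
      * .../visorchipset/<group>/chipsetready (the attribute grouping is
      * defined elsewhere, not in this file), a guest boot script would
      * signal:
      *   echo MODULES_LOADED > chipsetready
      *   echo CALLHOMEDISK_MOUNTED > chipsetready
      * after which check_chipset_events() reports both events set and
      * any held CHIPSET_READY response is sent (see
      * controlvm_periodic_work()).
      */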
2115
2116 /* The parahotplug/devicedisabled interface gets called by our support script
2117  * when an SR-IOV device has been shut down. The ID is passed to the script
2118  * and then passed back when the device has been removed.
2119  */
2120 static ssize_t devicedisabled_store(struct device *dev,
2121                                     struct device_attribute *attr,
2122                                     const char *buf, size_t count)
2123 {
2124         unsigned int id;
2125
2126         if (kstrtouint(buf, 10, &id) != 0)
2127                 return -EINVAL;
2128
2129         parahotplug_request_complete(id, 0);
2130         return count;
2131 }
2132
2133 /* The parahotplug/deviceenabled interface gets called by our support script
2134  * when an SR-IOV device has been recovered. The ID is passed to the script
2135  * and then passed back when the device has been brought back up.
2136  */
2137 static ssize_t deviceenabled_store(struct device *dev,
2138                                    struct device_attribute *attr,
2139                                    const char *buf, size_t count)
2140 {
2141         unsigned int id;
2142
2143         if (kstrtouint(buf, 10, &id) != 0)
2144                 return -EINVAL;
2145
2146         parahotplug_request_complete(id, 1);
2147         return count;
2148 }
2149
2150 static int __init
2151 visorchipset_init(void)
2152 {
2153         int rc = 0;
2154         HOSTADDRESS addr;
2155
2156         if (!unisys_spar_platform)
2157                 return -ENODEV;
2158
2159         memset(&busdev_server_notifiers, 0, sizeof(busdev_server_notifiers));
2160         memset(&busdev_client_notifiers, 0, sizeof(busdev_client_notifiers));
2161         memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2162         memset(&livedump_info, 0, sizeof(livedump_info));
2163         atomic_set(&livedump_info.buffers_in_use, 0);
2164
2165         if (visorchipset_testvnic) {
2166                 rc = -ENODEV;   /* the vnic test path is not supported here */
2167                 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc, DIAG_SEVERITY_ERR);
2168                 goto cleanup;
2169         }
2170
2171         addr = controlvm_get_channel_address();
2172         if (addr != 0) {
2173                 controlvm_channel =
2174                     visorchannel_create_with_lock
2175                     (addr,
2176                      sizeof(struct spar_controlvm_channel_protocol),
2177                      spar_controlvm_channel_protocol_uuid);
2178                 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2179                                 visorchannel_get_header(controlvm_channel))) {
2180                         initialize_controlvm_payload();
2181                 } else {
2182                         visorchannel_destroy(controlvm_channel);
2183                         controlvm_channel = NULL;
2184                         return -ENODEV;
2185                 }
2186         } else {
2187                 return -ENODEV;
2188         }
2189
2190         major_dev = MKDEV(visorchipset_major, 0);
2191         rc = visorchipset_file_init(major_dev, &controlvm_channel);
2192         if (rc < 0) {
2193                 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2194                 goto cleanup;
2195         }
2196
2197         memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2198
2199         memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2200
2201         memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2202
2203         if (!visorchipset_disable_controlvm) {
2204                 /* if booting in a crash kernel */
2205                 if (is_kdump_kernel())
2206                         INIT_DELAYED_WORK(&periodic_controlvm_work,
2207                                           setup_crash_devices_work_queue);
2208                 else
2209                         INIT_DELAYED_WORK(&periodic_controlvm_work,
2210                                           controlvm_periodic_work);
2211                 periodic_controlvm_workqueue =
2212                     create_singlethread_workqueue("visorchipset_controlvm");
2213
2214                 if (!periodic_controlvm_workqueue) {
2215                         POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2216                                          DIAG_SEVERITY_ERR);
2217                         rc = -ENOMEM;
2218                         goto cleanup;
2219                 }
2220                 most_recent_message_jiffies = jiffies;
2221                 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2222                 if (!queue_delayed_work(periodic_controlvm_workqueue,
2223                                         &periodic_controlvm_work,
2224                                         poll_jiffies)) {
2225                         /* returns false only if already queued, which
2226                          * would be a bug this early in init
2227                          */
2228                         POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2229                                          DIAG_SEVERITY_ERR);
2230                         rc = -ENODEV;
2231                         goto cleanup;
2232                 }
2229         }
2230
2231         visorchipset_platform_device.dev.devt = major_dev;
2232         rc = platform_device_register(&visorchipset_platform_device);
2233         if (rc < 0) {
2234                 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2235                 goto cleanup;
2236         }
2237         POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2238         rc = 0;
2239 cleanup:
2240         if (rc) {
2241                 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2242                                  POSTCODE_SEVERITY_ERR);
2243         }
2244         return rc;
2245 }
2246
2247 static void
2248 visorchipset_exit(void)
2249 {
2250         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2251
2252         if (!visorchipset_disable_controlvm) {
2255                 cancel_delayed_work(&periodic_controlvm_work);
2256                 flush_workqueue(periodic_controlvm_workqueue);
2257                 destroy_workqueue(periodic_controlvm_workqueue);
2258                 periodic_controlvm_workqueue = NULL;
2259                 destroy_controlvm_payload_info(&controlvm_payload_info);
2260         }
2261
2262         cleanup_controlvm_structures();
2263
2264         memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2265
2266         memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2267
2268         memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2269
2270         visorchannel_destroy(controlvm_channel);
2271
2272         visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
2273         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2274 }
2275
2276 module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2277 MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2278 module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2279 MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2280 module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2281 MODULE_PARM_DESC(visorchipset_testmsg,
2282                  "1 to manufacture the chipset, bus, and switch messages");
2283 module_param_named(major, visorchipset_major, int, S_IRUGO);
2284 MODULE_PARM_DESC(visorchipset_major,
2285                  "major device number to use for the device node");
2286 module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2287 MODULE_PARM_DESC(visorchipset_serverregwait,
2288                  "1 to have the module wait for the visor bus to register");
2289 module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2290 MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2291 module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2292 MODULE_PARM_DESC(visorchipset_testteardown,
2293                  "1 to test teardown of the chipset, bus, and switch");
2294 module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2295                    S_IRUGO);
2296 MODULE_PARM_DESC(visorchipset_disable_controlvm,
2297                  "1 to disable polling of controlVm channel");
2298 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2299                    int, S_IRUGO);
2300 MODULE_PARM_DESC(visorchipset_holdchipsetready,
2301                  "1 to hold response to CHIPSET_READY");
2302
2303 module_init(visorchipset_init);
2304 module_exit(visorchipset_exit);
2305
2306 MODULE_AUTHOR("Unisys");
2307 MODULE_LICENSE("GPL");
2308 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2309                    VERSION);
2310 MODULE_VERSION(VERSION);