1 /*
2  *  ec.c - ACPI Embedded Controller Driver (v3)
3  *
4  *  Copyright (C) 2001-2015 Intel Corporation
5  *    Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
6  *            2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
7  *            2006       Denis Sadykov <denis.m.sadykov@intel.com>
8  *            2004       Luming Yu <luming.yu@intel.com>
9  *            2001, 2002 Andy Grover <andrew.grover@intel.com>
10  *            2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
11  *  Copyright (C) 2008      Alexey Starikovskiy <astarikovskiy@suse.de>
12  *
13  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
14  *
15  *  This program is free software; you can redistribute it and/or modify
16  *  it under the terms of the GNU General Public License as published by
17  *  the Free Software Foundation; either version 2 of the License, or (at
18  *  your option) any later version.
19  *
20  *  This program is distributed in the hope that it will be useful, but
21  *  WITHOUT ANY WARRANTY; without even the implied warranty of
22  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
23  *  General Public License for more details.
24  *
25  *  You should have received a copy of the GNU General Public License along
26  *  with this program; if not, write to the Free Software Foundation, Inc.,
27  *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
28  *
29  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
30  */
31
32 /* Uncomment next line to get verbose printout */
33 /* #define DEBUG */
34 #define pr_fmt(fmt) "ACPI : EC: " fmt
35
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/init.h>
39 #include <linux/types.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/list.h>
43 #include <linux/spinlock.h>
44 #include <linux/slab.h>
45 #include <linux/acpi.h>
46 #include <linux/dmi.h>
47 #include <asm/io.h>
48
49 #include "internal.h"
50
51 #define ACPI_EC_CLASS                   "embedded_controller"
52 #define ACPI_EC_DEVICE_NAME             "Embedded Controller"
53 #define ACPI_EC_FILE_INFO               "info"
54
55 /* EC status register */
56 #define ACPI_EC_FLAG_OBF        0x01    /* Output buffer full */
57 #define ACPI_EC_FLAG_IBF        0x02    /* Input buffer full */
58 #define ACPI_EC_FLAG_CMD        0x08    /* Input buffer contains a command */
59 #define ACPI_EC_FLAG_BURST      0x10    /* burst mode */
60 #define ACPI_EC_FLAG_SCI        0x20    /* EC-SCI occurred */
61
62 /* EC commands */
63 enum ec_command {
64         ACPI_EC_COMMAND_READ = 0x80,
65         ACPI_EC_COMMAND_WRITE = 0x81,
66         ACPI_EC_BURST_ENABLE = 0x82,
67         ACPI_EC_BURST_DISABLE = 0x83,
68         ACPI_EC_COMMAND_QUERY = 0x84,
69 };
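
/*
 * Byte sequences used for the commands above (matching the transaction
 * helpers later in this file): RD_EC writes the command byte plus one
 * address byte and reads one data byte back; WR_EC writes the command,
 * address and data bytes; QR_EC writes only the command byte and reads
 * back the pending _Qxx query number.
 */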
70
71 #define ACPI_EC_DELAY           500     /* Wait 500ms max. during EC ops */
72 #define ACPI_EC_UDELAY_GLK      1000    /* Wait 1ms max. to get global lock */
73 #define ACPI_EC_MSI_UDELAY      550     /* Wait 550us for MSI EC */
74 #define ACPI_EC_UDELAY_POLL     1000    /* Wait 1ms for EC transaction polling */
75 #define ACPI_EC_CLEAR_MAX       100     /* Maximum number of events to query
76                                          * when trying to clear the EC */
77
78 enum {
79         EC_FLAGS_QUERY_PENDING,         /* Query is pending */
80         EC_FLAGS_HANDLERS_INSTALLED,    /* Handlers for GPE and
81                                          * OpReg are installed */
82         EC_FLAGS_STARTED,               /* Driver is started */
83         EC_FLAGS_STOPPED,               /* Driver is stopped */
84         EC_FLAGS_COMMAND_STORM,         /* GPE storms occurred to the
85                                          * current command processing */
86 };
87
88 #define ACPI_EC_COMMAND_POLL            0x01 /* Available for command byte */
89 #define ACPI_EC_COMMAND_COMPLETE        0x02 /* Completed last byte */
90
91 /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
92 static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
93 module_param(ec_delay, uint, 0644);
94 MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
95
96 /*
97  * If the number of false interrupts per transaction exceeds this
98  * threshold, the driver assumes a GPE storm has occurred and disables
99  * the GPE, polling the EC instead for normal transactions.
100  */
101 static unsigned int ec_storm_threshold  __read_mostly = 8;
102 module_param(ec_storm_threshold, uint, 0644);
103 MODULE_PARM_DESC(ec_storm_threshold, "Maximum number of false GPEs not considered as a GPE storm");
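
/*
 * Both parameters above can be set on the kernel command line, e.g.
 * "acpi.ec_delay=1000 acpi.ec_storm_threshold=20", or changed at run time
 * via /sys/module/acpi/parameters/ (the values shown are only examples).
 */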
104
105 struct acpi_ec_query_handler {
106         struct list_head node;
107         acpi_ec_query_func func;
108         acpi_handle handle;
109         void *data;
110         u8 query_bit;
111         struct kref kref;
112 };
113
114 struct transaction {
115         const u8 *wdata;                /* bytes to write to the EC */
116         u8 *rdata;                      /* buffer for bytes read back */
117         unsigned short irq_count;       /* false (non-advancing) GPEs seen */
118         u8 command;                     /* EC command byte (enum ec_command) */
119         u8 wi;                          /* current write index into wdata */
120         u8 ri;                          /* current read index into rdata */
121         u8 wlen;                        /* number of bytes to write */
122         u8 rlen;                        /* number of bytes to read */
123         u8 flags;                       /* ACPI_EC_COMMAND_POLL/COMPLETE */
124         unsigned long timestamp;        /* jiffies of last register access */
125 };
126
127 static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
128 static void advance_transaction(struct acpi_ec *ec);
129
130 struct acpi_ec *boot_ec, *first_ec;
131 EXPORT_SYMBOL(first_ec);
132
133 static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
134 static int EC_FLAGS_VALIDATE_ECDT; /* ASUSTek ECDTs need to be validated */
135 static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
136 static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
137 static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
138
139 /* --------------------------------------------------------------------------
140  *                           Device Flags
141  * -------------------------------------------------------------------------- */
142
143 static bool acpi_ec_started(struct acpi_ec *ec)
144 {
145         return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
146                !test_bit(EC_FLAGS_STOPPED, &ec->flags);
147 }
148
149 static bool acpi_ec_flushed(struct acpi_ec *ec)
150 {
151         return ec->reference_count == 1;
152 }
153
154 /* --------------------------------------------------------------------------
155  *                           EC Registers
156  * -------------------------------------------------------------------------- */
157
158 static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
159 {
160         u8 x = inb(ec->command_addr);
161
162         pr_debug("EC_SC(R) = 0x%2.2x "
163                  "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n",
164                  x,
165                  !!(x & ACPI_EC_FLAG_SCI),
166                  !!(x & ACPI_EC_FLAG_BURST),
167                  !!(x & ACPI_EC_FLAG_CMD),
168                  !!(x & ACPI_EC_FLAG_IBF),
169                  !!(x & ACPI_EC_FLAG_OBF));
170         return x;
171 }
172
173 static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
174 {
175         u8 x = inb(ec->data_addr);
176
177         ec->curr->timestamp = jiffies;
178         pr_debug("EC_DATA(R) = 0x%2.2x\n", x);
179         return x;
180 }
181
182 static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
183 {
184         pr_debug("EC_SC(W) = 0x%2.2x\n", command);
185         outb(command, ec->command_addr);
186         ec->curr->timestamp = jiffies;
187 }
188
189 static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
190 {
191         pr_debug("EC_DATA(W) = 0x%2.2x\n", data);
192         outb(data, ec->data_addr);
193         ec->curr->timestamp = jiffies;
194 }
195
196 #ifdef DEBUG
197 static const char *acpi_ec_cmd_string(u8 cmd)
198 {
199         switch (cmd) {
200         case 0x80:
201                 return "RD_EC";
202         case 0x81:
203                 return "WR_EC";
204         case 0x82:
205                 return "BE_EC";
206         case 0x83:
207                 return "BD_EC";
208         case 0x84:
209                 return "QR_EC";
210         }
211         return "UNKNOWN";
212 }
213 #else
214 #define acpi_ec_cmd_string(cmd)         "UNDEF"
215 #endif
216
217 /* --------------------------------------------------------------------------
218  *                           GPE Registers
219  * -------------------------------------------------------------------------- */
220
221 static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
222 {
223         acpi_event_status gpe_status = 0;
224
225         (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
226         return (gpe_status & ACPI_EVENT_FLAG_SET) ? true : false;
227 }
228
229 static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
230 {
231         if (open)
232                 acpi_enable_gpe(NULL, ec->gpe);
233         else {
234                 BUG_ON(ec->reference_count < 1);
235                 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
236         }
237         if (acpi_ec_is_gpe_raised(ec)) {
238                 /*
239                  * On some platforms, EN=1 writes cannot trigger GPEs, so
240                  * software needs to manually trigger a pseudo GPE event
241                  * after EN=1 writes.
242                  */
243                 pr_debug("***** Polling quirk *****\n");
244                 advance_transaction(ec);
245         }
246 }
247
248 static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
249 {
250         if (close)
251                 acpi_disable_gpe(NULL, ec->gpe);
252         else {
253                 BUG_ON(ec->reference_count < 1);
254                 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
255         }
256 }
257
258 static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
259 {
260         /*
261          * GPE STS is a W1C register, which means:
262          * 1. Software can clear it without worrying about clearing other
263          *    GPEs' STS bits when the hardware sets them in parallel.
264          * 2. As long as software can ensure only clearing it when it is
265          *    set, hardware won't set it in parallel.
266          * So software can clear GPE in any contexts.
267          * Warning: do not move the check into advance_transaction() as the
268          * EC commands will be sent without GPE raised.
269          */
270         if (!acpi_ec_is_gpe_raised(ec))
271                 return;
272         acpi_clear_gpe(NULL, ec->gpe);
273 }
274
275 /* --------------------------------------------------------------------------
276  *                           Transaction Management
277  * -------------------------------------------------------------------------- */
278
279 static void acpi_ec_submit_request(struct acpi_ec *ec)
280 {
281         ec->reference_count++;
282         if (ec->reference_count == 1)
283                 acpi_ec_enable_gpe(ec, true);
284 }
285
286 static void acpi_ec_complete_request(struct acpi_ec *ec)
287 {
288         bool flushed = false;
289
290         ec->reference_count--;
291         if (ec->reference_count == 0)
292                 acpi_ec_disable_gpe(ec, true);
293         flushed = acpi_ec_flushed(ec);
294         if (flushed)
295                 wake_up(&ec->wait);
296 }
297
298 static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
299 {
300         if (!test_bit(flag, &ec->flags)) {
301                 acpi_ec_disable_gpe(ec, false);
302                 pr_debug("+++++ Polling enabled +++++\n");
303                 set_bit(flag, &ec->flags);
304         }
305 }
306
307 static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
308 {
309         if (test_bit(flag, &ec->flags)) {
310                 clear_bit(flag, &ec->flags);
311                 acpi_ec_enable_gpe(ec, false);
312                 pr_debug("+++++ Polling disabled +++++\n");
313         }
314 }
315
316 /*
317  * acpi_ec_submit_flushable_request() - Increase the reference count
318  *                                      unless a flush operation is in
319  *                                      progress
320  * @ec: the EC device
321  *
322  * This function must be used before taking a new action that should hold
323  * the reference count.  If this function returns false, then the action
324  * must be discarded or it will prevent the flush operation from being
325  * completed.
326  */
327 static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
328 {
329         if (!acpi_ec_started(ec))
330                 return false;
331         acpi_ec_submit_request(ec);
332         return true;
333 }
334
335 static void acpi_ec_submit_query(struct acpi_ec *ec)
336 {
337         if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
338                 pr_debug("***** Event started *****\n");
339                 schedule_work(&ec->work);
340         }
341 }
342
343 static void acpi_ec_complete_query(struct acpi_ec *ec)
344 {
345         if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
346                 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
347                 pr_debug("***** Event stopped *****\n");
348         }
349 }
350
351 static int ec_transaction_completed(struct acpi_ec *ec)
352 {
353         unsigned long flags;
354         int ret = 0;
355
356         spin_lock_irqsave(&ec->lock, flags);
357         if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
358                 ret = 1;
359         spin_unlock_irqrestore(&ec->lock, flags);
360         return ret;
361 }
362
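/*
 * Core transaction state machine, invoked from the GPE handler and from
 * the polling path (always with ec->lock held).  Depending on the IBF/OBF
 * status bits it writes the next command/data byte or reads the next data
 * byte, marks the transaction ACPI_EC_COMMAND_COMPLETE once all bytes have
 * been transferred, and schedules a query whenever SCI_EVT is seen.
 */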
363 static void advance_transaction(struct acpi_ec *ec)
364 {
365         struct transaction *t;
366         u8 status;
367         bool wakeup = false;
368
369         pr_debug("===== %s (%d) =====\n",
370                  in_interrupt() ? "IRQ" : "TASK", smp_processor_id());
371         /*
372          * By always clearing STS before handling all indications, we can
373          * ensure a hardware STS 0->1 change after this clearing can always
374          * trigger a GPE interrupt.
375          */
376         acpi_ec_clear_gpe(ec);
377         status = acpi_ec_read_status(ec);
378         t = ec->curr;
379         if (!t)
380                 goto err;
381         if (t->flags & ACPI_EC_COMMAND_POLL) {
382                 if (t->wlen > t->wi) {
383                         if ((status & ACPI_EC_FLAG_IBF) == 0)
384                                 acpi_ec_write_data(ec, t->wdata[t->wi++]);
385                         else
386                                 goto err;
387                 } else if (t->rlen > t->ri) {
388                         if ((status & ACPI_EC_FLAG_OBF) == 1) {
389                                 t->rdata[t->ri++] = acpi_ec_read_data(ec);
390                                 if (t->rlen == t->ri) {
391                                         t->flags |= ACPI_EC_COMMAND_COMPLETE;
392                                         if (t->command == ACPI_EC_COMMAND_QUERY)
393                                                 pr_debug("***** Command(%s) hardware completion *****\n",
394                                                          acpi_ec_cmd_string(t->command));
395                                         wakeup = true;
396                                 }
397                         } else
398                                 goto err;
399                 } else if (t->wlen == t->wi &&
400                            (status & ACPI_EC_FLAG_IBF) == 0) {
401                         t->flags |= ACPI_EC_COMMAND_COMPLETE;
402                         wakeup = true;
403                 }
404                 goto out;
405         } else {
406                 if (EC_FLAGS_QUERY_HANDSHAKE &&
407                     !(status & ACPI_EC_FLAG_SCI) &&
408                     (t->command == ACPI_EC_COMMAND_QUERY)) {
409                         t->flags |= ACPI_EC_COMMAND_POLL;
410                         acpi_ec_complete_query(ec);
411                         t->rdata[t->ri++] = 0x00;
412                         t->flags |= ACPI_EC_COMMAND_COMPLETE;
413                         pr_debug("***** Command(%s) software completion *****\n",
414                                  acpi_ec_cmd_string(t->command));
415                         wakeup = true;
416                 } else if ((status & ACPI_EC_FLAG_IBF) == 0) {
417                         acpi_ec_write_cmd(ec, t->command);
418                         t->flags |= ACPI_EC_COMMAND_POLL;
419                         acpi_ec_complete_query(ec);
420                 } else
421                         goto err;
422                 goto out;
423         }
424 err:
425         /*
426          * If the SCI bit is set, do not count this as a false IRQ;
427          * otherwise a not-yet-handled SCI IRQ would be counted as a false one.
428          */
429         if (!(status & ACPI_EC_FLAG_SCI)) {
430                 if (in_interrupt() && t) {
431                         if (t->irq_count < ec_storm_threshold)
432                                 ++t->irq_count;
433                         /* Allow triggering on 0 threshold */
434                         if (t->irq_count == ec_storm_threshold)
435                                 acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
436                 }
437         }
438 out:
439         if (status & ACPI_EC_FLAG_SCI)
440                 acpi_ec_submit_query(ec);
441         if (wakeup && in_interrupt())
442                 wake_up(&ec->wait);
443 }
444
445 static void start_transaction(struct acpi_ec *ec)
446 {
447         ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
448         ec->curr->flags = 0;
449         ec->curr->timestamp = jiffies;
450         advance_transaction(ec);
451 }
452
453 static int ec_poll(struct acpi_ec *ec)
454 {
455         unsigned long flags;
456         int repeat = 5; /* number of command restarts */
457
458         while (repeat--) {
459                 unsigned long delay = jiffies +
460                         msecs_to_jiffies(ec_delay);
461                 unsigned long usecs = ACPI_EC_UDELAY_POLL;
462                 do {
463                         /* don't sleep with disabled interrupts */
464                         if (EC_FLAGS_MSI || irqs_disabled()) {
465                                 usecs = ACPI_EC_MSI_UDELAY;
466                                 udelay(usecs);
467                                 if (ec_transaction_completed(ec))
468                                         return 0;
469                         } else {
470                                 if (wait_event_timeout(ec->wait,
471                                                 ec_transaction_completed(ec),
472                                                 usecs_to_jiffies(usecs)))
473                                         return 0;
474                         }
475                         spin_lock_irqsave(&ec->lock, flags);
476                         if (time_after(jiffies,
477                                         ec->curr->timestamp +
478                                         usecs_to_jiffies(usecs)))
479                                 advance_transaction(ec);
480                         spin_unlock_irqrestore(&ec->lock, flags);
481                 } while (time_before(jiffies, delay));
482                 pr_debug("controller reset, restart transaction\n");
483                 spin_lock_irqsave(&ec->lock, flags);
484                 start_transaction(ec);
485                 spin_unlock_irqrestore(&ec->lock, flags);
486         }
487         return -ETIME;
488 }
489
490 static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
491                                         struct transaction *t)
492 {
493         unsigned long tmp;
494         int ret = 0;
495
496         if (EC_FLAGS_MSI)
497                 udelay(ACPI_EC_MSI_UDELAY);
498         /* start transaction */
499         spin_lock_irqsave(&ec->lock, tmp);
500         /* Enable GPE for command processing (IBF=0/OBF=1) */
501         if (!acpi_ec_submit_flushable_request(ec)) {
502                 ret = -EINVAL;
503                 goto unlock;
504         }
505         /* following two actions should be kept atomic */
506         ec->curr = t;
507         pr_debug("***** Command(%s) started *****\n",
508                  acpi_ec_cmd_string(t->command));
509         start_transaction(ec);
510         spin_unlock_irqrestore(&ec->lock, tmp);
511         ret = ec_poll(ec);
512         spin_lock_irqsave(&ec->lock, tmp);
513         if (t->irq_count == ec_storm_threshold)
514                 acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
515         pr_debug("***** Command(%s) stopped *****\n",
516                  acpi_ec_cmd_string(t->command));
517         ec->curr = NULL;
518         /* Disable GPE for command processing (IBF=0/OBF=1) */
519         acpi_ec_complete_request(ec);
520 unlock:
521         spin_unlock_irqrestore(&ec->lock, tmp);
522         return ret;
523 }
524
525 static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
526 {
527         int status;
528         u32 glk;
529
530         if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
531                 return -EINVAL;
532         if (t->rdata)
533                 memset(t->rdata, 0, t->rlen);
534         mutex_lock(&ec->mutex);
535         if (ec->global_lock) {
536                 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
537                 if (ACPI_FAILURE(status)) {
538                         status = -ENODEV;
539                         goto unlock;
540                 }
541         }
542
543         status = acpi_ec_transaction_unlocked(ec, t);
544
545         if (test_bit(EC_FLAGS_COMMAND_STORM, &ec->flags))
546                 msleep(1);
547         if (ec->global_lock)
548                 acpi_release_global_lock(glk);
549 unlock:
550         mutex_unlock(&ec->mutex);
551         return status;
552 }
553
554 static int acpi_ec_burst_enable(struct acpi_ec *ec)
555 {
556         u8 d;
557         struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
558                                 .wdata = NULL, .rdata = &d,
559                                 .wlen = 0, .rlen = 1};
560
561         return acpi_ec_transaction(ec, &t);
562 }
563
564 static int acpi_ec_burst_disable(struct acpi_ec *ec)
565 {
566         struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
567                                 .wdata = NULL, .rdata = NULL,
568                                 .wlen = 0, .rlen = 0};
569
570         return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
571                                 acpi_ec_transaction(ec, &t) : 0;
572 }
573
574 static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
575 {
576         int result;
577         u8 d;
578         struct transaction t = {.command = ACPI_EC_COMMAND_READ,
579                                 .wdata = &address, .rdata = &d,
580                                 .wlen = 1, .rlen = 1};
581
582         result = acpi_ec_transaction(ec, &t);
583         *data = d;
584         return result;
585 }
586
587 static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
588 {
589         u8 wdata[2] = { address, data };
590         struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
591                                 .wdata = wdata, .rdata = NULL,
592                                 .wlen = 2, .rlen = 0};
593
594         return acpi_ec_transaction(ec, &t);
595 }
596
597 int ec_read(u8 addr, u8 *val)
598 {
599         int err;
600         u8 temp_data;
601
602         if (!first_ec)
603                 return -ENODEV;
604
605         err = acpi_ec_read(first_ec, addr, &temp_data);
606
607         if (!err) {
608                 *val = temp_data;
609                 return 0;
610         }
611         return err;
612 }
613 EXPORT_SYMBOL(ec_read);
614
615 int ec_write(u8 addr, u8 val)
616 {
617         int err;
618
619         if (!first_ec)
620                 return -ENODEV;
621
622         err = acpi_ec_write(first_ec, addr, val);
623
624         return err;
625 }
626 EXPORT_SYMBOL(ec_write);
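
/*
 * Usage sketch (illustrative only): another driver could read a byte from
 * a hypothetical EC offset 0xA0 and write it back with bit 0 set:
 *
 *	u8 val;
 *
 *	if (!ec_read(0xA0, &val))
 *		ec_write(0xA0, val | 0x01);
 *
 * The 0xA0 offset is made up; real offsets are defined by the platform's
 * EC firmware.
 */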
627
628 int ec_transaction(u8 command,
629                    const u8 *wdata, unsigned wdata_len,
630                    u8 *rdata, unsigned rdata_len)
631 {
632         struct transaction t = {.command = command,
633                                 .wdata = wdata, .rdata = rdata,
634                                 .wlen = wdata_len, .rlen = rdata_len};
635
636         if (!first_ec)
637                 return -ENODEV;
638
639         return acpi_ec_transaction(first_ec, &t);
640 }
641 EXPORT_SYMBOL(ec_transaction);
642
643 /* Get the handle to the EC device */
644 acpi_handle ec_get_handle(void)
645 {
646         if (!first_ec)
647                 return NULL;
648         return first_ec->handle;
649 }
650 EXPORT_SYMBOL(ec_get_handle);
651
652 /*
653  * Process _Q events that might have accumulated in the EC.
654  * Run with locked ec mutex.
655  */
656 static void acpi_ec_clear(struct acpi_ec *ec)
657 {
658         int i, status;
659         u8 value = 0;
660
661         for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
662                 status = acpi_ec_query(ec, &value);
663                 if (status || !value)
664                         break;
665         }
666
667         if (unlikely(i == ACPI_EC_CLEAR_MAX))
668                 pr_warn("Maximum of %d stale EC events cleared\n", i);
669         else
670                 pr_info("%d stale EC events cleared\n", i);
671 }
672
673 static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
674 {
675         unsigned long flags;
676
677         spin_lock_irqsave(&ec->lock, flags);
678         if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
679                 pr_debug("+++++ Starting EC +++++\n");
680                 /* Enable GPE for event processing (SCI_EVT=1) */
681                 if (!resuming)
682                         acpi_ec_submit_request(ec);
683                 pr_info("+++++ EC started +++++\n");
684         }
685         spin_unlock_irqrestore(&ec->lock, flags);
686 }
687
688 static bool acpi_ec_stopped(struct acpi_ec *ec)
689 {
690         unsigned long flags;
691         bool flushed;
692
693         spin_lock_irqsave(&ec->lock, flags);
694         flushed = acpi_ec_flushed(ec);
695         spin_unlock_irqrestore(&ec->lock, flags);
696         return flushed;
697 }
698
699 static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
700 {
701         unsigned long flags;
702
703         spin_lock_irqsave(&ec->lock, flags);
704         if (acpi_ec_started(ec)) {
705                 pr_debug("+++++ Stopping EC +++++\n");
706                 set_bit(EC_FLAGS_STOPPED, &ec->flags);
707                 spin_unlock_irqrestore(&ec->lock, flags);
708                 wait_event(ec->wait, acpi_ec_stopped(ec));
709                 spin_lock_irqsave(&ec->lock, flags);
710                 /* Disable GPE for event processing (SCI_EVT=1) */
711                 if (!suspending)
712                         acpi_ec_complete_request(ec);
713                 clear_bit(EC_FLAGS_STARTED, &ec->flags);
714                 clear_bit(EC_FLAGS_STOPPED, &ec->flags);
715                 pr_info("+++++ EC stopped +++++\n");
716         }
717         spin_unlock_irqrestore(&ec->lock, flags);
718 }
719
720 void acpi_ec_block_transactions(void)
721 {
722         struct acpi_ec *ec = first_ec;
723
724         if (!ec)
725                 return;
726
727         mutex_lock(&ec->mutex);
728         /* Prevent transactions from being carried out */
729         acpi_ec_stop(ec, true);
730         mutex_unlock(&ec->mutex);
731 }
732
733 void acpi_ec_unblock_transactions(void)
734 {
735         struct acpi_ec *ec = first_ec;
736
737         if (!ec)
738                 return;
739
740         /* Allow transactions to be carried out again */
741         acpi_ec_start(ec, true);
742
743         if (EC_FLAGS_CLEAR_ON_RESUME)
744                 acpi_ec_clear(ec);
745 }
746
747 void acpi_ec_unblock_transactions_early(void)
748 {
749         /*
750          * Allow transactions to happen again (this function is called from
751          * atomic context during wakeup, so we don't need to acquire the mutex).
752          */
753         if (first_ec)
754                 acpi_ec_start(first_ec, true);
755 }
756
757 /* --------------------------------------------------------------------------
758  *                           Event Management
759  * -------------------------------------------------------------------------- */
760 static struct acpi_ec_query_handler *
761 acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
762 {
763         if (handler)
764                 kref_get(&handler->kref);
765         return handler;
766 }
767
768 static void acpi_ec_query_handler_release(struct kref *kref)
769 {
770         struct acpi_ec_query_handler *handler =
771                 container_of(kref, struct acpi_ec_query_handler, kref);
772
773         kfree(handler);
774 }
775
776 static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
777 {
778         kref_put(&handler->kref, acpi_ec_query_handler_release);
779 }
780
781 int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
782                               acpi_handle handle, acpi_ec_query_func func,
783                               void *data)
784 {
785         struct acpi_ec_query_handler *handler =
786             kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);
787
788         if (!handler)
789                 return -ENOMEM;
790
791         handler->query_bit = query_bit;
792         handler->handle = handle;
793         handler->func = func;
794         handler->data = data;
795         mutex_lock(&ec->mutex);
796         kref_init(&handler->kref);
797         list_add(&handler->node, &ec->list);
798         mutex_unlock(&ec->mutex);
799         return 0;
800 }
801 EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
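
/*
 * Usage sketch (illustrative only): a consumer could register a callback
 * for a hypothetical query event 0x42 instead of relying on an AML _Q42
 * method:
 *
 *	static int my_q42_handler(void *data)
 *	{
 *		...handle the event...
 *		return 0;
 *	}
 *
 *	acpi_ec_add_query_handler(first_ec, 0x42, NULL, my_q42_handler, NULL);
 *
 * The handler name and query bit are made up for illustration.
 */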
802
803 void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
804 {
805         struct acpi_ec_query_handler *handler, *tmp;
806         LIST_HEAD(free_list);
807
808         mutex_lock(&ec->mutex);
809         list_for_each_entry_safe(handler, tmp, &ec->list, node) {
810                 if (query_bit == handler->query_bit) {
811                         list_del_init(&handler->node);
812                         list_add(&handler->node, &free_list);
813                 }
814         }
815         mutex_unlock(&ec->mutex);
816         list_for_each_entry(handler, &free_list, node)
817                 acpi_ec_put_query_handler(handler);
818 }
819 EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
820
821 static void acpi_ec_run(void *cxt)
822 {
823         struct acpi_ec_query_handler *handler = cxt;
824
825         if (!handler)
826                 return;
827         pr_debug("##### Query(0x%02x) started #####\n", handler->query_bit);
828         if (handler->func)
829                 handler->func(handler->data);
830         else if (handler->handle)
831                 acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
832         pr_debug("##### Query(0x%02x) stopped #####\n", handler->query_bit);
833         acpi_ec_put_query_handler(handler);
834 }
835
836 static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
837 {
838         u8 value = 0;
839         int result;
840         acpi_status status;
841         struct acpi_ec_query_handler *handler;
842         struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
843                                 .wdata = NULL, .rdata = &value,
844                                 .wlen = 0, .rlen = 1};
845
846         /*
847          * Query the EC to find out which _Qxx method we need to evaluate.
848          * Note that successful completion of the query causes the ACPI_EC_SCI
849          * bit to be cleared (and thus clearing the interrupt source).
850          */
851         result = acpi_ec_transaction(ec, &t);
852         if (result)
853                 return result;
854         if (data)
855                 *data = value;
856         if (!value)
857                 return -ENODATA;
858
859         mutex_lock(&ec->mutex);
860         list_for_each_entry(handler, &ec->list, node) {
861                 if (value == handler->query_bit) {
862                         /* have custom handler for this bit */
863                         handler = acpi_ec_get_query_handler(handler);
864                         pr_debug("##### Query(0x%02x) scheduled #####\n",
865                                  handler->query_bit);
866                         status = acpi_os_execute((handler->func) ?
867                                 OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
868                                 acpi_ec_run, handler);
869                         if (ACPI_FAILURE(status))
870                                 result = -EBUSY;
871                         break;
872                 }
873         }
874         mutex_unlock(&ec->mutex);
875         return result;
876 }
877
878 static void acpi_ec_gpe_poller(struct work_struct *work)
879 {
880         struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
881
882         acpi_ec_query(ec, NULL);
883 }
884
885 static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
886         u32 gpe_number, void *data)
887 {
888         unsigned long flags;
889         struct acpi_ec *ec = data;
890
891         spin_lock_irqsave(&ec->lock, flags);
892         advance_transaction(ec);
893         spin_unlock_irqrestore(&ec->lock, flags);
894         return ACPI_INTERRUPT_HANDLED;
895 }
896
897 /* --------------------------------------------------------------------------
898  *                           Address Space Management
899  * -------------------------------------------------------------------------- */
900
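/*
 * AML accesses to an EmbeddedControl operation region (for instance a
 * hypothetical "OperationRegion (ECOR, EmbeddedControl, 0x00, 0xFF)" with
 * a byte-granular Field on top of it) are dispatched by ACPICA to the
 * handler below, which splits each request into single-byte EC read or
 * write transactions.
 */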
901 static acpi_status
902 acpi_ec_space_handler(u32 function, acpi_physical_address address,
903                       u32 bits, u64 *value64,
904                       void *handler_context, void *region_context)
905 {
906         struct acpi_ec *ec = handler_context;
907         int result = 0, i, bytes = bits / 8;
908         u8 *value = (u8 *)value64;
909
910         if ((address > 0xFF) || !value || !handler_context)
911                 return AE_BAD_PARAMETER;
912
913         if (function != ACPI_READ && function != ACPI_WRITE)
914                 return AE_BAD_PARAMETER;
915
916         if (EC_FLAGS_MSI || bits > 8)
917                 acpi_ec_burst_enable(ec);
918
919         for (i = 0; i < bytes; ++i, ++address, ++value)
920                 result = (function == ACPI_READ) ?
921                         acpi_ec_read(ec, address, value) :
922                         acpi_ec_write(ec, address, *value);
923
924         if (EC_FLAGS_MSI || bits > 8)
925                 acpi_ec_burst_disable(ec);
926
927         switch (result) {
928         case -EINVAL:
929                 return AE_BAD_PARAMETER;
930         case -ENODEV:
931                 return AE_NOT_FOUND;
932         case -ETIME:
933                 return AE_TIME;
934         default:
935                 return AE_OK;
936         }
937 }
938
939 /* --------------------------------------------------------------------------
940  *                             Driver Interface
941  * -------------------------------------------------------------------------- */
942
943 static acpi_status
944 ec_parse_io_ports(struct acpi_resource *resource, void *context);
945
946 static struct acpi_ec *make_acpi_ec(void)
947 {
948         struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
949
950         if (!ec)
951                 return NULL;
952         ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
953         mutex_init(&ec->mutex);
954         init_waitqueue_head(&ec->wait);
955         INIT_LIST_HEAD(&ec->list);
956         spin_lock_init(&ec->lock);
957         INIT_WORK(&ec->work, acpi_ec_gpe_poller);
958         return ec;
959 }
960
961 static acpi_status
962 acpi_ec_register_query_methods(acpi_handle handle, u32 level,
963                                void *context, void **return_value)
964 {
965         char node_name[5];
966         struct acpi_buffer buffer = { sizeof(node_name), node_name };
967         struct acpi_ec *ec = context;
968         int value = 0;
969         acpi_status status;
970
971         status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
972
973         if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
974                 acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
975         return AE_OK;
976 }
977
978 static acpi_status
979 ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
980 {
981         acpi_status status;
982         unsigned long long tmp = 0;
983         struct acpi_ec *ec = context;
984
985         /* clear addr values, ec_parse_io_ports depends on them */
986         ec->command_addr = ec->data_addr = 0;
987
988         status = acpi_walk_resources(handle, METHOD_NAME__CRS,
989                                      ec_parse_io_ports, ec);
990         if (ACPI_FAILURE(status))
991                 return status;
992
993         /* Get GPE bit assignment (EC events). */
994         /* TODO: Add support for _GPE returning a package */
995         status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
996         if (ACPI_FAILURE(status))
997                 return status;
998         ec->gpe = tmp;
999         /* Use the global lock for all EC transactions? */
1000         tmp = 0;
1001         acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
1002         ec->global_lock = tmp;
1003         ec->handle = handle;
1004         return AE_CTRL_TERMINATE;
1005 }
1006
1007 static int ec_install_handlers(struct acpi_ec *ec)
1008 {
1009         acpi_status status;
1010
1011         if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
1012                 return 0;
1013         status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
1014                                   ACPI_GPE_EDGE_TRIGGERED,
1015                                   &acpi_ec_gpe_handler, ec);
1016         if (ACPI_FAILURE(status))
1017                 return -ENODEV;
1018
1019         acpi_ec_start(ec, false);
1020         status = acpi_install_address_space_handler(ec->handle,
1021                                                     ACPI_ADR_SPACE_EC,
1022                                                     &acpi_ec_space_handler,
1023                                                     NULL, ec);
1024         if (ACPI_FAILURE(status)) {
1025                 if (status == AE_NOT_FOUND) {
1026                         /*
1027                          * The OS may have failed to evaluate the _REG
1028                          * object. Ignore the AE_NOT_FOUND error and let
1029                          * the EC initialization continue.
1030                          */
1031                         pr_err("Failed to evaluate the _REG object"
1032                                 " of the EC device. A broken BIOS is suspected.\n");
1033                 } else {
1034                         acpi_ec_stop(ec, false);
1035                         acpi_remove_gpe_handler(NULL, ec->gpe,
1036                                 &acpi_ec_gpe_handler);
1037                         return -ENODEV;
1038                 }
1039         }
1040
1041         set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
1042         return 0;
1043 }
1044
1045 static void ec_remove_handlers(struct acpi_ec *ec)
1046 {
1047         if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
1048                 return;
1049         acpi_ec_stop(ec, false);
1050         if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
1051                                 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
1052                 pr_err("failed to remove space handler\n");
1053         if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
1054                                 &acpi_ec_gpe_handler)))
1055                 pr_err("failed to remove gpe handler\n");
1056         clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
1057 }
1058
1059 static int acpi_ec_add(struct acpi_device *device)
1060 {
1061         struct acpi_ec *ec = NULL;
1062         int ret;
1063
1064         strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
1065         strcpy(acpi_device_class(device), ACPI_EC_CLASS);
1066
1067         /* Check for boot EC */
1068         if (boot_ec &&
1069             (boot_ec->handle == device->handle ||
1070              boot_ec->handle == ACPI_ROOT_OBJECT)) {
1071                 ec = boot_ec;
1072                 boot_ec = NULL;
1073         } else {
1074                 ec = make_acpi_ec();
1075                 if (!ec)
1076                         return -ENOMEM;
1077         }
1078         if (ec_parse_device(device->handle, 0, ec, NULL) !=
1079                 AE_CTRL_TERMINATE) {
1080                         kfree(ec);
1081                         return -EINVAL;
1082         }
1083
1084         /* Find and register all query methods */
1085         acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
1086                             acpi_ec_register_query_methods, NULL, ec, NULL);
1087
1088         if (!first_ec)
1089                 first_ec = ec;
1090         device->driver_data = ec;
1091
1092         ret = !!request_region(ec->data_addr, 1, "EC data");
1093         WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
1094         ret = !!request_region(ec->command_addr, 1, "EC cmd");
1095         WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
1096
1097         pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
1098                           ec->gpe, ec->command_addr, ec->data_addr);
1099
1100         ret = ec_install_handlers(ec);
1101
1102         /* EC is fully operational, allow queries */
1103         clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
1104
1105         /* Clear stale _Q events if hardware might require that */
1106         if (EC_FLAGS_CLEAR_ON_RESUME)
1107                 acpi_ec_clear(ec);
1108         return ret;
1109 }
1110
1111 static int acpi_ec_remove(struct acpi_device *device)
1112 {
1113         struct acpi_ec *ec;
1114         struct acpi_ec_query_handler *handler, *tmp;
1115
1116         if (!device)
1117                 return -EINVAL;
1118
1119         ec = acpi_driver_data(device);
1120         ec_remove_handlers(ec);
1121         mutex_lock(&ec->mutex);
1122         list_for_each_entry_safe(handler, tmp, &ec->list, node) {
1123                 list_del(&handler->node);
1124                 kfree(handler);
1125         }
1126         mutex_unlock(&ec->mutex);
1127         release_region(ec->data_addr, 1);
1128         release_region(ec->command_addr, 1);
1129         device->driver_data = NULL;
1130         if (ec == first_ec)
1131                 first_ec = NULL;
1132         kfree(ec);
1133         return 0;
1134 }
1135
1136 static acpi_status
1137 ec_parse_io_ports(struct acpi_resource *resource, void *context)
1138 {
1139         struct acpi_ec *ec = context;
1140
1141         if (resource->type != ACPI_RESOURCE_TYPE_IO)
1142                 return AE_OK;
1143
1144         /*
1145          * The first address region returned is the data port, and
1146          * the second address region returned is the status/command
1147          * port.
1148          */
1149         if (ec->data_addr == 0)
1150                 ec->data_addr = resource->data.io.minimum;
1151         else if (ec->command_addr == 0)
1152                 ec->command_addr = resource->data.io.minimum;
1153         else
1154                 return AE_CTRL_TERMINATE;
1155
1156         return AE_OK;
1157 }
1158
1159 int __init acpi_boot_ec_enable(void)
1160 {
1161         if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
1162                 return 0;
1163         if (!ec_install_handlers(boot_ec)) {
1164                 first_ec = boot_ec;
1165                 return 0;
1166         }
1167         return -EFAULT;
1168 }
1169
1170 static const struct acpi_device_id ec_device_ids[] = {
1171         {"PNP0C09", 0},
1172         {"", 0},
1173 };
1174
1175 /* Some BIOS do not survive early DSDT scan, skip it */
1176 static int ec_skip_dsdt_scan(const struct dmi_system_id *id)
1177 {
1178         EC_FLAGS_SKIP_DSDT_SCAN = 1;
1179         return 0;
1180 }
1181
1182 /* ASUSTek often supplies us with broken ECDTs, validate them */
1183 static int ec_validate_ecdt(const struct dmi_system_id *id)
1184 {
1185         EC_FLAGS_VALIDATE_ECDT = 1;
1186         return 0;
1187 }
1188
1189 /* MSI EC needs special treatment, enable it */
1190 static int ec_flag_msi(const struct dmi_system_id *id)
1191 {
1192         pr_debug("Detected MSI hardware, enabling workarounds.\n");
1193         EC_FLAGS_MSI = 1;
1194         EC_FLAGS_VALIDATE_ECDT = 1;
1195         return 0;
1196 }
1197
1198 /*
1199  * The Clevo M720 notebook actually works fine in IRQ mode if we lift
1200  * the GPE storm threshold back to 20.
1201  */
1202 static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
1203 {
1204         pr_debug("Setting the EC GPE storm threshold to 20\n");
1205         ec_storm_threshold  = 20;
1206         return 0;
1207 }
1208
1209 /*
1210  * Acer EC firmware refuses to respond to QR_EC when SCI_EVT is not set.
1211  * In that case, we complete the QR_EC without issuing it to the firmware.
1212  * https://bugzilla.kernel.org/show_bug.cgi?id=86211
1213  */
1214 static int ec_flag_query_handshake(const struct dmi_system_id *id)
1215 {
1216         pr_debug("Detected EC firmware requiring QR_EC to be issued only when SCI_EVT is set\n");
1217         EC_FLAGS_QUERY_HANDSHAKE = 1;
1218         return 0;
1219 }
1220
1221 /*
1222  * On some hardware it is necessary to clear events accumulated by the EC during
1223  * sleep. These ECs stop reporting GPEs until they are manually polled, if too
1224  * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
1225  *
1226  * https://bugzilla.kernel.org/show_bug.cgi?id=44161
1227  *
1228  * Ideally, the EC should also be instructed NOT to accumulate events during
1229  * sleep (which Windows seems to do somehow), but the interface to control this
1230  * behaviour is not known at this time.
1231  *
1232  * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
1233  * however it is very likely that other Samsung models are affected.
1234  *
1235  * On systems which don't accumulate _Q events during sleep, this extra check
1236  * should be harmless.
1237  */
1238 static int ec_clear_on_resume(const struct dmi_system_id *id)
1239 {
1240         pr_debug("Detected system needing EC poll on resume.\n");
1241         EC_FLAGS_CLEAR_ON_RESUME = 1;
1242         return 0;
1243 }
1244
1245 static struct dmi_system_id ec_dmi_table[] __initdata = {
1246         {
1247         ec_skip_dsdt_scan, "Compal JFL92", {
1248         DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
1249         DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
1250         {
1251         ec_flag_msi, "MSI hardware", {
1252         DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star")}, NULL},
1253         {
1254         ec_flag_msi, "MSI hardware", {
1255         DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star")}, NULL},
1256         {
1257         ec_flag_msi, "MSI hardware", {
1258         DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL},
1259         {
1260         ec_flag_msi, "MSI hardware", {
1261         DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL},
1262         {
1263         ec_flag_msi, "Quanta hardware", {
1264         DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
1265         DMI_MATCH(DMI_PRODUCT_NAME, "TW8/SW8/DW8"),}, NULL},
1266         {
1267         ec_flag_msi, "Quanta hardware", {
1268         DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
1269         DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL},
1270         {
1271         ec_flag_msi, "Clevo W350etq", {
1272         DMI_MATCH(DMI_SYS_VENDOR, "CLEVO CO."),
1273         DMI_MATCH(DMI_PRODUCT_NAME, "W35_37ET"),}, NULL},
1274         {
1275         ec_validate_ecdt, "ASUS hardware", {
1276         DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
1277         {
1278         ec_validate_ecdt, "ASUS hardware", {
1279         DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
1280         {
1281         ec_enlarge_storm_threshold, "CLEVO hardware", {
1282         DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
1283         DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
1284         {
1285         ec_skip_dsdt_scan, "HP Folio 13", {
1286         DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1287         DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL},
1288         {
1289         ec_validate_ecdt, "ASUS hardware", {
1290         DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
1291         DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
1292         {
1293         ec_clear_on_resume, "Samsung hardware", {
1294         DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
1295         {
1296         ec_flag_query_handshake, "Acer hardware", {
1297         DMI_MATCH(DMI_SYS_VENDOR, "Acer"), }, NULL},
1298         {},
1299 };
1300
1301 int __init acpi_ec_ecdt_probe(void)
1302 {
1303         acpi_status status;
1304         struct acpi_ec *saved_ec = NULL;
1305         struct acpi_table_ecdt *ecdt_ptr;
1306
1307         boot_ec = make_acpi_ec();
1308         if (!boot_ec)
1309                 return -ENOMEM;
1310         /*
1311          * Generate a boot ec context
1312          */
1313         dmi_check_system(ec_dmi_table);
1314         status = acpi_get_table(ACPI_SIG_ECDT, 1,
1315                                 (struct acpi_table_header **)&ecdt_ptr);
1316         if (ACPI_SUCCESS(status)) {
1317                 pr_info("EC description table found, configuring boot EC\n");
1318                 boot_ec->command_addr = ecdt_ptr->control.address;
1319                 boot_ec->data_addr = ecdt_ptr->data.address;
1320                 boot_ec->gpe = ecdt_ptr->gpe;
1321                 boot_ec->handle = ACPI_ROOT_OBJECT;
1322                 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id,
1323                                 &boot_ec->handle);
1324                 /* Don't trust ECDT, which comes from ASUSTek */
1325                 if (!EC_FLAGS_VALIDATE_ECDT)
1326                         goto install;
1327                 saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
1328                 if (!saved_ec)
1329                         return -ENOMEM;
1330         /* fall through */
1331         }
1332
1333         if (EC_FLAGS_SKIP_DSDT_SCAN) {
1334                 kfree(saved_ec);
1335                 return -ENODEV;
1336         }
1337
1338         /* This workaround is needed only on some broken machines
1339          * that require an early EC but fail to provide an ECDT. */
1340         pr_debug("Look up EC in DSDT\n");
1341         status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
1342                                         boot_ec, NULL);
1343         /* Check that acpi_get_devices actually found something */
1344         if (ACPI_FAILURE(status) || !boot_ec->handle)
1345                 goto error;
1346         if (saved_ec) {
1347                 /* try to find good ECDT from ASUSTek */
1348                 if (saved_ec->command_addr != boot_ec->command_addr ||
1349                     saved_ec->data_addr != boot_ec->data_addr ||
1350                     saved_ec->gpe != boot_ec->gpe ||
1351                     saved_ec->handle != boot_ec->handle)
1352                         pr_info("ASUSTek keeps feeding us with broken "
1353                         "ECDT tables, which are very hard to workaround. "
1354                         "Trying to use DSDT EC info instead. Please send "
1355                         "output of acpidump to linux-acpi@vger.kernel.org\n");
1356                 kfree(saved_ec);
1357                 saved_ec = NULL;
1358         } else {
1359                 /* We really need to limit this workaround; the only ASUS
1360                  * machines that need it have a fake EC._INI method, so use
1361                  * that as a flag. Keep the boot_ec struct as it will be
1362                  * needed soon. */
1363                 if (!dmi_name_in_vendors("ASUS") ||
1364                     !acpi_has_method(boot_ec->handle, "_INI"))
1365                         return -ENODEV;
1366         }
1367 install:
1368         if (!ec_install_handlers(boot_ec)) {
1369                 first_ec = boot_ec;
1370                 return 0;
1371         }
1372 error:
1373         kfree(boot_ec);
1374         kfree(saved_ec);
1375         boot_ec = NULL;
1376         return -ENODEV;
1377 }
1378
1379 static struct acpi_driver acpi_ec_driver = {
1380         .name = "ec",
1381         .class = ACPI_EC_CLASS,
1382         .ids = ec_device_ids,
1383         .ops = {
1384                 .add = acpi_ec_add,
1385                 .remove = acpi_ec_remove,
1386                 },
1387 };
1388
1389 int __init acpi_ec_init(void)
1390 {
1391         int result = 0;
1392
1393         /* Now register the driver for the EC */
1394         result = acpi_bus_register_driver(&acpi_ec_driver);
1395         if (result < 0)
1396                 return -ENODEV;
1397
1398         return result;
1399 }
1400
1401 /* EC driver currently not unloadable */
1402 #if 0
1403 static void __exit acpi_ec_exit(void)
1404 {
1405
1406         acpi_bus_unregister_driver(&acpi_ec_driver);
1407 }
1408 #endif  /* 0 */