3 * Implementation of primary alsa driver code base for Intel HD Audio.
5 * Copyright(c) 2004 Intel Corporation. All rights reserved.
7 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
8 * PeiSen Hou <pshou@realtek.com.tw>
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <sound/core.h>
31 #include <sound/initval.h>
33 #include "hda_controller.h"
35 #define CREATE_TRACE_POINTS
36 #include "hda_intel_trace.h"
38 /* DSP lock helpers */
/*
 * When the DSP firmware loader is configured, streams can be claimed for DSP
 * loading; guard that state with a real mutex and expose the ->locked flag.
 */
39 #ifdef CONFIG_SND_HDA_DSP_LOADER
40 #define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex)
41 #define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
42 #define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
43 #define dsp_is_locked(dev) ((dev)->locked)
/*
 * No DSP loader: compile the helpers out as no-ops.
 * NOTE(review): the #else line itself is missing from this chunk.
 */
45 #define dsp_lock_init(dev) do {} while (0)
46 #define dsp_lock(dev) do {} while (0)
47 #define dsp_unlock(dev) do {} while (0)
48 #define dsp_is_locked(dev) 0
52 * AZX stream operations.
/*
 * azx_stream_start - start DMA for one stream descriptor.
 * Marks the stream "insufficient" (the first IRQ after start has no usable
 * position yet), enables the stream's bit in INTCTL, then sets DMA start and
 * the SD interrupt-mask bits in SD_CTL.
 * NOTE(review): lines are missing from this chunk (e.g. the opening brace);
 * comments only, code left untouched.
 */
56 static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
59 * Before stream start, initialize parameter
61 azx_dev->insufficient = 1;
/* enable the per-stream interrupt source */
64 azx_writel(chip, INTCTL,
65 azx_readl(chip, INTCTL) | (1 << azx_dev->index))
66 /* set DMA start and interrupt mask */
67 azx_sd_writeb(chip, azx_dev, SD_CTL,
68 azx_sd_readb(chip, azx_dev, SD_CTL) |
69 SD_CTL_DMA_START | SD_INT_MASK);
/*
 * azx_stream_clear - stop stream DMA and clear its interrupt enables,
 * then ack any pending status bits in SD_STS.
 */
73 static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
75 azx_sd_writeb(chip, azx_dev, SD_CTL,
76 azx_sd_readb(chip, azx_dev, SD_CTL) &
77 ~(SD_CTL_DMA_START | SD_INT_MASK));
78 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
/*
 * azx_stream_stop - stop a stream: clear DMA/interrupt bits on the stream
 * descriptor, then disable the stream's interrupt source bit in INTCTL.
 * Exported for use by other HDA controller modules.
 */
82 void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
84 azx_stream_clear(chip, azx_dev);
86 azx_writel(chip, INTCTL,
87 azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
89 EXPORT_SYMBOL_GPL(azx_stream_stop);
/*
 * azx_stream_reset - reset a stream descriptor.
 * Stops the stream, asserts SD_CTL_STREAM_RESET, waits (bounded) for the
 * hardware to confirm the reset bit, deasserts it, waits for the bit to
 * clear again, then zeroes the cached DMA position.
 * NOTE(review): the timeout/val declarations and delay calls between the
 * poll loops are missing from this chunk.
 */
92 static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
97 azx_stream_clear(chip, azx_dev);
99 azx_sd_writeb(chip, azx_dev, SD_CTL,
100 azx_sd_readb(chip, azx_dev, SD_CTL) |
101 SD_CTL_STREAM_RESET);
/* wait for the hardware to latch the reset bit */
104 while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
105 SD_CTL_STREAM_RESET) && --timeout)
107 val &= ~SD_CTL_STREAM_RESET;
108 azx_sd_writeb(chip, azx_dev, SD_CTL, val);
112 /* waiting for hardware to report that the stream is out of reset */
113 while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
114 SD_CTL_STREAM_RESET) && --timeout)
117 /* reset first position - may not be synced with hw at this time */
118 *azx_dev->posbuf = 0;
122 * set up the SD for streaming
/*
 * azx_setup_controller - program one stream descriptor for streaming:
 * stream tag, cyclic buffer length (CBL), stream format, last-valid-index
 * (LVI), BDL base address, the shared DMA position buffer, and finally the
 * per-stream interrupt enables.
 */
124 static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
127 /* make sure the run bit is zero for SD */
128 azx_stream_clear(chip, azx_dev);
129 /* program the stream_tag */
130 val = azx_sd_readl(chip, azx_dev, SD_CTL);
131 val = (val & ~SD_CTL_STREAM_TAG_MASK) |
132 (azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
/* non-snoop mode wants traffic-priority set for coherency */
133 if (!azx_snoop(chip))
134 val |= SD_CTL_TRAFFIC_PRIO;
135 azx_sd_writel(chip, azx_dev, SD_CTL, val);
137 /* program the length of samples in cyclic buffer */
138 azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);
140 /* program the stream format */
141 /* this value needs to be the same as the one programmed */
142 azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);
144 /* program the stream LVI (last valid index) of the BDL */
145 azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);
147 /* program the BDL address */
148 /* lower BDL address */
149 azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
150 /* upper BDL address */
151 azx_sd_writel(chip, azx_dev, SD_BDLPU,
152 upper_32_bits(azx_dev->bdl.addr));
154 /* enable the position buffer */
/* only needed when either direction actually uses the posbuf */
155 if (chip->position_fix[0] != POS_FIX_LPIB ||
156 chip->position_fix[1] != POS_FIX_LPIB) {
157 if (!(azx_readl(chip, DPLBASE) & ICH6_DPLBASE_ENABLE))
158 azx_writel(chip, DPLBASE,
159 (u32)chip->posbuf.addr | ICH6_DPLBASE_ENABLE);
162 /* set the interrupt enable bits in the descriptor control register */
163 azx_sd_writel(chip, azx_dev, SD_CTL,
164 azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);
169 /* assign a stream for the PCM */
/*
 * azx_assign_device - pick a free stream descriptor for a substream.
 * Builds a non-zero key from (device, substream number, direction) and scans
 * the playback or capture descriptor range for an unopened, non-DSP-locked
 * entry; a descriptor previously used with the same key is preferred.
 * NOTE(review): several loop/return lines are missing from this chunk, so
 * the preference logic below is only partially visible.
 */
170 static inline struct azx_dev *
171 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
174 struct azx_dev *res = NULL;
175 /* make a non-zero unique key for the substream */
176 int key = (substream->pcm->device << 16) | (substream->number << 2) |
177 (substream->stream + 1);
/* select the index range for the substream's direction */
179 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
180 dev = chip->playback_index_offset;
181 nums = chip->playback_streams;
183 dev = chip->capture_index_offset;
184 nums = chip->capture_streams;
186 for (i = 0; i < nums; i++, dev++) {
187 struct azx_dev *azx_dev = &chip->azx_dev[dev];
189 if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
191 if (res->assigned_key == key) {
193 res->assigned_key = key;
203 res->assigned_key = key;
209 /* release the assigned stream */
/* body not visible in this chunk; presumably clears ->opened — TODO confirm */
210 static inline void azx_release_device(struct azx_dev *azx_dev)
/*
 * azx_cc_read - cyclecounter read callback: return the controller's
 * free-running WALLCLK register value for the stream owning this counter.
 */
215 static cycle_t azx_cc_read(const struct cyclecounter *cc)
217 struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
218 struct snd_pcm_substream *substream = azx_dev->substream;
219 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
220 struct azx *chip = apcm->chip;
222 return azx_readl(chip, WALLCLK);
/*
 * azx_timecounter_init - set up the per-stream timecounter over the 32-bit
 * 24 MHz wall clock. With @force, the counter's last cycle value is
 * overridden with @last so grouped streams share one start cycle.
 */
225 static void azx_timecounter_init(struct snd_pcm_substream *substream,
226 bool force, cycle_t last)
228 struct azx_dev *azx_dev = get_azx_dev(substream);
229 struct timecounter *tc = &azx_dev->azx_tc;
230 struct cyclecounter *cc = &azx_dev->azx_cc;
233 cc->read = azx_cc_read;
234 cc->mask = CLOCKSOURCE_MASK(32);
237 * Converting from 24 MHz to ns means applying a 125/3 factor.
238 * To avoid any saturation issues in intermediate operations,
239 * the 125 factor is applied first. The division is applied
240 * last after reading the timecounter value.
241 * Applying the 1/3 factor as part of the multiplication
242 * requires at least 20 bits for a decent precision, however
243 * overflows occur after about 4 hours or less, not a option.
/* the remaining /3 is applied by the reader (see azx_get_wallclock_tstamp) */
246 cc->mult = 125; /* saturation after 195 years */
249 nsec = 0; /* audio time is elapsed time since trigger */
250 timecounter_init(tc, cc, nsec);
253 * force timecounter to use predefined value,
254 * used for synchronized starts
256 tc->cycle_last = last;
/*
 * azx_adjust_codec_delay - adjust a wall-clock timestamp by the codec's own
 * delay (queried via ops.get_delay, converted frames -> ns at the stream
 * rate). Capture adds the delay; playback subtracts it, clamped at zero.
 */
259 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
262 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
263 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
264 u64 codec_frames, codec_nsecs;
/* no codec-side delay callback: timestamp is returned unmodified */
266 if (!hinfo->ops.get_delay)
269 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
270 codec_nsecs = div_u64(codec_frames * 1000000000LL,
271 substream->runtime->rate);
273 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
274 return nsec + codec_nsecs;
276 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
/*
 * setup_bdle - fill buffer-descriptor-list entries for @size bytes starting
 * at @ofs in the DMA buffer. Each entry carries a 64-bit address, a chunk
 * length, and an IOC flag set only on the final entry of a fragment (and
 * only when @with_ioc). Honors the 4K-boundary quirk for CTHDA chips.
 * NOTE(review): the loop framing and return lines are missing from this
 * chunk; only the per-entry body is visible.
 */
282 static int setup_bdle(struct azx *chip,
283 struct snd_dma_buffer *dmab,
284 struct azx_dev *azx_dev, u32 **bdlp,
285 int ofs, int size, int with_ioc)
/* hard cap on the number of BDL entries per stream */
293 if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
296 addr = snd_sgbuf_get_addr(dmab, ofs);
297 /* program the address field of the BDL entry */
298 bdl[0] = cpu_to_le32((u32)addr);
299 bdl[1] = cpu_to_le32(upper_32_bits(addr));
300 /* program the size field of the BDL entry */
301 chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
302 /* one BDLE cannot cross 4K boundary on CTHDA chips */
303 if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
304 u32 remain = 0x1000 - (ofs & 0xfff);
308 bdl[2] = cpu_to_le32(chunk);
309 /* program the IOC to enable interrupt
310 * only when the whole fragment is processed
313 bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
/*
 * azx_setup_periods - build the BDL for the whole cyclic buffer: optionally
 * split off a small bdl_pos_adj-derived prologue chunk (to compensate DMA
 * position granularity), then one fragment per period. Fails if the entry
 * budget is exceeded.
 */
325 static int azx_setup_periods(struct azx *chip,
326 struct snd_pcm_substream *substream,
327 struct azx_dev *azx_dev)
330 int i, ofs, periods, period_bytes;
333 /* reset BDL address */
334 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
335 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
337 period_bytes = azx_dev->period_bytes;
338 periods = azx_dev->bufsize / period_bytes;
340 /* program the initial BDL entries */
341 bdl = (u32 *)azx_dev->bdl.area;
/* per-chip position adjustment (frames), scaled to the stream rate below */
345 if (chip->bdl_pos_adj)
346 pos_adj = chip->bdl_pos_adj[chip->dev_index];
347 if (!azx_dev->no_period_wakeup && pos_adj > 0) {
348 struct snd_pcm_runtime *runtime = substream->runtime;
349 int pos_align = pos_adj;
/* convert from 48kHz-reference frames to the actual rate, rounding up */
350 pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
354 pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
356 pos_adj = frames_to_bytes(runtime, pos_adj);
/* adjustment must stay smaller than one period */
357 if (pos_adj >= period_bytes) {
358 dev_warn(chip->card->dev,"Too big adjustment %d\n",
/* prologue BDL entry covering the first pos_adj bytes, with IOC */
362 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
364 &bdl, ofs, pos_adj, true);
371 for (i = 0; i < periods; i++) {
/* last period is shortened by the prologue chunk */
372 if (i == periods - 1 && pos_adj)
373 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
375 period_bytes - pos_adj, 0);
377 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
380 !azx_dev->no_period_wakeup);
387 dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
388 azx_dev->bufsize, period_bytes);
/*
 * azx_pcm_close - PCM close callback: detach the stream descriptor under
 * the register spinlock, release the descriptor, call the codec's close op
 * and drop the codec power reference; all under the open mutex.
 */
396 static int azx_pcm_close(struct snd_pcm_substream *substream)
398 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
399 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
400 struct azx *chip = apcm->chip;
401 struct azx_dev *azx_dev = get_azx_dev(substream);
404 mutex_lock(&chip->open_mutex);
405 spin_lock_irqsave(&chip->reg_lock, flags);
406 azx_dev->substream = NULL;
407 azx_dev->running = 0;
408 spin_unlock_irqrestore(&chip->reg_lock, flags);
409 azx_release_device(azx_dev);
410 hinfo->ops.close(hinfo, apcm->codec, substream);
411 snd_hda_power_down(apcm->codec);
412 mutex_unlock(&chip->open_mutex);
/*
 * azx_pcm_hw_params - allocate the substream's DMA buffer pages via the
 * chip ops. Performed under the stream's DSP lock; bails out (lines not
 * visible here) if the stream is claimed by the DSP loader.
 */
416 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
417 struct snd_pcm_hw_params *hw_params)
419 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
420 struct azx *chip = apcm->chip;
423 dsp_lock(get_azx_dev(substream));
424 if (dsp_is_locked(get_azx_dev(substream))) {
429 ret = chip->ops->substream_alloc_pages(chip, substream,
430 params_buffer_bytes(hw_params));
432 dsp_unlock(get_azx_dev(substream));
/*
 * azx_pcm_hw_free - undo hw_params: reset the stream registers and cached
 * geometry (skipped while the DSP loader owns the stream), clean up the
 * codec side, free the DMA pages, and clear the prepared flag.
 */
436 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
438 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
439 struct azx_dev *azx_dev = get_azx_dev(substream);
440 struct azx *chip = apcm->chip;
441 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
444 /* reset BDL address */
446 if (!dsp_is_locked(azx_dev)) {
447 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
448 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
449 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
450 azx_dev->bufsize = 0;
451 azx_dev->period_bytes = 0;
452 azx_dev->format_val = 0;
455 snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
457 err = chip->ops->substream_free_pages(chip, substream);
458 azx_dev->prepared = 0;
/*
 * azx_pcm_prepare - PCM prepare callback: reset the stream, compute the HDA
 * format value, (re)build the BDL if the geometry/format changed, program
 * the stream descriptor, read the FIFO size for playback, apply the CA-IBG
 * stream-tag workaround, and run the codec-side prepare.
 */
463 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
465 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
466 struct azx *chip = apcm->chip;
467 struct azx_dev *azx_dev = get_azx_dev(substream);
468 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
469 struct snd_pcm_runtime *runtime = substream->runtime;
470 unsigned int bufsize, period_bytes, format_val, stream_tag;
472 struct hda_spdif_out *spdif =
473 snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
474 unsigned short ctls = spdif ? spdif->ctls : 0;
/* stream claimed by the DSP loader: error path (lines not visible here) */
477 if (dsp_is_locked(azx_dev)) {
482 azx_stream_reset(chip, azx_dev);
483 format_val = snd_hda_calc_stream_format(runtime->rate,
489 dev_err(chip->card->dev,
490 "invalid format_val, rate=%d, ch=%d, format=%d\n",
491 runtime->rate, runtime->channels, runtime->format);
496 bufsize = snd_pcm_lib_buffer_bytes(substream);
497 period_bytes = snd_pcm_lib_period_bytes(substream);
499 dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
500 bufsize, format_val);
/* rebuild the BDL only when any of the cached parameters changed */
502 if (bufsize != azx_dev->bufsize ||
503 period_bytes != azx_dev->period_bytes ||
504 format_val != azx_dev->format_val ||
505 runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
506 azx_dev->bufsize = bufsize;
507 azx_dev->period_bytes = period_bytes;
508 azx_dev->format_val = format_val;
509 azx_dev->no_period_wakeup = runtime->no_period_wakeup;
510 err = azx_setup_periods(chip, substream, azx_dev);
515 /* when LPIB delay correction gives a small negative value,
516 * we ignore it; currently set the threshold statically to
519 if (runtime->period_size > 64)
520 azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
522 azx_dev->delay_negative_threshold = 0;
524 /* wallclk has 24Mhz clock source */
525 azx_dev->period_wallclk = (((runtime->period_size * 24000) /
526 runtime->rate) * 1000);
527 azx_setup_controller(chip, azx_dev);
528 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
530 azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
532 azx_dev->fifo_size = 0;
534 stream_tag = azx_dev->stream_tag;
535 /* CA-IBG chips need the playback stream starting from 1 */
536 if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
537 stream_tag > chip->capture_streams)
538 stream_tag -= chip->capture_streams;
539 err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
540 azx_dev->format_val, substream);
544 azx_dev->prepared = 1;
/*
 * azx_pcm_trigger - PCM trigger callback. Visible flow: map the trigger
 * command to start/stop, collect the stream-index bits of every linked
 * substream on the same card, set the SSYNC bits (old or new register per
 * chip caps), start/stop each stream under the register spinlock, busy-wait
 * for FIFO-ready (on start) or RUN-bit clear (on stop), clear the SSYNC
 * bits, and re-init the timecounters with a shared start cycle.
 * NOTE(review): many interleaved lines (rstart handling, loop bodies,
 * returns) are missing from this chunk.
 */
549 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
551 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
552 struct azx *chip = apcm->chip;
553 struct azx_dev *azx_dev;
554 struct snd_pcm_substream *s;
555 int rstart = 0, start, nsync = 0, sbits = 0;
558 azx_dev = get_azx_dev(substream);
559 trace_azx_pcm_trigger(chip, azx_dev, cmd);
/* refuse to trigger an unprepared or DSP-claimed stream */
561 if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
565 case SNDRV_PCM_TRIGGER_START:
567 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
568 case SNDRV_PCM_TRIGGER_RESUME:
571 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
572 case SNDRV_PCM_TRIGGER_SUSPEND:
573 case SNDRV_PCM_TRIGGER_STOP:
/* gather the linked substreams belonging to this card */
580 snd_pcm_group_for_each_entry(s, substream) {
581 if (s->pcm->card != substream->pcm->card)
583 azx_dev = get_azx_dev(s);
584 sbits |= 1 << azx_dev->index;
586 snd_pcm_trigger_done(s, substream);
589 spin_lock(&chip->reg_lock);
591 /* first, set SYNC bits of corresponding streams */
592 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
593 azx_writel(chip, OLD_SSYNC,
594 azx_readl(chip, OLD_SSYNC) | sbits);
596 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);
598 snd_pcm_group_for_each_entry(s, substream) {
599 if (s->pcm->card != substream->pcm->card)
601 azx_dev = get_azx_dev(s);
603 azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
/* resume case: back-date start_wallclk by one period */
605 azx_dev->start_wallclk -=
606 azx_dev->period_wallclk;
607 azx_stream_start(chip, azx_dev);
609 azx_stream_stop(chip, azx_dev);
611 azx_dev->running = start;
613 spin_unlock(&chip->reg_lock);
615 /* wait until all FIFOs get ready */
616 for (timeout = 5000; timeout; timeout--) {
618 snd_pcm_group_for_each_entry(s, substream) {
619 if (s->pcm->card != substream->pcm->card)
621 azx_dev = get_azx_dev(s);
622 if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
631 /* wait until all RUN bits are cleared */
632 for (timeout = 5000; timeout; timeout--) {
634 snd_pcm_group_for_each_entry(s, substream) {
635 if (s->pcm->card != substream->pcm->card)
637 azx_dev = get_azx_dev(s);
638 if (azx_sd_readb(chip, azx_dev, SD_CTL) &
647 spin_lock(&chip->reg_lock);
648 /* reset SYNC bits */
649 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
650 azx_writel(chip, OLD_SSYNC,
651 azx_readl(chip, OLD_SSYNC) & ~sbits);
653 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
655 azx_timecounter_init(substream, 0, 0);
659 /* same start cycle for master and group */
660 azx_dev = get_azx_dev(substream);
661 cycle_last = azx_dev->azx_tc.cycle_last;
663 snd_pcm_group_for_each_entry(s, substream) {
664 if (s->pcm->card != substream->pcm->card)
666 azx_timecounter_init(s, 1, cycle_last);
670 spin_unlock(&chip->reg_lock);
674 /* get the current DMA position with correction on VIA chips */
/*
 * For capture, VIA chips need the position reconstructed: take the DMA
 * position modulo the period, read the input-stream FIFO size from a raw
 * MMIO offset, derive the nearest prior period boundary from the link
 * position, and combine boundary + in-period offset into the real position.
 */
675 static unsigned int azx_via_get_position(struct azx *chip,
676 struct azx_dev *azx_dev)
678 unsigned int link_pos, mini_pos, bound_pos;
679 unsigned int mod_link_pos, mod_dma_pos, mod_mini_pos;
680 unsigned int fifo_size;
682 link_pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
683 if (azx_dev->substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
684 /* Playback, no problem using link position */
690 * use mod to get the DMA position just like old chipset
692 mod_dma_pos = le32_to_cpu(*azx_dev->posbuf);
693 mod_dma_pos %= azx_dev->period_bytes;
695 /* azx_dev->fifo_size can't get FIFO size of in stream.
696 * Get from base address + offset.
698 fifo_size = readw(chip->remap_addr + VIA_IN_STREAM0_FIFO_SIZE_OFFSET);
/* just after start: link position still inside the FIFO prefill */
700 if (azx_dev->insufficient) {
701 /* Link position never gather than FIFO size */
702 if (link_pos <= fifo_size)
705 azx_dev->insufficient = 0;
/* back off by the FIFO size, wrapping around the ring buffer */
708 if (link_pos <= fifo_size)
709 mini_pos = azx_dev->bufsize + link_pos - fifo_size;
711 mini_pos = link_pos - fifo_size;
713 /* Find nearest previous boudary */
714 mod_mini_pos = mini_pos % azx_dev->period_bytes;
715 mod_link_pos = link_pos % azx_dev->period_bytes;
716 if (mod_link_pos >= fifo_size)
717 bound_pos = link_pos - mod_link_pos;
718 else if (mod_dma_pos >= mod_mini_pos)
719 bound_pos = mini_pos - mod_mini_pos;
721 bound_pos = mini_pos - mod_mini_pos + azx_dev->period_bytes;
722 if (bound_pos >= azx_dev->bufsize)
726 /* Calculate real DMA position we want */
727 return bound_pos + mod_dma_pos;
/*
 * azx_get_position - read the current stream position (bytes) using the
 * per-stream position_fix policy (LPIB register, VIA combo algorithm, or
 * the DMA position buffer). With @with_check and POS_FIX_AUTO, a bogus
 * posbuf value (0 or ~0) demotes the stream to LPIB permanently. Also
 * computes the runtime delay from the LPIB/posbuf difference when the chip
 * supports it, disabling the feature if the value looks unstable.
 */
730 unsigned int azx_get_position(struct azx *chip,
731 struct azx_dev *azx_dev,
734 struct snd_pcm_substream *substream = azx_dev->substream;
735 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
737 int stream = substream->stream;
738 struct hda_pcm_stream *hinfo = apcm->hinfo[stream];
741 switch (chip->position_fix[stream]) {
744 pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
746 case POS_FIX_VIACOMBO:
747 pos = azx_via_get_position(chip, azx_dev);
750 /* use the position buffer */
751 pos = le32_to_cpu(*azx_dev->posbuf);
752 if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) {
753 if (!pos || pos == (u32)-1) {
754 dev_info(chip->card->dev,
755 "Invalid position buffer, using LPIB read method instead.\n");
756 chip->position_fix[stream] = POS_FIX_LPIB;
757 pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
759 chip->position_fix[stream] = POS_FIX_POSBUF;
/* clamp/wrap handling for out-of-range positions (lines not visible) */
764 if (pos >= azx_dev->bufsize)
767 /* calculate runtime delay from LPIB */
768 if (substream->runtime &&
769 chip->position_fix[stream] == POS_FIX_POSBUF &&
770 (chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY)) {
771 unsigned int lpib_pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
/* playback: LPIB trails posbuf; capture: the other way round */
772 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
773 delay = pos - lpib_pos;
775 delay = lpib_pos - pos;
/* small negative values below the threshold are treated as wrap-around */
777 if (delay >= azx_dev->delay_negative_threshold)
780 delay += azx_dev->bufsize;
782 if (delay >= azx_dev->period_bytes) {
783 dev_info(chip->card->dev,
784 "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
785 delay, azx_dev->period_bytes);
787 chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY;
789 delay = bytes_to_frames(substream->runtime, delay);
/* fold in the codec-reported delay and publish it to the runtime */
792 if (substream->runtime) {
793 if (hinfo->ops.get_delay)
794 delay += hinfo->ops.get_delay(hinfo, apcm->codec,
796 substream->runtime->delay = delay;
799 trace_azx_get_position(chip, azx_dev, pos, delay);
802 EXPORT_SYMBOL_GPL(azx_get_position);
/*
 * azx_pcm_pointer - PCM pointer callback: current position in frames,
 * without the posbuf sanity check (with_check = false).
 */
804 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
806 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
807 struct azx *chip = apcm->chip;
808 struct azx_dev *azx_dev = get_azx_dev(substream);
809 return bytes_to_frames(substream->runtime,
810 azx_get_position(chip, azx_dev, false));
/*
 * azx_get_wallclock_tstamp - read the stream timecounter, apply the
 * deferred /3 of the 125/3 24MHz->ns conversion (see azx_timecounter_init),
 * adjust by the codec delay and return it as a timespec.
 */
813 static int azx_get_wallclock_tstamp(struct snd_pcm_substream *substream,
816 struct azx_dev *azx_dev = get_azx_dev(substream);
819 nsec = timecounter_read(&azx_dev->azx_tc);
820 nsec = div_u64(nsec, 3); /* can be optimized */
821 nsec = azx_adjust_codec_delay(substream, nsec);
823 *ts = ns_to_timespec(nsec);
/*
 * Baseline hardware capability template; azx_pcm_open() copies it into the
 * runtime and then narrows formats/rates/channels per codec stream info.
 */
828 static struct snd_pcm_hardware azx_pcm_hw = {
829 .info = (SNDRV_PCM_INFO_MMAP |
830 SNDRV_PCM_INFO_INTERLEAVED |
831 SNDRV_PCM_INFO_BLOCK_TRANSFER |
832 SNDRV_PCM_INFO_MMAP_VALID |
833 /* No full-resume yet implemented */
834 /* SNDRV_PCM_INFO_RESUME |*/
835 SNDRV_PCM_INFO_PAUSE |
836 SNDRV_PCM_INFO_SYNC_START |
837 SNDRV_PCM_INFO_HAS_WALL_CLOCK |
838 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
839 .formats = SNDRV_PCM_FMTBIT_S16_LE,
840 .rates = SNDRV_PCM_RATE_48000,
845 .buffer_bytes_max = AZX_MAX_BUF_SIZE,
846 .period_bytes_min = 128,
847 .period_bytes_max = AZX_MAX_BUF_SIZE / 2,
849 .periods_max = AZX_MAX_FRAG,
/*
 * azx_pcm_open - PCM open callback: assign a stream descriptor, populate
 * runtime->hw from the template and the codec's stream capabilities, apply
 * buffer/period constraints (integer periods, wall-clock wrap avoidance,
 * 128-byte or 4-byte alignment depending on align_buffer_size), power the
 * codec up and run its open op, then attach the descriptor to the
 * substream. Error paths unwind the device, codec open and power.
 */
853 static int azx_pcm_open(struct snd_pcm_substream *substream)
855 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
856 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
857 struct azx *chip = apcm->chip;
858 struct azx_dev *azx_dev;
859 struct snd_pcm_runtime *runtime = substream->runtime;
864 mutex_lock(&chip->open_mutex);
865 azx_dev = azx_assign_device(chip, substream);
/* no free stream descriptor available */
866 if (azx_dev == NULL) {
867 mutex_unlock(&chip->open_mutex);
870 runtime->hw = azx_pcm_hw;
871 runtime->hw.channels_min = hinfo->channels_min;
872 runtime->hw.channels_max = hinfo->channels_max;
873 runtime->hw.formats = hinfo->formats;
874 runtime->hw.rates = hinfo->rates;
875 snd_pcm_limit_hw_rates(runtime);
876 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
878 /* avoid wrap-around with wall-clock */
879 snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
883 if (chip->align_buffer_size)
884 /* constrain buffer sizes to be multiple of 128
885 bytes. This is more efficient in terms of memory
886 access but isn't required by the HDA spec and
887 prevents users from specifying exact period/buffer
888 sizes. For example for 44.1kHz, a period size set
889 to 20ms will be rounded to 19.59ms. */
892 /* Don't enforce steps on buffer sizes, still need to
893 be multiple of 4 bytes (HDA spec). Tested on Intel
894 HDA controllers, may not work on all devices where
895 option needs to be disabled */
898 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
900 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
/* d3wait variant: waits until the codec is really powered up */
902 snd_hda_power_up_d3wait(apcm->codec);
903 err = hinfo->ops.open(hinfo, apcm->codec, substream);
905 azx_release_device(azx_dev);
906 snd_hda_power_down(apcm->codec);
907 mutex_unlock(&chip->open_mutex);
/* re-limit rates: the codec open op may have changed runtime->hw */
910 snd_pcm_limit_hw_rates(runtime);
912 if (snd_BUG_ON(!runtime->hw.channels_min) ||
913 snd_BUG_ON(!runtime->hw.channels_max) ||
914 snd_BUG_ON(!runtime->hw.formats) ||
915 snd_BUG_ON(!runtime->hw.rates)) {
916 azx_release_device(azx_dev);
917 hinfo->ops.close(hinfo, apcm->codec, substream);
918 snd_hda_power_down(apcm->codec);
919 mutex_unlock(&chip->open_mutex);
923 /* disable WALLCLOCK timestamps for capture streams
924 until we figure out how to handle digital inputs */
925 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
926 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK;
928 spin_lock_irqsave(&chip->reg_lock, flags);
929 azx_dev->substream = substream;
930 azx_dev->running = 0;
931 spin_unlock_irqrestore(&chip->reg_lock, flags);
933 runtime->private_data = azx_dev;
934 snd_pcm_set_sync(substream);
935 mutex_unlock(&chip->open_mutex);
/*
 * azx_pcm_mmap - PCM mmap callback: let the chip ops tweak the VMA first
 * (if a pcm_mmap_prepare hook exists), then fall back to the default ALSA
 * buffer mmap.
 */
939 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
940 struct vm_area_struct *area)
942 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
943 struct azx *chip = apcm->chip;
944 if (chip->ops->pcm_mmap_prepare)
945 chip->ops->pcm_mmap_prepare(substream, area);
946 return snd_pcm_lib_default_mmap(substream, area);
/* ALSA PCM operation table wired to the azx_* callbacks above */
949 static struct snd_pcm_ops azx_pcm_ops = {
950 .open = azx_pcm_open,
951 .close = azx_pcm_close,
952 .ioctl = snd_pcm_lib_ioctl,
953 .hw_params = azx_pcm_hw_params,
954 .hw_free = azx_pcm_hw_free,
955 .prepare = azx_pcm_prepare,
956 .trigger = azx_pcm_trigger,
957 .pointer = azx_pcm_pointer,
958 .wall_clock = azx_get_wallclock_tstamp,
959 .mmap = azx_pcm_mmap,
960 .page = snd_pcm_sgbuf_ops_page,
/*
 * azx_pcm_free - PCM private_free hook: unlink the azx_pcm from the chip's
 * pcm_list (the kfree of apcm is presumably on a missing line — TODO confirm).
 */
963 static void azx_pcm_free(struct snd_pcm *pcm)
965 struct azx_pcm *apcm = pcm->private_data;
967 list_del(&apcm->list);
/* upper bound for the preallocated DMA buffer per PCM (32 MiB) */
972 #define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
/*
 * azx_attach_pcm_stream - create an ALSA PCM device for a codec PCM:
 * reject duplicate device numbers, allocate the snd_pcm and the azx_pcm
 * wrapper, hook up ops for each direction that has substreams, and
 * preallocate SG DMA buffers (CONFIG_SND_HDA_PREALLOC_SIZE, capped).
 */
974 static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
975 struct hda_pcm *cpcm)
977 struct azx *chip = bus->private_data;
979 struct azx_pcm *apcm;
980 int pcm_dev = cpcm->device;
984 list_for_each_entry(apcm, &chip->pcm_list, list) {
985 if (apcm->pcm->device == pcm_dev) {
986 dev_err(chip->card->dev, "PCM %d already exists\n",
991 err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
992 cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
993 cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
997 strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
998 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
1003 apcm->codec = codec;
1004 pcm->private_data = apcm;
1005 pcm->private_free = azx_pcm_free;
1006 if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
1007 pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
1008 list_add_tail(&apcm->list, &chip->pcm_list);
/* s = 0/1 iterates playback and capture directions */
1010 for (s = 0; s < 2; s++) {
1011 apcm->hinfo[s] = &cpcm->stream[s];
1012 if (cpcm->stream[s].substreams)
1013 snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
1015 /* buffer pre-allocation */
1016 size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
1017 if (size > MAX_PREALLOC_SIZE)
1018 size = MAX_PREALLOC_SIZE;
1019 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
1021 size, MAX_PREALLOC_SIZE);
1023 pcm->dev = &codec->dev;
1028 * CORB / RIRB interface
/*
 * azx_alloc_cmd_io - allocate one DMA page shared by the CORB and RIRB
 * ring buffers (CORB at offset 0, RIRB at 2048; see azx_init_cmd_io).
 */
1030 static int azx_alloc_cmd_io(struct azx *chip)
1034 /* single page (at least 4096 bytes) must suffice for both ringbuffes */
1035 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1036 PAGE_SIZE, &chip->rb);
1038 dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
1041 EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
/*
 * azx_init_cmd_io - program the CORB/RIRB hardware: base addresses, 256
 * entry sizes (ULI needs them explicit), reset of the CORB read pointer
 * with a two-stage timed handshake, RIRB write-pointer reset, response
 * interrupt count (0xc0 for the CTX workaround, else 1), and finally DMA
 * enable for both rings. Runs entirely under the register spinlock.
 */
1043 static void azx_init_cmd_io(struct azx *chip)
1047 spin_lock_irq(&chip->reg_lock);
/* CORB lives in the first half of the shared DMA page */
1049 chip->corb.addr = chip->rb.addr;
1050 chip->corb.buf = (u32 *)chip->rb.area;
1051 azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
1052 azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));
1054 /* set the corb size to 256 entries (ULI requires explicitly) */
1055 azx_writeb(chip, CORBSIZE, 0x02);
1056 /* set the corb write pointer to 0 */
1057 azx_writew(chip, CORBWP, 0);
1059 /* reset the corb hw read pointer */
1060 azx_writew(chip, CORBRP, ICH6_CORBRP_RST);
1061 for (timeout = 1000; timeout > 0; timeout--) {
1062 if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
1067 dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
1068 azx_readw(chip, CORBRP));
/* second stage: clear the reset bit and wait for CORBRP to read back 0 */
1070 azx_writew(chip, CORBRP, 0);
1071 for (timeout = 1000; timeout > 0; timeout--) {
1072 if (azx_readw(chip, CORBRP) == 0)
1077 dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
1078 azx_readw(chip, CORBRP));
1080 /* enable corb dma */
1081 azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);
/* RIRB occupies the second half of the shared DMA page */
1084 chip->rirb.addr = chip->rb.addr + 2048;
1085 chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
1086 chip->rirb.wp = chip->rirb.rp = 0;
1087 memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
1088 azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
1089 azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
1091 /* set the rirb size to 256 entries (ULI requires explicitly) */
1092 azx_writeb(chip, RIRBSIZE, 0x02);
1093 /* reset the rirb hw write pointer */
1094 azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
1095 /* set N=1, get RIRB response interrupt for new entry */
1096 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1097 azx_writew(chip, RINTCNT, 0xc0);
1099 azx_writew(chip, RINTCNT, 1);
1100 /* enable rirb dma and response irq */
1101 azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN);
1102 spin_unlock_irq(&chip->reg_lock);
1104 EXPORT_SYMBOL_GPL(azx_init_cmd_io);
/*
 * azx_free_cmd_io - stop the CORB/RIRB DMA engines (the DMA page itself is
 * freed elsewhere).
 */
1106 static void azx_free_cmd_io(struct azx *chip)
1108 spin_lock_irq(&chip->reg_lock);
1109 /* disable ringbuffer DMAs */
1110 azx_writeb(chip, RIRBCTL, 0);
1111 azx_writeb(chip, CORBCTL, 0);
1112 spin_unlock_irq(&chip->reg_lock);
1114 EXPORT_SYMBOL_GPL(azx_free_cmd_io);
/*
 * azx_command_addr - extract the codec address from a verb's top nibble,
 * validating it against AZX_MAX_CODECS (error handling lines not visible).
 */
1116 static unsigned int azx_command_addr(u32 cmd)
1118 unsigned int addr = cmd >> 28;
1120 if (addr >= AZX_MAX_CODECS) {
1128 /* send a command */
/*
 * azx_corb_send_cmd - enqueue a verb in the CORB under the register lock:
 * read CORBWP (bailing out if it reads back as invalid, e.g. in D3), wrap
 * it modulo the ring size, check against CORBRP for a full ring, then bump
 * the outstanding-command count, store the verb, and advance CORBWP.
 */
1129 static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
1131 struct azx *chip = bus->private_data;
1132 unsigned int addr = azx_command_addr(val);
1133 unsigned int wp, rp;
1135 spin_lock_irq(&chip->reg_lock);
1137 /* add command to corb */
1138 wp = azx_readw(chip, CORBWP);
1140 /* something wrong, controller likely turned to D3 */
1141 spin_unlock_irq(&chip->reg_lock);
1145 wp %= ICH6_MAX_CORB_ENTRIES;
1147 rp = azx_readw(chip, CORBRP);
1149 /* oops, it's full */
1150 spin_unlock_irq(&chip->reg_lock);
1154 chip->rirb.cmds[addr]++;
1155 chip->corb.buf[wp] = cpu_to_le32(val);
1156 azx_writew(chip, CORBWP, wp);
1158 spin_unlock_irq(&chip->reg_lock);
/* RIRB extended-response flag: entry is an unsolicited event */
1163 #define ICH6_RIRB_EX_UNSOL_EV (1<<4)
1165 /* retrieve RIRB entry - called from interrupt handler */
/*
 * Drain new RIRB entries up to the hardware write pointer. Each 8-byte
 * entry is (response, extended-response); the codec address comes from the
 * low nibble of res_ex. Unsolicited events are queued to the HDA core;
 * solicited responses are stored and decrement the outstanding count;
 * anything else is logged (rate-limited) as spurious.
 */
1166 static void azx_update_rirb(struct azx *chip)
1168 unsigned int rp, wp;
1172 wp = azx_readw(chip, RIRBWP);
1174 /* something wrong, controller likely turned to D3 */
1178 if (wp == chip->rirb.wp)
1182 while (chip->rirb.rp != wp) {
1184 chip->rirb.rp %= ICH6_MAX_RIRB_ENTRIES;
1186 rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
1187 res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
1188 res = le32_to_cpu(chip->rirb.buf[rp]);
1189 addr = res_ex & 0xf;
/* reject out-of-range or unprobed codec addresses */
1190 if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
1191 dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
1196 else if (res_ex & ICH6_RIRB_EX_UNSOL_EV)
1197 snd_hda_queue_unsol_event(chip->bus, res, res_ex);
1198 else if (chip->rirb.cmds[addr]) {
1199 chip->rirb.res[addr] = res;
1201 chip->rirb.cmds[addr]--;
1202 } else if (printk_ratelimit()) {
1203 dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
1205 chip->last_cmd[addr]);
1210 /* receive a response */
/*
 * azx_rirb_get_response - wait (up to ~1s) for the codec's response.
 * On timeout it escalates through recovery steps: retry with one-shot
 * polling, switch permanently to polling mode, disable MSI, fail probing
 * outright, request a bus reset if allowed, and finally fall back to the
 * single-command interface (releasing CORB/RIRB and masking unsolicited
 * responses). Many branch/return lines are missing from this chunk.
 */
1211 static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1214 struct azx *chip = bus->private_data;
1215 unsigned long timeout;
1216 unsigned long loopcounter;
1220 timeout = jiffies + msecs_to_jiffies(1000);
1222 for (loopcounter = 0;; loopcounter++) {
/* in polling mode the IRQ handler doesn't drain the RIRB — do it here */
1223 if (chip->polling_mode || do_poll) {
1224 spin_lock_irq(&chip->reg_lock);
1225 azx_update_rirb(chip);
1226 spin_unlock_irq(&chip->reg_lock);
1228 if (!chip->rirb.cmds[addr]) {
1230 bus->rirb_error = 0;
1233 chip->poll_count = 0;
1234 return chip->rirb.res[addr]; /* the last value */
1236 if (time_after(jiffies, timeout))
/* after many spins (or on quirky hw) yield instead of busy-waiting */
1238 if (bus->needs_damn_long_delay || loopcounter > 3000)
1239 msleep(2); /* temporary workaround */
1246 if (!bus->no_response_fallback)
1249 if (!chip->polling_mode && chip->poll_count < 2) {
1250 dev_dbg(chip->card->dev,
1251 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1252 chip->last_cmd[addr]);
1259 if (!chip->polling_mode) {
1260 dev_warn(chip->card->dev,
1261 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1262 chip->last_cmd[addr]);
1263 chip->polling_mode = 1;
1268 dev_warn(chip->card->dev,
1269 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1270 chip->last_cmd[addr]);
1271 if (chip->ops->disable_msi_reset_irq(chip) &&
1272 chip->ops->disable_msi_reset_irq(chip) < 0) {
1273 bus->rirb_error = 1;
1279 if (chip->probing) {
1280 /* If this critical timeout happens during the codec probing
1281 * phase, this is likely an access to a non-existing codec
1282 * slot. Better to return an error and reset the system.
1287 /* a fatal communication error; need either to reset or to fallback
1288 * to the single_cmd mode
1290 bus->rirb_error = 1;
1291 if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1292 bus->response_reset = 1;
1293 return -1; /* give a chance to retry */
1296 dev_err(chip->card->dev,
1297 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1298 chip->last_cmd[addr]);
1299 chip->single_cmd = 1;
1300 bus->response_reset = 0;
1301 /* release CORB/RIRB */
1302 azx_free_cmd_io(chip);
1303 /* disable unsolicited responses */
1304 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_UNSOL);
1309 * Use the single immediate command instead of CORB/RIRB for simplicity
1311 * Note: according to Intel, this is not preferred use. The command was
1312 * intended for the BIOS only, and may get confused with unsolicited
1313 * responses. So, we shouldn't use it for normal operation from the
1315 * I left the code, however, for debugging/testing purposes.
1318 /* receive a response */
/*
 * Wait for an immediate-command response (single_cmd mode): poll the IRS
 * valid bit and stash the read value in chip->rirb.res[addr].  On
 * timeout the slot is marked with -1 and a rate-limited debug message is
 * emitted.
 */
1319 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
1324 /* check IRV busy bit */
1325 if (azx_readw(chip, IRS) & ICH6_IRS_VALID) {
1326 /* reuse rirb.res as the response return value */
1327 chip->rirb.res[addr] = azx_readl(chip, IR);
/* timed out: log (rate-limited) and flag the slot with -1 */
1332 if (printk_ratelimit())
1333 dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
1334 azx_readw(chip, IRS));
1335 chip->rirb.res[addr] = -1;
1339 /* send a command */
/*
 * Send a verb through the single immediate-command registers (single_cmd
 * mode): once the ICB busy bit is clear, clear the IRV valid bit, write
 * the verb to IC, kick IRS, and wait for the response.
 */
1340 static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
1342 struct azx *chip = bus->private_data;
1343 unsigned int addr = azx_command_addr(val);
1346 bus->rirb_error = 0;
1348 /* check ICB busy bit */
1349 if (!((azx_readw(chip, IRS) & ICH6_IRS_BUSY))) {
1350 /* Clear IRV valid bit */
1351 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1353 azx_writel(chip, IC, val);
1354 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1356 return azx_single_wait_for_response(chip, addr);
/* controller kept the busy bit set; give up with a rate-limited log */
1360 if (printk_ratelimit())
1361 dev_dbg(chip->card->dev,
1362 "send_cmd timeout: IRS=0x%x, val=0x%x\n",
1363 azx_readw(chip, IRS), val);
1367 /* receive a response */
/* Return the response value stored by azx_single_wait_for_response(). */
1368 static unsigned int azx_single_get_response(struct hda_bus *bus,
1371 struct azx *chip = bus->private_data;
1372 return chip->rirb.res[addr];
1376 * The below are the main callbacks from hda_codec.
1378 * They are just the skeleton to call sub-callbacks according to the
1379 * current setting of chip->single_cmd.
1382 /* send a command */
/*
 * Bus callback: record the verb for timeout diagnostics, then dispatch
 * to the single-command or CORB path depending on chip->single_cmd.
 */
1383 static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1385 struct azx *chip = bus->private_data;
/* remembered so timeout messages can show the last command per codec */
1389 chip->last_cmd[azx_command_addr(val)] = val;
1390 if (chip->single_cmd)
1391 return azx_single_send_cmd(bus, val);
1393 return azx_corb_send_cmd(bus, val);
/* NOTE(review): EXPORT_SYMBOL_GPL on a static function does not link;
 * verify whether the function should be non-static or the export dropped.
 */
1395 EXPORT_SYMBOL_GPL(azx_send_cmd);
1397 /* get a response */
/*
 * Bus callback: fetch the response for @addr via the single-cmd or RIRB
 * path, matching the dispatch in azx_send_cmd().
 */
1398 static unsigned int azx_get_response(struct hda_bus *bus,
1401 struct azx *chip = bus->private_data;
1404 if (chip->single_cmd)
1405 return azx_single_get_response(bus, addr);
1407 return azx_rirb_get_response(bus, addr);
/* NOTE(review): exporting a static symbol does not link — verify. */
1409 EXPORT_SYMBOL_GPL(azx_get_response);
1411 #ifdef CONFIG_SND_HDA_DSP_LOADER
1413 * DSP loading code (e.g. for CA0132)
1416 /* use the first stream for loading DSP */
/* Return the first playback stream, reserved for DSP firmware download. */
1417 static struct azx_dev *
1418 azx_get_dsp_loader_dev(struct azx *chip)
1420 return &chip->azx_dev[chip->playback_index_offset];
/*
 * Prepare a stream for DSP firmware download: claim the loader stream,
 * save its state, allocate the SG transfer buffer, program the BDL and
 * controller, and return the stream tag.  The caller later uses
 * azx_load_dsp_trigger()/azx_load_dsp_cleanup().  Fails if the loader
 * stream is already running or locked.
 */
1423 static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
1424 unsigned int byte_size,
1425 struct snd_dma_buffer *bufp)
1428 struct azx *chip = bus->private_data;
1429 struct azx_dev *azx_dev;
1432 azx_dev = azx_get_dsp_loader_dev(chip);
/* refuse if the stream is in use; otherwise save state and lock it */
1435 spin_lock_irq(&chip->reg_lock);
1436 if (azx_dev->running || azx_dev->locked) {
1437 spin_unlock_irq(&chip->reg_lock);
1441 azx_dev->prepared = 0;
/* snapshot the stream so it can be restored after the download */
1442 chip->saved_azx_dev = *azx_dev;
1443 azx_dev->locked = 1;
1444 spin_unlock_irq(&chip->reg_lock);
1446 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
1451 azx_dev->bufsize = byte_size;
1452 azx_dev->period_bytes = byte_size;
1453 azx_dev->format_val = format;
1455 azx_stream_reset(chip, azx_dev);
1457 /* reset BDL address */
1458 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1459 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
/* single BDL entry covering the whole firmware image */
1462 bdl = (u32 *)azx_dev->bdl.area;
1463 err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
1467 azx_setup_controller(chip, azx_dev);
1468 dsp_unlock(azx_dev);
1469 return azx_dev->stream_tag;
/* error path: free the buffer and restore the saved stream state */
1472 chip->ops->dma_free_pages(chip, bufp);
1474 spin_lock_irq(&chip->reg_lock);
1475 if (azx_dev->opened)
1476 *azx_dev = chip->saved_azx_dev;
1477 azx_dev->locked = 0;
1478 spin_unlock_irq(&chip->reg_lock);
1480 dsp_unlock(azx_dev);
/* Start or stop the DSP-download stream DMA and track its run state. */
1484 static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
1486 struct azx *chip = bus->private_data;
1487 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1490 azx_stream_start(chip, azx_dev);
1492 azx_stream_stop(chip, azx_dev);
1493 azx_dev->running = start;
/*
 * Undo azx_load_dsp_prepare(): clear the stream registers, free the
 * firmware buffer, and restore the loader stream's saved state.
 */
1496 static void azx_load_dsp_cleanup(struct hda_bus *bus,
1497 struct snd_dma_buffer *dmab)
1499 struct azx *chip = bus->private_data;
1500 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
/* nothing to do unless a download was actually prepared */
1502 if (!dmab->area || !azx_dev->locked)
1506 /* reset BDL address */
1507 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0)
1508 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1509 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
1510 azx_dev->bufsize = 0;
1511 azx_dev->period_bytes = 0;
1512 azx_dev->format_val = 0;
1514 chip->ops->dma_free_pages(chip, dmab);
/* hand the stream back in its pre-download state */
1517 spin_lock_irq(&chip->reg_lock);
1518 if (azx_dev->opened)
1519 *azx_dev = chip->saved_azx_dev;
1520 azx_dev->locked = 0;
1521 spin_unlock_irq(&chip->reg_lock);
1522 dsp_unlock(azx_dev);
1524 #endif /* CONFIG_SND_HDA_DSP_LOADER */
/*
 * Allocate the DMA areas shared by all streams: one BDL per stream, the
 * common DMA position buffer (8 bytes per stream), and the CORB/RIRB
 * command buffer.  Errors are logged against the card device.
 */
1526 int azx_alloc_stream_pages(struct azx *chip)
1529 struct snd_card *card = chip->card;
1531 for (i = 0; i < chip->num_streams; i++) {
1532 dsp_lock_init(&chip->azx_dev[i]);
1533 /* allocate memory for the BDL for each stream */
1534 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1536 &chip->azx_dev[i].bdl);
1538 dev_err(card->dev, "cannot allocate BDL\n");
1542 /* allocate memory for the position buffer */
1543 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1544 chip->num_streams * 8, &chip->posbuf);
1546 dev_err(card->dev, "cannot allocate posbuf\n");
1550 /* allocate CORB/RIRB */
1551 err = azx_alloc_cmd_io(chip);
1556 EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
/* Free everything allocated by azx_alloc_stream_pages() (BDLs, CORB/RIRB
 * ring buffer, position buffer); safe to call with partial allocation.
 */
1558 void azx_free_stream_pages(struct azx *chip)
1561 if (chip->azx_dev) {
1562 for (i = 0; i < chip->num_streams; i++)
1563 if (chip->azx_dev[i].bdl.area)
1564 chip->ops->dma_free_pages(
1565 chip, &chip->azx_dev[i].bdl);
1568 chip->ops->dma_free_pages(chip, &chip->rb);
1569 if (chip->posbuf.area)
1570 chip->ops->dma_free_pages(chip, &chip->posbuf);
1575 * Lowlevel interface
1578 /* enter link reset */
/*
 * Put the controller into reset by clearing the GCTL reset bit, then
 * wait up to 100ms for the bit to read back as zero.
 */
1579 void azx_enter_link_reset(struct azx *chip)
1581 unsigned long timeout;
1583 /* reset controller */
1584 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_RESET);
1586 timeout = jiffies + msecs_to_jiffies(100);
1587 while ((azx_readb(chip, GCTL) & ICH6_GCTL_RESET) &&
1588 time_before(jiffies, timeout))
1589 usleep_range(500, 1000);
1591 EXPORT_SYMBOL_GPL(azx_enter_link_reset);
1593 /* exit link reset */
/*
 * Take the controller out of reset by setting the GCTL reset bit, then
 * wait up to 100ms until GCTL reads non-zero again (GCTL reads as 0
 * while the link is still in reset).
 */
1594 static void azx_exit_link_reset(struct azx *chip)
1596 unsigned long timeout;
1598 azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | ICH6_GCTL_RESET);
1600 timeout = jiffies + msecs_to_jiffies(100);
1601 while (!azx_readb(chip, GCTL) &&
1602 time_before(jiffies, timeout))
1603 usleep_range(500, 1000);
1606 /* reset codec link */
/*
 * Full codec-link reset: clear STATESTS, cycle the link reset with the
 * spec-mandated settle delays, re-enable unsolicited responses (unless
 * in single_cmd mode), and capture the codec presence bitmap from
 * STATESTS the first time through.
 */
1607 static int azx_reset(struct azx *chip, int full_reset)
1612 /* clear STATESTS */
1613 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1615 /* reset controller */
1616 azx_enter_link_reset(chip);
1618 /* delay for >= 100us for codec PLL to settle per spec
1619 * Rev 0.9 section 5.5.1
1621 usleep_range(500, 1000);
1623 /* Bring controller out of reset */
1624 azx_exit_link_reset(chip);
1626 /* Brent Chartrand said to wait >= 540us for codecs to initialize */
1627 usleep_range(1000, 1200);
1630 /* check to see if controller is ready */
1631 if (!azx_readb(chip, GCTL)) {
1632 dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
1636 /* Accept unsolicited responses */
1637 if (!chip->single_cmd)
1638 azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
/* detect codecs: STATESTS has one bit per responding codec address */
1642 if (!chip->codec_mask) {
1643 chip->codec_mask = azx_readw(chip, STATESTS);
1644 dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
1651 /* enable interrupts */
/* Set the controller (CIE) and global (GIE) interrupt enable bits. */
1652 static void azx_int_enable(struct azx *chip)
1654 /* enable controller CIE and GIE */
1655 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
1656 ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN);
1659 /* disable interrupts */
/* Mask per-stream descriptor interrupts, all SIE bits, and CIE/GIE. */
1660 static void azx_int_disable(struct azx *chip)
1664 /* disable interrupts in stream descriptor */
1665 for (i = 0; i < chip->num_streams; i++) {
1666 struct azx_dev *azx_dev = &chip->azx_dev[i];
1667 azx_sd_writeb(chip, azx_dev, SD_CTL,
1668 azx_sd_readb(chip, azx_dev, SD_CTL) &
1672 /* disable SIE for all streams */
1673 azx_writeb(chip, INTCTL, 0);
1675 /* disable controller CIE and GIE */
1676 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
1677 ~(ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN));
1680 /* clear interrupts */
/* Acknowledge pending stream, STATESTS, RIRB and global int status. */
1681 static void azx_int_clear(struct azx *chip)
1685 /* clear stream status */
1686 for (i = 0; i < chip->num_streams; i++) {
1687 struct azx_dev *azx_dev = &chip->azx_dev[i];
1688 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1691 /* clear STATESTS */
1692 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1694 /* clear rirb status */
1695 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1697 /* clear int status */
1698 azx_writel(chip, INTSTS, ICH6_INT_CTRL_EN | ICH6_INT_ALL_STREAM);
1702 * reset and start the controller registers
/*
 * Bring the controller to an operational state: reset the link, clear
 * and enable interrupts, set up the CORB/RIRB command I/O (unless in
 * single_cmd mode) and program the DMA position buffer address.
 * No-op when the chip is already initialized.
 */
1704 void azx_init_chip(struct azx *chip, int full_reset)
1706 if (chip->initialized)
1709 /* reset controller */
1710 azx_reset(chip, full_reset);
1712 /* initialize interrupts */
1713 azx_int_clear(chip);
1714 azx_int_enable(chip);
1716 /* initialize the codec command I/O */
1717 if (!chip->single_cmd)
1718 azx_init_cmd_io(chip);
1720 /* program the position buffer */
1721 azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
1722 azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
1724 chip->initialized = 1;
1726 EXPORT_SYMBOL_GPL(azx_init_chip);
/*
 * Reverse of azx_init_chip(): disable and clear interrupts, release
 * CORB/RIRB and the position buffer.  No-op when not initialized.
 */
1728 void azx_stop_chip(struct azx *chip)
1730 if (!chip->initialized)
1733 /* disable interrupts */
1734 azx_int_disable(chip);
1735 azx_int_clear(chip);
1737 /* disable CORB/RIRB */
1738 azx_free_cmd_io(chip);
1740 /* disable position buffer */
1741 azx_writel(chip, DPLBASE, 0);
1742 azx_writel(chip, DPUBASE, 0);
1744 chip->initialized = 0;
1746 EXPORT_SYMBOL_GPL(azx_stop_chip);
/*
 * Main interrupt handler: acknowledges per-stream interrupt status,
 * notifies ALSA of elapsed periods for running substreams, and handles
 * RIRB response interrupts.
 */
1751 irqreturn_t azx_interrupt(int irq, void *dev_id)
1753 struct azx *chip = dev_id;
1754 struct azx_dev *azx_dev;
1759 #ifdef CONFIG_PM_RUNTIME
/* ignore IRQs while runtime-suspended (the IRQ line may be shared) */
1760 if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
1761 if (chip->card->dev->power.runtime_status != RPM_ACTIVE)
1765 spin_lock(&chip->reg_lock);
1767 if (chip->disabled) {
1768 spin_unlock(&chip->reg_lock);
/* 0xffffffff means the device has dropped off the bus — not ours */
1772 status = azx_readl(chip, INTSTS);
1773 if (status == 0 || status == 0xffffffff) {
1774 spin_unlock(&chip->reg_lock);
1778 for (i = 0; i < chip->num_streams; i++) {
1779 azx_dev = &chip->azx_dev[i];
1780 if (status & azx_dev->sd_int_sta_mask) {
/* ack the stream's status bits regardless of the substream state */
1781 sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
1782 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1783 if (!azx_dev->substream || !azx_dev->running ||
1784 !(sd_status & SD_INT_COMPLETE))
1786 /* check whether this IRQ is really acceptable */
1787 if (!chip->ops->position_check ||
1788 chip->ops->position_check(chip, azx_dev)) {
/* drop the lock: snd_pcm_period_elapsed() may re-enter the driver */
1789 spin_unlock(&chip->reg_lock);
1790 snd_pcm_period_elapsed(azx_dev->substream);
1791 spin_lock(&chip->reg_lock);
1796 /* clear rirb int */
1797 status = azx_readb(chip, RIRBSTS);
1798 if (status & RIRB_INT_MASK) {
1799 if (status & RIRB_INT_RESPONSE) {
1800 if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
1802 azx_update_rirb(chip);
1804 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1807 spin_unlock(&chip->reg_lock);
1811 EXPORT_SYMBOL_GPL(azx_interrupt);
1818 * Probe the given codec address
/*
 * Send a get-parameter(VENDOR_ID) verb to the root node of the codec at
 * @addr under the bus command mutex; a valid response means a codec is
 * present in that slot.
 */
1820 static int probe_codec(struct azx *chip, int addr)
1822 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1823 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1826 mutex_lock(&chip->bus->cmd_mutex);
1828 azx_send_cmd(chip->bus, cmd);
1829 res = azx_get_response(chip->bus, addr);
1831 mutex_unlock(&chip->bus->cmd_mutex);
1834 dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
/* Bus-reset callback: restart the controller and refresh codec state. */
1838 static void azx_bus_reset(struct hda_bus *bus)
1840 struct azx *chip = bus->private_data;
1843 azx_stop_chip(chip);
1844 azx_init_chip(chip, 1);
1846 if (chip->initialized) {
/* suspend all PCM streams, then cycle the codecs through a
 * suspend/resume pair so they are re-initialized after the reset
 */
1848 list_for_each_entry(p, &chip->pcm_list, list)
1849 snd_pcm_suspend_all(p->pcm);
1850 snd_hda_suspend(chip->bus);
1851 snd_hda_resume(chip->bus);
1858 /* power-up/down the controller */
/* Runtime-PM notify callback; only acts with AZX_DCAPS_PM_RUNTIME set. */
1859 static void azx_power_notify(struct hda_bus *bus, bool power_up)
1861 struct azx *chip = bus->private_data;
1863 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
1867 pm_runtime_get_sync(chip->card->dev);
1869 pm_runtime_put_sync(chip->card->dev);
/*
 * Return the jack-poll interval in jiffies for this device, taken from
 * the jackpoll_ms module parameter.  A value of 0 disables polling;
 * values outside 50..60000 ms are rejected with a warning.
 */
1873 static int get_jackpoll_interval(struct azx *chip)
1878 if (!chip->jackpoll_ms)
1881 i = chip->jackpoll_ms[chip->dev_index];
1884 if (i < 50 || i > 60000)
1887 j = msecs_to_jiffies(i);
1889 dev_warn(chip->card->dev,
1890 "jackpoll_ms value out of range: %d\n", i);
1894 /* Codec initialization */
/*
 * Create the HDA bus and codec instances: fill the bus template with our
 * command/response/PCM/reset/PM callbacks, probe each codec slot allowed
 * by codec_mask & codec_probe_mask, apply chipset-specific workarounds,
 * and instantiate a codec object for every slot that answered.
 */
1895 int azx_codec_create(struct azx *chip, const char *model,
1896 unsigned int max_slots,
1899 struct hda_bus_template bus_temp;
1902 memset(&bus_temp, 0, sizeof(bus_temp));
1903 bus_temp.private_data = chip;
1904 bus_temp.modelname = model;
1905 bus_temp.pci = chip->pci;
1906 bus_temp.ops.command = azx_send_cmd;
1907 bus_temp.ops.get_response = azx_get_response;
1908 bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
1909 bus_temp.ops.bus_reset = azx_bus_reset;
1911 bus_temp.power_save = power_save_to;
1912 bus_temp.ops.pm_notify = azx_power_notify;
1914 #ifdef CONFIG_SND_HDA_DSP_LOADER
1915 bus_temp.ops.load_dsp_prepare = azx_load_dsp_prepare;
1916 bus_temp.ops.load_dsp_trigger = azx_load_dsp_trigger;
1917 bus_temp.ops.load_dsp_cleanup = azx_load_dsp_cleanup;
1920 err = snd_hda_bus_new(chip->card, &bus_temp, &chip->bus);
/* some chipsets need a long delay when reading RIRB entries */
1924 if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
1925 dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
1926 chip->bus->needs_damn_long_delay = 1;
1931 max_slots = AZX_DEFAULT_CODECS;
1933 /* First try to probe all given codec slots */
1934 for (c = 0; c < max_slots; c++) {
1935 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1936 if (probe_codec(chip, c) < 0) {
1937 /* Some BIOSen give you wrong codec addresses
1940 dev_warn(chip->card->dev,
1941 "Codec #%d probe error; disabling it...\n", c);
1942 chip->codec_mask &= ~(1 << c);
1943 /* More badly, accessing to a non-existing
1944 * codec often screws up the controller chip,
1945 * and disturbs the further communications.
1946 * Thus if an error occurs during probing,
1947 * better to reset the controller chip to
1948 * get back to the sanity state.
1950 azx_stop_chip(chip);
1951 azx_init_chip(chip, 1);
1956 /* AMD chipsets often cause the communication stalls upon certain
1957 * sequence like the pin-detection. It seems that forcing the synced
1958 * access works around the stall. Grrr...
1960 if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1961 dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1962 chip->bus->sync_write = 1;
1963 chip->bus->allow_bus_reset = 1;
1966 /* Then create codec instances */
1967 for (c = 0; c < max_slots; c++) {
1968 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1969 struct hda_codec *codec;
1970 err = snd_hda_codec_new(chip->bus, c, &codec);
1973 codec->jackpoll_interval = get_jackpoll_interval(chip);
1974 codec->beep_mode = chip->beep_mode;
/* fail overall creation when not a single codec came up */
1979 dev_err(chip->card->dev, "no codecs initialized\n");
1984 EXPORT_SYMBOL_GPL(azx_codec_create);
1986 /* configure each codec instance */
/* Run snd_hda_codec_configure() over every codec on the bus. */
1987 int azx_codec_configure(struct azx *chip)
1989 struct hda_codec *codec;
1990 list_for_each_entry(codec, &chip->bus->codec_list, list) {
1991 snd_hda_codec_configure(codec);
1995 EXPORT_SYMBOL_GPL(azx_codec_configure);
1997 /* mixer creation - all stuff is implemented in hda module */
/* Thin wrapper: delegates mixer/control creation to the hda core. */
1998 int azx_mixer_create(struct azx *chip)
2000 return snd_hda_build_controls(chip->bus);
2002 EXPORT_SYMBOL_GPL(azx_mixer_create);
2005 /* initialize SD streams */
/*
 * Assign each stream descriptor its position-buffer slot, MMIO register
 * base, interrupt status mask, and a unique non-zero stream tag.
 */
2006 int azx_init_stream(struct azx *chip)
2010 /* initialize each stream (aka device)
2011 * assign the starting bdl address to each stream (device)
2014 for (i = 0; i < chip->num_streams; i++) {
2015 struct azx_dev *azx_dev = &chip->azx_dev[i];
2016 azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
2017 /* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
2018 azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
2019 /* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
2020 azx_dev->sd_int_sta_mask = 1 << i;
2021 /* stream tag: must be non-zero and unique */
2023 azx_dev->stream_tag = i + 1;
2028 EXPORT_SYMBOL_GPL(azx_init_stream);
/* module metadata */
2030 MODULE_LICENSE("GPL");
/* fixed typo in description string: "funcitons" -> "functions" */
2031 MODULE_DESCRIPTION("Common HDA driver functions");