1 // SPDX-License-Identifier: GPL-2.0-only
3 // Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
5 // Authors: Cezary Rojewski <cezary.rojewski@intel.com>
6 // Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
9 // Krzysztof Hejmowski <krzysztof.hejmowski@intel.com>
10 // Michal Sienkiewicz <michal.sienkiewicz@intel.com>
13 // for sharing Intel AudioDSP expertise and helping shape the very
14 // foundation of this driver
#include <linux/module.h>
#include <linux/pci.h>
#include <sound/hda_codec.h>
#include <sound/hda_i915.h>
#include <sound/hda_register.h>
#include <sound/hdaudio.h>
#include <sound/hdaudio_ext.h>
#include <sound/intel-dsp-config.h>
#include <sound/intel-nhlt.h>
#include "../../codecs/hda.h"
#include "avs.h"
#include "cldma.h"
/*
 * Module parameters overriding the PCI power-gating (PGCTL) and
 * clock-gating (CGCTL) register update policy. Read-only at runtime (0444);
 * the masks select which bits avs_hda_update_config_dword() may touch.
 */
static u32 pgctl_mask = AZX_PGCTL_LSRMD_MASK;
module_param(pgctl_mask, uint, 0444);
MODULE_PARM_DESC(pgctl_mask, "PCI PGCTL policy override");
static u32 cgctl_mask = AZX_CGCTL_MISCBDCGE_MASK;
module_param(cgctl_mask, uint, 0444);
MODULE_PARM_DESC(cgctl_mask, "PCI CGCTL policy override");
40 avs_hda_update_config_dword(struct hdac_bus *bus, u32 reg, u32 mask, u32 value)
42 struct pci_dev *pci = to_pci_dev(bus->dev);
45 pci_read_config_dword(pci, reg, &data);
47 data |= (value & mask);
48 pci_write_config_dword(pci, reg, data);
51 void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable)
53 u32 value = enable ? 0 : pgctl_mask;
55 avs_hda_update_config_dword(&adev->base.core, AZX_PCIREG_PGCTL, pgctl_mask, value);
58 static void avs_hdac_clock_gating_enable(struct hdac_bus *bus, bool enable)
60 u32 value = enable ? cgctl_mask : 0;
62 avs_hda_update_config_dword(bus, AZX_PCIREG_CGCTL, cgctl_mask, value);
65 void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable)
67 avs_hdac_clock_gating_enable(&adev->base.core, enable);
70 void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable)
73 if (atomic_inc_and_test(&adev->l1sen_counter))
74 snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN,
77 if (atomic_dec_return(&adev->l1sen_counter) == -1)
78 snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN, 0);
82 static int avs_hdac_bus_init_streams(struct hdac_bus *bus)
84 unsigned int cp_streams, pb_streams;
87 gcap = snd_hdac_chip_readw(bus, GCAP);
88 cp_streams = (gcap >> 8) & 0x0F;
89 pb_streams = (gcap >> 12) & 0x0F;
90 bus->num_streams = cp_streams + pb_streams;
92 snd_hdac_ext_stream_init_all(bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
93 snd_hdac_ext_stream_init_all(bus, cp_streams, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);
95 return snd_hdac_bus_alloc_stream_pages(bus);
98 static bool avs_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
100 struct hdac_ext_link *hlink;
103 avs_hdac_clock_gating_enable(bus, false);
104 ret = snd_hdac_bus_init_chip(bus, full_reset);
106 /* Reset stream-to-link mapping */
107 list_for_each_entry(hlink, &bus->hlink_list, list)
108 writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);
110 avs_hdac_clock_gating_enable(bus, true);
112 /* Set DUM bit to address incorrect position reporting for capture
113 * streams. In order to do so, CTRL needs to be out of reset state
115 snd_hdac_chip_updatel(bus, VS_EM2, AZX_VS_EM2_DUM, AZX_VS_EM2_DUM);
120 static int probe_codec(struct hdac_bus *bus, int addr)
122 struct hda_codec *codec;
123 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
124 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
125 unsigned int res = -1;
128 mutex_lock(&bus->cmd_mutex);
129 snd_hdac_bus_send_cmd(bus, cmd);
130 snd_hdac_bus_get_response(bus, addr, &res);
131 mutex_unlock(&bus->cmd_mutex);
135 dev_dbg(bus->dev, "codec #%d probed OK: 0x%x\n", addr, res);
137 codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "hdaudioB%dD%d", bus->idx, addr);
139 dev_err(bus->dev, "init codec failed: %ld\n", PTR_ERR(codec));
140 return PTR_ERR(codec);
143 * Allow avs_core suspend by forcing suspended state on all
144 * of its codec child devices. Component interested in
145 * dealing with hda codecs directly takes pm responsibilities
147 pm_runtime_set_suspended(hda_codec_dev(codec));
149 /* configure effectively creates new ASoC component */
150 ret = snd_hda_codec_configure(codec);
152 dev_err(bus->dev, "failed to config codec %d\n", ret);
159 static void avs_hdac_bus_probe_codecs(struct hdac_bus *bus)
163 /* First try to probe all given codec slots */
164 for (c = 0; c < HDA_MAX_CODECS; c++) {
165 if (!(bus->codec_mask & BIT(c)))
168 if (!probe_codec(bus, c))
169 /* success, continue probing */
173 * Some BIOSen give you wrong codec addresses
176 dev_warn(bus->dev, "Codec #%d probe error; disabling it...\n", c);
177 bus->codec_mask &= ~BIT(c);
179 * More badly, accessing to a non-existing
180 * codec often screws up the controller bus,
181 * and disturbs the further communications.
182 * Thus if an error occurs during probing,
183 * better to reset the controller bus to get
184 * back to the sanity state.
186 snd_hdac_bus_stop_chip(bus);
187 avs_hdac_bus_init_chip(bus, true);
191 static void avs_hda_probe_work(struct work_struct *work)
193 struct avs_dev *adev = container_of(work, struct avs_dev, probe_work);
194 struct hdac_bus *bus = &adev->base.core;
195 struct hdac_ext_link *hlink;
198 pm_runtime_set_active(bus->dev); /* clear runtime_error flag */
200 snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
201 avs_hdac_bus_init_chip(bus, true);
202 avs_hdac_bus_probe_codecs(bus);
203 snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
205 /* with all codecs probed, links can be powered down */
206 list_for_each_entry(hlink, &bus->hlink_list, list)
207 snd_hdac_ext_bus_link_put(bus, hlink);
209 snd_hdac_ext_bus_ppcap_enable(bus, true);
210 snd_hdac_ext_bus_ppcap_int_enable(bus, true);
212 ret = avs_dsp_first_boot_firmware(adev);
216 adev->nhlt = intel_nhlt_init(adev->dev);
218 dev_info(bus->dev, "platform has no NHLT\n");
219 avs_debugfs_init(adev);
221 avs_register_all_boards(adev);
224 pm_runtime_set_autosuspend_delay(bus->dev, 2000);
225 pm_runtime_use_autosuspend(bus->dev);
226 pm_runtime_mark_last_busy(bus->dev);
227 pm_runtime_put_autosuspend(bus->dev);
228 pm_runtime_allow(bus->dev);
231 static void hdac_stream_update_pos(struct hdac_stream *stream, u64 buffer_size)
233 u64 prev_pos, pos, num_bytes;
235 div64_u64_rem(stream->curr_pos, buffer_size, &prev_pos);
236 pos = snd_hdac_stream_get_pos_posbuf(stream);
239 num_bytes = (buffer_size - prev_pos) + pos;
241 num_bytes = pos - prev_pos;
243 stream->curr_pos += num_bytes;
246 /* called from IRQ */
247 static void hdac_update_stream(struct hdac_bus *bus, struct hdac_stream *stream)
249 if (stream->substream) {
250 snd_pcm_period_elapsed(stream->substream);
251 } else if (stream->cstream) {
252 u64 buffer_size = stream->cstream->runtime->buffer_size;
254 hdac_stream_update_pos(stream, buffer_size);
255 snd_compr_fragment_elapsed(stream->cstream);
259 static irqreturn_t hdac_bus_irq_handler(int irq, void *context)
261 struct hdac_bus *bus = context;
262 u32 mask, int_enable;
266 if (!pm_runtime_active(bus->dev))
269 spin_lock(&bus->reg_lock);
271 status = snd_hdac_chip_readl(bus, INTSTS);
272 if (status == 0 || status == UINT_MAX) {
273 spin_unlock(&bus->reg_lock);
278 status = snd_hdac_chip_readb(bus, RIRBSTS);
279 if (status & RIRB_INT_MASK) {
280 if (status & RIRB_INT_RESPONSE)
281 snd_hdac_bus_update_rirb(bus);
282 snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
285 mask = (0x1 << bus->num_streams) - 1;
287 status = snd_hdac_chip_readl(bus, INTSTS);
290 /* Disable stream interrupts; Re-enable in bottom half */
291 int_enable = snd_hdac_chip_readl(bus, INTCTL);
292 snd_hdac_chip_writel(bus, INTCTL, (int_enable & (~mask)));
293 ret = IRQ_WAKE_THREAD;
298 spin_unlock(&bus->reg_lock);
302 static irqreturn_t hdac_bus_irq_thread(int irq, void *context)
304 struct hdac_bus *bus = context;
310 status = snd_hdac_chip_readl(bus, INTSTS);
312 snd_hdac_bus_handle_stream_irq(bus, status, hdac_update_stream);
314 /* Re-enable stream interrupts */
315 mask = (0x1 << bus->num_streams) - 1;
316 spin_lock_irqsave(&bus->reg_lock, flags);
317 int_enable = snd_hdac_chip_readl(bus, INTCTL);
318 snd_hdac_chip_writel(bus, INTCTL, (int_enable | mask));
319 spin_unlock_irqrestore(&bus->reg_lock, flags);
324 static irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id)
326 struct avs_dev *adev = dev_id;
328 return avs_dsp_op(adev, irq_handler);
331 static irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id)
333 struct avs_dev *adev = dev_id;
335 return avs_dsp_op(adev, irq_thread);
338 static int avs_hdac_acquire_irq(struct avs_dev *adev)
340 struct hdac_bus *bus = &adev->base.core;
341 struct pci_dev *pci = to_pci_dev(bus->dev);
344 /* request one and check that we only got one interrupt */
345 ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
347 dev_err(adev->dev, "Failed to allocate IRQ vector: %d\n", ret);
351 ret = pci_request_irq(pci, 0, hdac_bus_irq_handler, hdac_bus_irq_thread, bus,
354 dev_err(adev->dev, "Failed to request stream IRQ handler: %d\n", ret);
358 ret = pci_request_irq(pci, 0, avs_dsp_irq_handler, avs_dsp_irq_thread, adev,
361 dev_err(adev->dev, "Failed to request IPC IRQ handler: %d\n", ret);
362 goto free_stream_irq;
368 pci_free_irq(pci, 0, bus);
370 pci_free_irq_vectors(pci);
374 static int avs_bus_init(struct avs_dev *adev, struct pci_dev *pci, const struct pci_device_id *id)
376 struct hda_bus *bus = &adev->base;
378 struct device *dev = &pci->dev;
381 ret = snd_hdac_ext_bus_init(&bus->core, dev, NULL, &soc_hda_ext_bus_ops);
385 bus->core.use_posbuf = 1;
386 bus->core.bdl_pos_adj = 0;
387 bus->core.sync_write = 1;
389 bus->mixer_assigned = -1;
390 mutex_init(&bus->prepare_mutex);
392 ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
395 ret = avs_ipc_init(ipc, dev);
399 adev->modcfg_buf = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
400 if (!adev->modcfg_buf)
404 adev->spec = (const struct avs_spec *)id->driver_data;
406 adev->hw_cfg.dsp_cores = hweight_long(AVS_MAIN_CORE_MASK);
407 INIT_WORK(&adev->probe_work, avs_hda_probe_work);
408 INIT_LIST_HEAD(&adev->comp_list);
409 INIT_LIST_HEAD(&adev->path_list);
410 INIT_LIST_HEAD(&adev->fw_list);
411 init_completion(&adev->fw_ready);
412 spin_lock_init(&adev->path_list_lock);
413 mutex_init(&adev->modres_mutex);
414 mutex_init(&adev->comp_list_mutex);
415 mutex_init(&adev->path_mutex);
420 static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
422 struct hdac_bus *bus;
423 struct avs_dev *adev;
424 struct device *dev = &pci->dev;
427 ret = snd_intel_dsp_driver_probe(pci);
428 if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_AVS)
431 ret = pcim_enable_device(pci);
435 adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
438 ret = avs_bus_init(adev, pci, id);
440 dev_err(dev, "failed to init avs bus: %d\n", ret);
444 ret = pci_request_regions(pci, "AVS HDAudio");
448 bus = &adev->base.core;
449 bus->addr = pci_resource_start(pci, 0);
450 bus->remap_addr = pci_ioremap_bar(pci, 0);
451 if (!bus->remap_addr) {
452 dev_err(bus->dev, "ioremap error\n");
457 adev->dsp_ba = pci_ioremap_bar(pci, 4);
459 dev_err(bus->dev, "ioremap error\n");
464 snd_hdac_bus_parse_capabilities(bus);
466 snd_hdac_ext_bus_get_ml_capabilities(bus);
468 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
469 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
470 dma_set_max_seg_size(dev, UINT_MAX);
472 ret = avs_hdac_bus_init_streams(bus);
474 dev_err(dev, "failed to init streams: %d\n", ret);
475 goto err_init_streams;
478 ret = avs_hdac_acquire_irq(adev);
480 dev_err(bus->dev, "failed to acquire irq: %d\n", ret);
481 goto err_acquire_irq;
485 pci_set_drvdata(pci, bus);
486 device_disable_async_suspend(dev);
488 ret = snd_hdac_i915_init(bus);
489 if (ret == -EPROBE_DEFER)
492 dev_info(bus->dev, "i915 init unsuccessful: %d\n", ret);
494 schedule_work(&adev->probe_work);
499 pci_free_irq(pci, 0, adev);
500 pci_free_irq(pci, 0, bus);
501 pci_free_irq_vectors(pci);
502 pci_clear_master(pci);
503 pci_set_drvdata(pci, NULL);
505 snd_hdac_bus_free_stream_pages(bus);
506 snd_hdac_ext_stream_free_all(bus);
508 iounmap(adev->dsp_ba);
510 iounmap(bus->remap_addr);
512 pci_release_regions(pci);
516 static void avs_pci_shutdown(struct pci_dev *pci)
518 struct hdac_bus *bus = pci_get_drvdata(pci);
519 struct avs_dev *adev = hdac_to_avs(bus);
521 cancel_work_sync(&adev->probe_work);
522 avs_ipc_block(adev->ipc);
524 snd_hdac_stop_streams(bus);
525 avs_dsp_op(adev, int_control, false);
526 snd_hdac_ext_bus_ppcap_int_enable(bus, false);
527 snd_hdac_ext_bus_link_power_down_all(bus);
529 snd_hdac_bus_stop_chip(bus);
530 snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
532 if (avs_platattr_test(adev, CLDMA))
533 pci_free_irq(pci, 0, &code_loader);
534 pci_free_irq(pci, 0, adev);
535 pci_free_irq(pci, 0, bus);
536 pci_free_irq_vectors(pci);
539 static void avs_pci_remove(struct pci_dev *pci)
541 struct hdac_device *hdev, *save;
542 struct hdac_bus *bus = pci_get_drvdata(pci);
543 struct avs_dev *adev = hdac_to_avs(bus);
545 cancel_work_sync(&adev->probe_work);
546 avs_ipc_block(adev->ipc);
548 avs_unregister_all_boards(adev);
550 avs_debugfs_exit(adev);
552 intel_nhlt_free(adev->nhlt);
554 if (avs_platattr_test(adev, CLDMA))
555 hda_cldma_free(&code_loader);
557 snd_hdac_stop_streams_and_chip(bus);
558 avs_dsp_op(adev, int_control, false);
559 snd_hdac_ext_bus_ppcap_int_enable(bus, false);
561 /* it is safe to remove all codecs from the system now */
562 list_for_each_entry_safe(hdev, save, &bus->codec_list, list)
563 snd_hda_codec_unregister(hdac_to_hda_codec(hdev));
565 snd_hdac_bus_free_stream_pages(bus);
566 snd_hdac_ext_stream_free_all(bus);
567 /* reverse ml_capabilities */
568 snd_hdac_ext_link_free_all(bus);
569 snd_hdac_ext_bus_exit(bus);
571 avs_dsp_core_disable(adev, GENMASK(adev->hw_cfg.dsp_cores - 1, 0));
572 snd_hdac_ext_bus_ppcap_enable(bus, false);
574 /* snd_hdac_stop_streams_and_chip does that already? */
575 snd_hdac_bus_stop_chip(bus);
576 snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
577 if (bus->audio_component)
578 snd_hdac_i915_exit(bus);
580 avs_module_info_free(adev);
581 pci_free_irq(pci, 0, adev);
582 pci_free_irq(pci, 0, bus);
583 pci_free_irq_vectors(pci);
584 iounmap(bus->remap_addr);
585 iounmap(adev->dsp_ba);
586 pci_release_regions(pci);
588 /* Firmware is not needed anymore */
589 avs_release_firmwares(adev);
591 /* pm_runtime_forbid() can rpm_resume() which we do not want */
592 pm_runtime_disable(&pci->dev);
593 pm_runtime_forbid(&pci->dev);
594 pm_runtime_enable(&pci->dev);
595 pm_runtime_get_noresume(&pci->dev);
598 static int avs_suspend_standby(struct avs_dev *adev)
600 struct hdac_bus *bus = &adev->base.core;
601 struct pci_dev *pci = adev->base.pci;
603 if (bus->cmd_dma_state)
604 snd_hdac_bus_stop_cmd_io(bus);
606 snd_hdac_ext_bus_link_power_down_all(bus);
608 enable_irq_wake(pci->irq);
614 static int __maybe_unused avs_suspend_common(struct avs_dev *adev, bool low_power)
616 struct hdac_bus *bus = &adev->base.core;
619 flush_work(&adev->probe_work);
620 if (low_power && adev->num_lp_paths)
621 return avs_suspend_standby(adev);
623 snd_hdac_ext_bus_link_power_down_all(bus);
625 ret = avs_ipc_set_dx(adev, AVS_MAIN_CORE_MASK, false);
627 * pm_runtime is blocked on DSP failure but system-wide suspend is not.
628 * Do not block entire system from suspending if that's the case.
630 if (ret && ret != -EPERM) {
631 dev_err(adev->dev, "set dx failed: %d\n", ret);
632 return AVS_IPC_RET(ret);
635 avs_ipc_block(adev->ipc);
636 avs_dsp_op(adev, int_control, false);
637 snd_hdac_ext_bus_ppcap_int_enable(bus, false);
639 ret = avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
641 dev_err(adev->dev, "core_mask %ld disable failed: %d\n", AVS_MAIN_CORE_MASK, ret);
645 snd_hdac_ext_bus_ppcap_enable(bus, false);
646 /* disable LP SRAM retention */
647 avs_hda_power_gating_enable(adev, false);
648 snd_hdac_bus_stop_chip(bus);
649 /* disable CG when putting controller to reset */
650 avs_hdac_clock_gating_enable(bus, false);
651 snd_hdac_bus_enter_link_reset(bus);
652 avs_hdac_clock_gating_enable(bus, true);
654 snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
659 static int avs_resume_standby(struct avs_dev *adev)
661 struct hdac_bus *bus = &adev->base.core;
662 struct pci_dev *pci = adev->base.pci;
664 pci_restore_state(pci);
665 disable_irq_wake(pci->irq);
667 snd_hdac_ext_bus_link_power_up_all(bus);
669 if (bus->cmd_dma_state)
670 snd_hdac_bus_init_cmd_io(bus);
675 static int __maybe_unused avs_resume_common(struct avs_dev *adev, bool low_power, bool purge)
677 struct hdac_bus *bus = &adev->base.core;
680 if (low_power && adev->num_lp_paths)
681 return avs_resume_standby(adev);
683 snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
684 avs_hdac_bus_init_chip(bus, true);
686 snd_hdac_ext_bus_ppcap_enable(bus, true);
687 snd_hdac_ext_bus_ppcap_int_enable(bus, true);
689 ret = avs_dsp_boot_firmware(adev, purge);
691 dev_err(adev->dev, "firmware boot failed: %d\n", ret);
698 static int __maybe_unused avs_suspend(struct device *dev)
700 return avs_suspend_common(to_avs_dev(dev), true);
703 static int __maybe_unused avs_resume(struct device *dev)
705 return avs_resume_common(to_avs_dev(dev), true, true);
708 static int __maybe_unused avs_runtime_suspend(struct device *dev)
710 return avs_suspend_common(to_avs_dev(dev), true);
713 static int __maybe_unused avs_runtime_resume(struct device *dev)
715 return avs_resume_common(to_avs_dev(dev), true, false);
718 static int __maybe_unused avs_freeze(struct device *dev)
720 return avs_suspend_common(to_avs_dev(dev), false);
722 static int __maybe_unused avs_thaw(struct device *dev)
724 return avs_resume_common(to_avs_dev(dev), false, true);
727 static int __maybe_unused avs_poweroff(struct device *dev)
729 return avs_suspend_common(to_avs_dev(dev), false);
732 static int __maybe_unused avs_restore(struct device *dev)
734 return avs_resume_common(to_avs_dev(dev), false, true);
737 static const struct dev_pm_ops avs_dev_pm = {
738 .suspend = avs_suspend,
739 .resume = avs_resume,
740 .freeze = avs_freeze,
742 .poweroff = avs_poweroff,
743 .restore = avs_restore,
744 SET_RUNTIME_PM_OPS(avs_runtime_suspend, avs_runtime_resume, NULL)
747 static const struct avs_sram_spec skl_sram_spec = {
748 .base_offset = SKL_ADSP_SRAM_BASE_OFFSET,
749 .window_size = SKL_ADSP_SRAM_WINDOW_SIZE,
750 .rom_status_offset = SKL_ADSP_SRAM_BASE_OFFSET,
753 static const struct avs_sram_spec apl_sram_spec = {
754 .base_offset = APL_ADSP_SRAM_BASE_OFFSET,
755 .window_size = APL_ADSP_SRAM_WINDOW_SIZE,
756 .rom_status_offset = APL_ADSP_SRAM_BASE_OFFSET,
759 static const struct avs_hipc_spec skl_hipc_spec = {
760 .req_offset = SKL_ADSP_REG_HIPCI,
761 .req_ext_offset = SKL_ADSP_REG_HIPCIE,
762 .req_busy_mask = SKL_ADSP_HIPCI_BUSY,
763 .ack_offset = SKL_ADSP_REG_HIPCIE,
764 .ack_done_mask = SKL_ADSP_HIPCIE_DONE,
765 .rsp_offset = SKL_ADSP_REG_HIPCT,
766 .rsp_busy_mask = SKL_ADSP_HIPCT_BUSY,
767 .ctl_offset = SKL_ADSP_REG_HIPCCTL,
770 static const struct avs_hipc_spec cnl_hipc_spec = {
771 .req_offset = CNL_ADSP_REG_HIPCIDR,
772 .req_ext_offset = CNL_ADSP_REG_HIPCIDD,
773 .req_busy_mask = CNL_ADSP_HIPCIDR_BUSY,
774 .ack_offset = CNL_ADSP_REG_HIPCIDA,
775 .ack_done_mask = CNL_ADSP_HIPCIDA_DONE,
776 .rsp_offset = CNL_ADSP_REG_HIPCTDR,
777 .rsp_busy_mask = CNL_ADSP_HIPCTDR_BUSY,
778 .ctl_offset = CNL_ADSP_REG_HIPCCTL,
781 static const struct avs_spec skl_desc = {
783 .min_fw_version = { 9, 21, 0, 4732 },
784 .dsp_ops = &avs_skl_dsp_ops,
786 .attributes = AVS_PLATATTR_CLDMA,
787 .sram = &skl_sram_spec,
788 .hipc = &skl_hipc_spec,
791 static const struct avs_spec apl_desc = {
793 .min_fw_version = { 9, 22, 1, 4323 },
794 .dsp_ops = &avs_apl_dsp_ops,
796 .attributes = AVS_PLATATTR_IMR,
797 .sram = &apl_sram_spec,
798 .hipc = &skl_hipc_spec,
801 static const struct avs_spec cnl_desc = {
803 .min_fw_version = { 10, 23, 0, 5314 },
804 .dsp_ops = &avs_cnl_dsp_ops,
806 .attributes = AVS_PLATATTR_IMR,
807 .sram = &apl_sram_spec,
808 .hipc = &cnl_hipc_spec,
811 static const struct avs_spec icl_desc = {
813 .min_fw_version = { 10, 23, 0, 5040 },
814 .dsp_ops = &avs_icl_dsp_ops,
816 .attributes = AVS_PLATATTR_IMR,
817 .sram = &apl_sram_spec,
818 .hipc = &cnl_hipc_spec,
821 static const struct avs_spec jsl_desc = {
823 .min_fw_version = { 10, 26, 0, 5872 },
824 .dsp_ops = &avs_icl_dsp_ops,
826 .attributes = AVS_PLATATTR_IMR,
827 .sram = &apl_sram_spec,
828 .hipc = &cnl_hipc_spec,
831 static const struct pci_device_id avs_ids[] = {
832 { PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &skl_desc) },
833 { PCI_DEVICE_DATA(INTEL, HDA_SKL, &skl_desc) },
834 { PCI_DEVICE_DATA(INTEL, HDA_KBL_LP, &skl_desc) },
835 { PCI_DEVICE_DATA(INTEL, HDA_KBL, &skl_desc) },
836 { PCI_DEVICE_DATA(INTEL, HDA_KBL_H, &skl_desc) },
837 { PCI_DEVICE_DATA(INTEL, HDA_CML_S, &skl_desc) },
838 { PCI_DEVICE_DATA(INTEL, HDA_APL, &apl_desc) },
839 { PCI_DEVICE_DATA(INTEL, HDA_GML, &apl_desc) },
840 { PCI_DEVICE_DATA(INTEL, HDA_CNL_LP, &cnl_desc) },
841 { PCI_DEVICE_DATA(INTEL, HDA_CNL_H, &cnl_desc) },
842 { PCI_DEVICE_DATA(INTEL, HDA_CML_LP, &cnl_desc) },
843 { PCI_DEVICE_DATA(INTEL, HDA_CML_H, &cnl_desc) },
844 { PCI_DEVICE_DATA(INTEL, HDA_RKL_S, &cnl_desc) },
845 { PCI_DEVICE_DATA(INTEL, HDA_ICL_LP, &icl_desc) },
846 { PCI_DEVICE_DATA(INTEL, HDA_ICL_N, &icl_desc) },
847 { PCI_DEVICE_DATA(INTEL, HDA_ICL_H, &icl_desc) },
848 { PCI_DEVICE_DATA(INTEL, HDA_JSL_N, &jsl_desc) },
851 MODULE_DEVICE_TABLE(pci, avs_ids);
853 static struct pci_driver avs_pci_driver = {
854 .name = KBUILD_MODNAME,
856 .probe = avs_pci_probe,
857 .remove = avs_pci_remove,
858 .shutdown = avs_pci_shutdown,
863 module_pci_driver(avs_pci_driver);
865 MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
866 MODULE_AUTHOR("Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>");
867 MODULE_DESCRIPTION("Intel cAVS sound driver");
868 MODULE_LICENSE("GPL");