// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//

#include <linux/pci.h>
#include "ops.h"

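/* Read-modify-write of a PCI config dword; returns true if the value changed */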
static
bool snd_sof_pci_update_bits_unlocked(struct snd_sof_dev *sdev, u32 offset,
				      u32 mask, u32 value)
{
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	unsigned int old, new;
	u32 ret = 0;

	pci_read_config_dword(pci, offset, &ret);
	old = ret;
	dev_dbg(sdev->dev, "Debug PCIR: %8.8x at %8.8x\n", old & mask, offset);

	new = (old & ~mask) | (value & mask);

	if (old == new)
		return false;

	pci_write_config_dword(pci, offset, new);
	dev_dbg(sdev->dev, "Debug PCIW: %8.8x at %8.8x\n", value,
		offset);

	return true;
}

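/**
 * snd_sof_pci_update_bits - update bits in a PCI configuration register
 * @sdev: SOF device
 * @offset: offset of the PCI configuration register
 * @mask: bit mask of the bits to update
 * @value: new value of the masked bits
 *
 * The read-modify-write is done under sdev->hw_lock with interrupts
 * disabled. Returns true if the register value changed, false if the
 * masked bits already held @value.
 */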
bool snd_sof_pci_update_bits(struct snd_sof_dev *sdev, u32 offset,
			     u32 mask, u32 value)
{
	unsigned long flags;
	bool change;

	spin_lock_irqsave(&sdev->hw_lock, flags);
	change = snd_sof_pci_update_bits_unlocked(sdev, offset, mask, value);
	spin_unlock_irqrestore(&sdev->hw_lock, flags);
	return change;
}
EXPORT_SYMBOL(snd_sof_pci_update_bits);

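/**
 * snd_sof_dsp_update_bits_unlocked - update bits in a DSP register, no locking
 * @sdev: SOF device
 * @bar: BAR index of the register
 * @offset: register offset within @bar
 * @mask: bit mask of the bits to update
 * @value: new value of the masked bits
 *
 * The caller is responsible for serializing access to the register.
 * Returns true if the register value changed, false otherwise.
 */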
bool snd_sof_dsp_update_bits_unlocked(struct snd_sof_dev *sdev, u32 bar,
				      u32 offset, u32 mask, u32 value)
{
	unsigned int old, new;
	u32 ret;

	ret = snd_sof_dsp_read(sdev, bar, offset);

	old = ret;
	new = (old & ~mask) | (value & mask);

	if (old == new)
		return false;

	snd_sof_dsp_write(sdev, bar, offset, new);

	return true;
}
EXPORT_SYMBOL(snd_sof_dsp_update_bits_unlocked);

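/**
 * snd_sof_dsp_update_bits64_unlocked - update bits in a 64-bit DSP register,
 *					no locking
 * @sdev: SOF device
 * @bar: BAR index of the register
 * @offset: register offset within @bar
 * @mask: bit mask of the bits to update
 * @value: new value of the masked bits
 *
 * The caller is responsible for serializing access to the register.
 * Returns true if the register value changed, false otherwise.
 */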
bool snd_sof_dsp_update_bits64_unlocked(struct snd_sof_dev *sdev, u32 bar,
					u32 offset, u64 mask, u64 value)
{
	u64 old, new;

	old = snd_sof_dsp_read64(sdev, bar, offset);

	new = (old & ~mask) | (value & mask);

	if (old == new)
		return false;

	snd_sof_dsp_write64(sdev, bar, offset, new);

	return true;
}
EXPORT_SYMBOL(snd_sof_dsp_update_bits64_unlocked);

/* This is for register bits with attribute RWC */
bool snd_sof_dsp_update_bits(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			     u32 mask, u32 value)
{
	unsigned long flags;
	bool change;

	spin_lock_irqsave(&sdev->hw_lock, flags);
	change = snd_sof_dsp_update_bits_unlocked(sdev, bar, offset, mask,
						  value);
	spin_unlock_irqrestore(&sdev->hw_lock, flags);
	return change;
}
EXPORT_SYMBOL(snd_sof_dsp_update_bits);

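/**
 * snd_sof_dsp_update_bits64 - update bits in a 64-bit DSP register
 * @sdev: SOF device
 * @bar: BAR index of the register
 * @offset: register offset within @bar
 * @mask: bit mask of the bits to update
 * @value: new value of the masked bits
 *
 * Same as snd_sof_dsp_update_bits64_unlocked() but serialized with
 * sdev->hw_lock and interrupts disabled. Returns true if the register
 * value changed, false otherwise.
 */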
bool snd_sof_dsp_update_bits64(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			       u64 mask, u64 value)
{
	unsigned long flags;
	bool change;

	spin_lock_irqsave(&sdev->hw_lock, flags);
	change = snd_sof_dsp_update_bits64_unlocked(sdev, bar, offset, mask,
						    value);
	spin_unlock_irqrestore(&sdev->hw_lock, flags);
	return change;
}
EXPORT_SYMBOL(snd_sof_dsp_update_bits64);

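/*
 * Unconditional read-modify-write helper: unlike the _unlocked variants
 * above, the write is issued even when the masked value appears unchanged.
 * This matters for RWC (write-one-to-clear) register bits, which do not
 * read back the value that has to be written.
 */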
static
void snd_sof_dsp_update_bits_forced_unlocked(struct snd_sof_dev *sdev, u32 bar,
					     u32 offset, u32 mask, u32 value)
{
	unsigned int old, new;
	u32 ret;

	ret = snd_sof_dsp_read(sdev, bar, offset);

	old = ret;
	new = (old & ~mask) | (value & mask);

	snd_sof_dsp_write(sdev, bar, offset, new);
}

/* This is for register bits with attribute RWC */
void snd_sof_dsp_update_bits_forced(struct snd_sof_dev *sdev, u32 bar,
				    u32 offset, u32 mask, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(&sdev->hw_lock, flags);
	snd_sof_dsp_update_bits_forced_unlocked(sdev, bar, offset, mask, value);
	spin_unlock_irqrestore(&sdev->hw_lock, flags);
}
EXPORT_SYMBOL(snd_sof_dsp_update_bits_forced);

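/**
 * snd_sof_dsp_panic - handle a panic reported by the DSP firmware
 * @sdev: SOF device
 * @offset: offset of the panic message in the DSP memory
 *
 * Records the panic offset in dsp_oops_offset if it is not already set,
 * dumps the DSP registers and mailbox, and notifies the trace code of
 * the error.
 */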
void snd_sof_dsp_panic(struct snd_sof_dev *sdev, u32 offset)
{
	dev_err(sdev->dev, "error: DSP panic!\n");

	/*
	 * If the DSP is not ready it has not set dsp_oops_offset yet; in
	 * that case take the offset reported with the panic message. A
	 * cross-check of the memory window setting against the panic
	 * message could also be added here.
	 */
	if (!sdev->dsp_oops_offset)
		sdev->dsp_oops_offset = offset;
	else
		dev_dbg(sdev->dev, "panic: dsp_oops_offset %zu offset %d\n",
			sdev->dsp_oops_offset, offset);

	snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX);
	snd_sof_trace_notify_for_error(sdev);
}
EXPORT_SYMBOL(snd_sof_dsp_panic);