^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #ifndef __FIRMWARE_LOADER_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #define __FIRMWARE_LOADER_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/firmware.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/kref.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/completion.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <generated/utsrelease.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
/**
 * enum fw_opt - options to control firmware loading behaviour
 *
 * @FW_OPT_UEVENT: Enables the fallback mechanism to send a kobject uevent
 *	when the firmware is not found. Userspace is in charge of loading
 *	the firmware using the sysfs loading facility.
 * @FW_OPT_NOWAIT: Used to describe the firmware request is asynchronous.
 * @FW_OPT_USERHELPER: Enable the fallback mechanism, in case the direct
 *	filesystem lookup fails at finding the firmware. For details refer to
 *	firmware_fallback_sysfs().
 * @FW_OPT_NO_WARN: Quiet, avoid printing warning messages.
 * @FW_OPT_NOCACHE: Disables firmware caching. Firmware caching is used to
 *	cache the firmware upon suspend, so that upon resume races against the
 *	firmware file lookup on storage is avoided. Used for calls where the
 *	file may be too big, or where the driver takes charge of its own
 *	firmware caching mechanism.
 * @FW_OPT_NOFALLBACK_SYSFS: Disable the sysfs fallback mechanism. Takes
 *	precedence over &FW_OPT_UEVENT and &FW_OPT_USERHELPER.
 * @FW_OPT_FALLBACK_PLATFORM: Enable fallback to device fw copy embedded in
 *	the platform's main firmware. If both this fallback and the sysfs
 *	fallback are enabled, then this fallback will be tried first.
 * @FW_OPT_PARTIAL: Allow partial read of firmware instead of needing to read
 *	entire file.
 */
enum fw_opt {
	FW_OPT_UEVENT			= BIT(0),
	FW_OPT_NOWAIT			= BIT(1),
	FW_OPT_USERHELPER		= BIT(2),
	FW_OPT_NO_WARN			= BIT(3),
	FW_OPT_NOCACHE			= BIT(4),
	FW_OPT_NOFALLBACK_SYSFS		= BIT(5),
	FW_OPT_FALLBACK_PLATFORM	= BIT(6),
	FW_OPT_PARTIAL			= BIT(7),
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
/**
 * enum fw_status - lifecycle states of a firmware load (held in struct fw_state)
 *
 * @FW_STATUS_UNKNOWN: initial state, no load has been started yet
 * @FW_STATUS_LOADING: a load is in progress (set by fw_state_start())
 * @FW_STATUS_DONE: terminal state, the load finished (set by fw_state_done())
 * @FW_STATUS_ABORTED: terminal state, the load was aborted
 *	(set by fw_state_aborted())
 */
enum fw_status {
	FW_STATUS_UNKNOWN,
	FW_STATUS_LOADING,
	FW_STATUS_DONE,
	FW_STATUS_ABORTED,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
/*
 * Concurrent request_firmware() calls for the same firmware need to be
 * serialized. struct fw_state is a simple state machine which holds the
 * state of the firmware loading.
 */
struct fw_state {
	/* signalled (complete_all) once status reaches DONE or ABORTED */
	struct completion completion;
	/* current state; written via WRITE_ONCE in __fw_state_set() */
	enum fw_status status;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
/* Per-firmware-name private state shared by all requests for that name. */
struct fw_priv {
	struct kref ref;		/* refcount; last put frees this object */
	struct list_head list;		/* presumably links into ->fwc's cache list — confirm in main.c */
	struct firmware_cache *fwc;	/* owning firmware cache */
	struct fw_state fw_st;		/* load state machine + waiter completion */
	void *data;			/* firmware image buffer */
	size_t size;			/* bytes of valid data in ->data */
	size_t allocated_size;		/* NOTE(review): looks like the caller-preallocated buffer size — confirm */
	size_t offset;			/* NOTE(review): presumably read offset for FW_OPT_PARTIAL requests — confirm */
	u32 opt_flags;			/* bitmask of enum fw_opt for this request */
#ifdef CONFIG_FW_LOADER_PAGED_BUF
	bool is_paged_buf;		/* ->data backed by ->pages (see fw_map_paged_buf()) */
	struct page **pages;		/* page array backing the paged buffer */
	int nr_pages;			/* pages currently populated */
	int page_array_size;		/* capacity of ->pages (grown by fw_grow_paged_buf()) */
#endif
#ifdef CONFIG_FW_LOADER_USER_HELPER
	bool need_uevent;		/* NOTE(review): presumably whether the sysfs fallback should emit a uevent — confirm */
	struct list_head pending_list;	/* removed by __fw_state_set() on done/abort */
#endif
	const char *fw_name;		/* firmware name this entry was requested under */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) extern struct mutex fw_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) static inline bool __fw_state_check(struct fw_priv *fw_priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) enum fw_status status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) struct fw_state *fw_st = &fw_priv->fw_st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) return fw_st->status == status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
/*
 * Wait, killably and with a timeout, for the firmware load to reach a
 * terminal state (the completion is fired in __fw_state_set()).
 *
 * Returns:
 *   -ENOENT     the load was aborted (reported in preference to the raw
 *               wait result, unless the wait itself timed out)
 *   -ETIMEDOUT  @timeout elapsed before completion
 *   <0 other    error from the wait (e.g. fatal signal: -ERESTARTSYS)
 *   0           the load completed
 */
static inline int __fw_state_wait_common(struct fw_priv *fw_priv, long timeout)
{
	struct fw_state *fw_st = &fw_priv->fw_st;
	long ret;

	/* ret: <0 on error, 0 on timeout, >0 = remaining jiffies on success */
	ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
	/* abort wins over both success and -ERESTARTSYS, but not timeout */
	if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
		return -ENOENT;
	if (!ret)
		return -ETIMEDOUT;

	/* negative: propagate the wait error; positive: completed fine */
	return ret < 0 ? ret : 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
/*
 * Advance the load state machine to @status. On a terminal state (DONE or
 * ABORTED) also wake every waiter blocked in __fw_state_wait_common().
 */
static inline void __fw_state_set(struct fw_priv *fw_priv,
				  enum fw_status status)
{
	struct fw_state *fw_st = &fw_priv->fw_st;

	/* annotated store: ->status is also read without fw_lock held
	 * (see __fw_state_check()) */
	WRITE_ONCE(fw_st->status, status);

	if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) {
#ifdef CONFIG_FW_LOADER_USER_HELPER
		/*
		 * Doing this here ensures that the fw_priv is deleted from
		 * the pending list in all abort/done paths.
		 */
		list_del_init(&fw_priv->pending_list);
#endif
		/* complete_all: every current and future waiter is released */
		complete_all(&fw_st->completion);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
/* Abort the load: terminal state, releases all waiters (see __fw_state_set()). */
static inline void fw_state_aborted(struct fw_priv *fw_priv)
{
	__fw_state_set(fw_priv, FW_STATUS_ABORTED);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
/* Return true if the load has been aborted. */
static inline bool fw_state_is_aborted(struct fw_priv *fw_priv)
{
	return __fw_state_check(fw_priv, FW_STATUS_ABORTED);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
/* Mark the load as in progress. */
static inline void fw_state_start(struct fw_priv *fw_priv)
{
	__fw_state_set(fw_priv, FW_STATUS_LOADING);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
/* Mark the load as finished: terminal state, releases all waiters. */
static inline void fw_state_done(struct fw_priv *fw_priv)
{
	__fw_state_set(fw_priv, FW_STATUS_DONE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
/* Hand the completed load over to @fw for @device — defined out of line;
 * NOTE(review): exact ownership/caching semantics live in the .c file, verify there. */
int assign_fw(struct firmware *fw, struct device *device);

/*
 * Paged-buffer helpers. When CONFIG_FW_LOADER_PAGED_BUF is disabled the
 * stubs below report -ENXIO (or false), so callers degrade gracefully.
 */
#ifdef CONFIG_FW_LOADER_PAGED_BUF
void fw_free_paged_buf(struct fw_priv *fw_priv);
int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed);
int fw_map_paged_buf(struct fw_priv *fw_priv);
bool fw_is_paged_buf(struct fw_priv *fw_priv);
#else
static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {}
static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; }
static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; }
static inline bool fw_is_paged_buf(struct fw_priv *fw_priv) { return false; }
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) #endif /* __FIRMWARE_LOADER_H */