/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_BINDER_INTERNAL_H
#define _LINUX_BINDER_INTERNAL_H

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/uidgid.h>
#include <uapi/linux/android/binderfs.h>
#include "binder_alloc.h"

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;
	kuid_t binder_context_mgr_uid;
	const char *name;
};
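
/*
 * Example (illustrative sketch, not an interface exported by this header):
 * binder_context_mgr_node is only stable while context_mgr_node_lock is
 * held, so a lookup of the context manager is expected to take the mutex,
 * grab its own reference on the node, and only then drop the lock. The
 * get_node_ref() helper below is a hypothetical stand-in for whatever
 * per-node reference helper the caller uses.
 *
 *	struct binder_node *node;
 *
 *	mutex_lock(&context->context_mgr_node_lock);
 *	node = context->binder_context_mgr_node;
 *	if (node)
 *		get_node_ref(node);
 *	mutex_unlock(&context->context_mgr_node_lock);
 */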

/**
 * struct binder_device - information about a binder device node
 * @hlist:          list of binder devices (only used for devices requested via
 *                  CONFIG_ANDROID_BINDER_DEVICES)
 * @miscdev:        information about a binder character device node
 * @context:        binder context information
 * @binderfs_inode: This is the inode of the root dentry of the super block
 *                  belonging to a binderfs mount.
 * @ref:            reference count of the binder device
 */
struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
	struct inode *binderfs_inode;
	refcount_t ref;
};

/**
 * struct binderfs_mount_opts - mount options for binderfs
 * @max: maximum number of allocatable binderfs binder devices
 * @stats_mode: enable binder stats in binderfs.
 */
struct binderfs_mount_opts {
	int max;
	int stats_mode;
};

/**
 * struct binderfs_info - information about a binderfs mount
 * @ipc_ns:         The ipc namespace the binderfs mount belongs to.
 * @control_dentry: The dentry of the binder-control device of this
 *                  binderfs mount.
 * @root_uid:       uid that needs to be used when a new binder device is
 *                  created.
 * @root_gid:       gid that needs to be used when a new binder device is
 *                  created.
 * @mount_opts:     The mount options in use.
 * @device_count:   The current number of allocated binder devices.
 * @proc_log_dir:   Pointer to the directory dentry containing process-specific
 *                  logs.
 */
struct binderfs_info {
	struct ipc_namespace *ipc_ns;
	struct dentry *control_dentry;
	kuid_t root_uid;
	kgid_t root_gid;
	struct binderfs_mount_opts mount_opts;
	int device_count;
	struct dentry *proc_log_dir;
};

extern const struct file_operations binder_fops;

extern char *binder_devices_param;

#ifdef CONFIG_ANDROID_BINDERFS
extern bool is_binderfs_device(const struct inode *inode);
extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
					   const struct file_operations *fops,
					   void *data);
extern void binderfs_remove_file(struct dentry *dentry);
#else
static inline bool is_binderfs_device(const struct inode *inode)
{
	return false;
}
static inline struct dentry *binderfs_create_file(struct dentry *dir,
						  const char *name,
						  const struct file_operations *fops,
						  void *data)
{
	return NULL;
}
static inline void binderfs_remove_file(struct dentry *dentry) {}
#endif
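
/*
 * Example (illustrative sketch): how a caller might publish a per-process
 * log file under an existing binderfs directory dentry when
 * CONFIG_ANDROID_BINDERFS is enabled. "proc_dir", "proc_fops", "pid", and
 * the error handling policy are assumptions of this sketch, not definitions
 * from this header; note the stub above returns NULL when binderfs is
 * disabled.
 *
 *	char name[11];
 *	struct dentry *entry;
 *
 *	snprintf(name, sizeof(name), "%u", pid);
 *	entry = binderfs_create_file(proc_dir, name, &proc_fops,
 *				     (void *)(unsigned long)pid);
 *	if (!IS_ERR_OR_NULL(entry))
 *		...remember entry; it is later torn down with
 *		   binderfs_remove_file(entry)...
 */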

#ifdef CONFIG_ANDROID_BINDERFS
extern int __init init_binderfs(void);
#else
static inline int __init init_binderfs(void)
{
	return 0;
}
#endif

int binder_stats_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(binder_stats);

int binder_state_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(binder_state);

int binder_transactions_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(binder_transactions);

int binder_transaction_log_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(binder_transaction_log);
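
/*
 * DEFINE_SHOW_ATTRIBUTE(name) (from <linux/seq_file.h>) generates a
 * name##_open() wrapper around single_open() and a matching name##_fops,
 * so the declarations above yield binder_stats_fops, binder_state_fops,
 * binder_transactions_fops and binder_transaction_log_fops. Illustrative
 * sketch of wiring one of them up; the "binder" debugfs directory used
 * here is an assumption of the example, not something this header creates:
 *
 *	struct dentry *dir = debugfs_create_dir("binder", NULL);
 *
 *	debugfs_create_file("stats", 0444, dir, NULL, &binder_stats_fops);
 */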

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
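
/*
 * Illustrative sketch of how a slot in the fixed 32-entry ring above can be
 * claimed: @cur only ever increments, the remainder selects the slot, and
 * @full is latched once the ring has wrapped. The helper name is
 * hypothetical; the driver keeps its own equivalent.
 *
 *	static struct binder_transaction_log_entry *
 *	log_entry_claim(struct binder_transaction_log *log)
 *	{
 *		unsigned int cur = atomic_inc_return(&log->cur);
 *
 *		if (cur >= ARRAY_SIZE(log->entry))
 *			log->full = true;
 *		return &log->entry[cur % ARRAY_SIZE(log->entry)];
 *	}
 */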

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_ONEWAY_SPAM_SUSPECT) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};
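
/*
 * The counters are plain atomics, so updates need no additional locking;
 * the "atomics, no lock needed" annotations elsewhere in this header follow
 * from that. Illustrative sketch (hypothetical helper, the driver keeps its
 * own equivalents):
 *
 *	static inline void stats_object_created(struct binder_stats *stats,
 *						enum binder_stat_types type)
 *	{
 *		atomic_inc(&stats->obj_created[type]);
 *	}
 *
 * The number of live objects of a given type is then
 * atomic_read(&obj_created[type]) - atomic_read(&obj_deleted[type]).
 */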

/**
 * struct binder_work - work enqueued on a worklist
 * @entry: node enqueued on list
 * @type:  type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum binder_work_type {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};
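
/*
 * binder_work is embedded in larger objects (binder_transaction,
 * binder_error, binder_node, binder_ref_death), so a consumer dequeues the
 * generic work item and recovers the enclosing object based on @type.
 * Illustrative sketch, assuming the caller already holds the lock that
 * protects the worklist:
 *
 *	struct binder_work *w;
 *	struct binder_transaction *t;
 *
 *	w = list_first_entry(list, struct binder_work, entry);
 *	list_del_init(&w->entry);
 *	switch (w->type) {
 *	case BINDER_WORK_TRANSACTION:
 *		t = container_of(w, struct binder_transaction, work);
 *		break;
 *	...
 *	}
 */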

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @inherit_rt:           inherit RT scheduling policy from caller
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};
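
/*
 * Illustrative sketch of the @tmp_refs rule spelled out above: while the
 * node still has an owning @proc the proc's inner lock covers the count,
 * otherwise the global binder_dead_nodes_lock does, and @lock is held
 * across the check so @proc cannot change underneath it. Names other than
 * the struct members are assumptions of the example.
 *
 *	spin_lock(&node->lock);
 *	if (node->proc)
 *		spin_lock(&node->proc->inner_lock);
 *	else
 *		spin_lock(&binder_dead_nodes_lock);
 *	node->tmp_refs++;
 *	if (node->proc)
 *		spin_unlock(&node->proc->inner_lock);
 *	else
 *		spin_unlock(&binder_dead_nodes_lock);
 *	spin_unlock(&node->lock);
 */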

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id: unique ID for the ref
 * @desc:     unique userspace handle for ref
 * @strong:   strong ref count (debugging only if not locked)
 * @weak:     weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:         binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:   list entry for node->refs list in target node
 *                (protected by @node->lock)
 * @proc:         binder_proc containing ref
 * @node:         binder_node of target node. When cleaning up a
 *                ref for deletion in binder_cleanup_ref, a non-NULL
 *                @node indicates the node must be freed
 * @death:        pointer to death notification (ref_death) if requested
 *                (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};
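
/*
 * Illustrative sketch of the "desc + proc => ref" lookup listed above:
 * @rb_node_desc keys the proc's refs_by_desc tree by @data.desc. The helper
 * name is hypothetical and the caller is assumed to hold the lock that
 * protects the tree.
 *
 *	static struct binder_ref *ref_by_desc(struct binder_proc *proc,
 *					      uint32_t desc)
 *	{
 *		struct rb_node *n = proc->refs_by_desc.rb_node;
 *		struct binder_ref *ref;
 *
 *		while (n) {
 *			ref = rb_entry(n, struct binder_ref, rb_node_desc);
 *			if (desc < ref->data.desc)
 *				n = n->rb_left;
 *			else if (desc > ref->data.desc)
 *				n = n->rb_right;
 *			else
 *				return ref;
 *		}
 *		return NULL;
 *	}
 */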

/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy: scheduler policy
 * @prio:         [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
	unsigned int sched_policy;
	int prio;
};
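
/*
 * @prio uses the kernel's unified priority scale: real-time tasks occupy
 * 0..MAX_RT_PRIO-1 (0..99) and SCHED_NORMAL/SCHED_BATCH tasks occupy
 * 100..139, i.e. MAX_RT_PRIO + 20 + nice. Minimal conversion sketch
 * (hypothetical helper, shown only to make the arithmetic concrete):
 *
 *	static int to_kernel_prio(int policy, int user_priority)
 *	{
 *		if (policy == SCHED_FIFO || policy == SCHED_RR)
 *			return MAX_RT_PRIO - 1 - user_priority;
 *		return NICE_TO_PRIO(user_priority);
 *	}
 */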

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @outstanding_txns:     number of transactions to be transmitted before
 *                        processes in freeze_wait are woken up
 *                        (protected by @inner_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @is_frozen:            process is frozen and unable to service
 *                        binder transactions
 *                        (protected by @inner_lock)
 * @sync_recv:            process received sync transactions since last frozen
 *                        bit 0: received sync transaction after being frozen
 *                        bit 1: new pending sync transaction during freezing
 *                        (protected by @inner_lock)
 * @async_recv:           process received async transactions since last frozen
 *                        (protected by @inner_lock)
 * @freeze_wait:          waitqueue of processes waiting for all outstanding
 *                        transactions to be processed
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 * @binderfs_entry:       process-specific binderfs log file
 * @oneway_spam_detection_enabled: whether the process has enabled oneway
 *                        spam detection
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	int outstanding_txns;
	bool is_dead;
	bool is_frozen;
	bool sync_recv;
	bool async_recv;
	wait_queue_head_t freeze_wait;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
	bool oneway_spam_detection_enabled;
};
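
/*
 * Illustrative sketch of the documented lock order (1) outer, 2) node,
 * 3) inner) when all three locks are needed; they are released in the
 * reverse order. Variable names are assumptions of the example.
 *
 *	spin_lock(&proc->outer_lock);
 *	spin_lock(&node->lock);
 *	spin_lock(&proc->inner_lock);
 *	...
 *	spin_unlock(&proc->inner_lock);
 *	spin_unlock(&node->lock);
 *	spin_unlock(&proc->outer_lock);
 */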

/**
 * struct binder_proc_ext - binder process bookkeeping
 * @proc: the embedded binder_proc
 * @cred: struct cred associated with the `struct file`
 *        in binder_open()
 *        (invariant after initialized)
 *
 * Extended binder_proc -- needed to add the "cred" field without
 * changing the KMI for binder_proc.
 */
struct binder_proc_ext {
	struct binder_proc proc;
	const struct cred *cred;
};

static inline const struct cred *binder_get_cred(struct binder_proc *proc)
{
	struct binder_proc_ext *eproc;

	eproc = container_of(proc, struct binder_proc_ext, proc);
	return eproc->cred;
}

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 * @task:                 struct task_struct for this thread
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
};

/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry: list entry
 * @file:        struct file to be associated with new fd
 * @offset:      offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};
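
/*
 * Illustrative sketch of consuming the fixups queued on a transaction's
 * fd_fixups list from the target process's context: each entry reserves an
 * fd, the fd value is patched into the buffer at @offset, and the entry is
 * then released. Error handling and the buffer-patching step are omitted;
 * this is not the driver's actual routine.
 *
 *	struct binder_txn_fd_fixup *fixup, *tmp;
 *
 *	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
 *		int fd = get_unused_fd_flags(O_CLOEXEC);
 *
 *		...write fd into the buffer at fixup->offset...
 *		fd_install(fd, fixup->file);
 *		list_del(&fixup->fixup_entry);
 *		kfree(fixup);
 *	}
 */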

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	struct binder_priority priority;
	struct binder_priority saved_priority;
	bool set_priority_called;
	kuid_t sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
	ANDROID_VENDOR_DATA(1);
	ANDROID_OEM_DATA_ARRAY(1, 2);
};

/**
 * struct binder_object - union of flat binder object types
 * @hdr:  generic object header
 * @fbo:  binder object (nodes and refs)
 * @fdo:  file descriptor object
 * @bbo:  binder buffer pointer
 * @fdao: file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};
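
/*
 * Illustrative sketch of the "type-independent copy" mentioned above: a
 * caller copies up to sizeof(struct binder_object) out of the transaction
 * buffer, then inspects hdr.type to learn which union member (and how many
 * bytes) are actually valid. The size-returning shape below is only an
 * example; "buffer_data" and "offset" are assumptions of the sketch.
 *
 *	struct binder_object object;
 *
 *	memcpy(&object, buffer_data + offset, sizeof(object));
 *	switch (object.hdr.type) {
 *	case BINDER_TYPE_BINDER:
 *	case BINDER_TYPE_WEAK_BINDER:
 *	case BINDER_TYPE_HANDLE:
 *	case BINDER_TYPE_WEAK_HANDLE:
 *		return sizeof(object.fbo);
 *	case BINDER_TYPE_FD:
 *		return sizeof(object.fdo);
 *	case BINDER_TYPE_PTR:
 *		return sizeof(object.bbo);
 *	case BINDER_TYPE_FDA:
 *		return sizeof(object.fdao);
 *	default:
 *		return 0;
 *	}
 */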

extern struct binder_transaction_log binder_transaction_log;
extern struct binder_transaction_log binder_transaction_log_failed;
#endif /* _LINUX_BINDER_INTERNAL_H */