// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "binder_alloc.h"
#include "binder_trace.h"
#include <trace/hooks/binder.h>

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)
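
/*
 * Illustrative note (exposition, not from the original source): the mask
 * can be changed at runtime through the module parameter, e.g.
 *
 *	echo 4 > /sys/module/binder_alloc/parameters/debug_mask
 *
 * to enable BINDER_DEBUG_BUFFER_ALLOC (1U << 2); mask values OR together.
 * The path assumes binder_alloc is the KBUILD_MODNAME for this file.
 */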

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
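
/*
 * Illustrative sketch (exposition only): a buffer's size is never stored;
 * it is the gap between its user_data and the next buffer's, or the end
 * of the mapping for the last entry. With two hypothetical buffers in an
 * 8 KiB (0x2000) mapping:
 *
 *	alloc->buffer                               alloc->buffer + 0x2000
 *	|<-- A: user_data +0x0, size 0x58 -->|<-- B: size 0x2000-0x58 -->|
 *
 * binder_insert_free_buffer() below keys the free rbtree on this derived
 * size (ties go right), which makes the allocator's search a best-fit.
 */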

static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void __user *uptr;

	uptr = (void __user *)user_ptr;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (uptr < buffer->user_data)
			n = n->rb_left;
		else if (uptr > buffer->user_data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by the kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate the userspace pointer to buffer data and return the buffer
 * corresponding to that user pointer. Searches the allocated-buffers rb
 * tree for a buffer whose user data pointer matches.
 *
 * Return: Pointer to the buffer on success, %NULL if no buffer matches,
 * or %ERR_PTR(-EPERM) if the buffer is not currently allowed to be freed
 * by userspace
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

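/*
 * Illustrative caller sketch (condensed from the BC_FREE_BUFFER handling
 * in binder.c; exposition only, not part of this file):
 *
 *	binder_uintptr_t data_ptr;
 *
 *	if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
 *		return -EFAULT;
 *	buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr);
 *	if (IS_ERR_OR_NULL(buffer))
 *		... reject the free request ...
 */

/**
 * binder_update_page_range() - allocate or free pages in a range
 * @alloc:	binder_alloc for this proc
 * @allocate:	1 to populate the range, 0 to release it
 * @start:	first userspace address of the range (page aligned)
 * @end:	first userspace address past the range
 *
 * When allocating, pages still resident on the binder_alloc_lru are pulled
 * back off it; otherwise fresh zeroed pages are allocated and inserted into
 * the vma. When freeing, pages are not returned to the system immediately;
 * they are parked on the lru so the shrinker can reclaim them lazily.
 *
 * Return: 0 on success, -ENOMEM or -ESRCH on failure
 */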
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void __user *start, void __user *end)
{
	void __user *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		mmap_read_lock(mm);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
				   alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		user_page_addr = (uintptr_t)page_addr;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
	}
	if (mm) {
		mmap_read_unlock(mm);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		if (page_addr == start)
			break;
		continue;

err_vm_insert_page_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		if (page_addr == start)
			break;
	}
err_no_vma:
	if (mm) {
		mmap_read_unlock(mm);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}


static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
		struct vm_area_struct *vma)
{
	if (vma)
		alloc->vma_vm_mm = vma->vm_mm;
	/*
	 * If we see alloc->vma is not NULL, the buffer data structures
	 * are set up completely. Look at the smp_rmb() side in
	 * binder_alloc_get_vma(). We also want to guarantee the new
	 * alloc->vma_vm_mm is always visible if alloc->vma is set.
	 */
	smp_wmb();
	alloc->vma = vma;
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	struct vm_area_struct *vma = NULL;

	if (alloc->vma) {
		/* Look at description in binder_alloc_set_vma */
		smp_rmb();
		vma = alloc->vma;
	}
	return vma;
}

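/**
 * debug_low_async_space_locked() - check for oneway spam when space is low
 * @alloc:	binder_alloc for this proc
 * @pid:	pid of the caller being checked
 *
 * Called with alloc->mutex held once free async space drops below the
 * threshold in binder_alloc_new_buf_locked(). Walks the allocated-buffers
 * tree and tallies the async buffers attributed to @pid.
 *
 * Return: true the first time @pid is found over the limits during a
 * low-space episode, false otherwise
 */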
static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
{
	/*
	 * Find the number and total size of buffers allocated by the
	 * current caller; the idea is that once we cross the threshold,
	 * whoever is responsible for the low async space is likely to
	 * try to send another async txn, and at some point we'll catch
	 * them in the act. This is more efficient than keeping a map
	 * per pid.
	 */
	struct rb_node *n;
	struct binder_buffer *buffer;
	size_t total_alloc_size = 0;
	size_t num_buffers = 0;

	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
	     n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		if (buffer->pid != pid)
			continue;
		if (!buffer->async_transaction)
			continue;
		total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
			+ sizeof(struct binder_buffer);
		num_buffers++;
	}

	/*
	 * Warn if this pid has more than 50 transactions, or more than 50% of
	 * async space (which is 25% of total buffer size). Oneway spam is only
	 * detected when the threshold is exceeded.
	 */
	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			     "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
			      alloc->pid, pid, num_buffers, total_alloc_size);
		if (!alloc->oneway_spam_detected) {
			alloc->oneway_spam_detected = true;
			return true;
		}
	}
	return false;
}

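/*
 * binder_alloc_new_buf_locked() - core of binder_alloc_new_buf(), called
 * with alloc->mutex held. Performs a best-fit search of the free rbtree,
 * populates the backing pages for the chosen chunk, and splits any unused
 * tail off into a new free buffer. See binder_alloc_new_buf() below for
 * the parameter descriptions.
 */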
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				size_t data_size,
				size_t offsets_size,
				size_t extra_buffers_size,
				int is_async,
				int pid)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void __user *has_page_addr;
	void __user *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (!binder_alloc_get_vma(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid size %zd-%zd\n",
				alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid extra_buffers_size %zd\n",
				alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	trace_android_vh_binder_alloc_new_buf_locked(size, alloc, is_async);
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
				   total_alloc_size, allocated_buffers,
				   largest_alloc_size, total_free_size,
				   free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
		      alloc->pid, size, buffer, buffer_size);

	has_page_addr = (void __user *)
		(((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1, (void __user *)
		PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->user_data = (u8 __user *)buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %pK\n",
		      alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	buffer->pid = pid;
	buffer->oneway_spam_suspect = false;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
		if (alloc->free_async_space < alloc->buffer_size / 10) {
			/*
			 * Start detecting spammers once we have less than 20%
			 * of async space left (which is less than 10% of total
			 * buffer size).
			 */
			buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
		} else {
			alloc->oneway_spam_detected = false;
		}
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0, (void __user *)
				 PAGE_ALIGN((uintptr_t)buffer->user_data),
				 end_page_addr);
	return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       size of the user-specified array of buffer offsets
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 * @pid:                pid to attribute allocation to (used for debugging)
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) on error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async,
					   int pid)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async, pid);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
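
/*
 * Illustrative caller sketch (condensed from binder_transaction() in
 * binder.c; exposition only, not part of this file):
 *
 *	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
 *					 tr->offsets_size, extra_buffers_size,
 *					 !reply && (t->flags & TF_ONE_WAY),
 *					 current->tgid);
 *	if (IS_ERR(t->buffer)) {
 *		... map the PTR_ERR() to a driver error and abort ...
 *	}
 */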

static void __user *buffer_start_page(struct binder_buffer *buffer)
{
	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}

static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void __user *)
		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}

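/*
 * Worked example of the helpers above (hypothetical addresses, 4 KiB
 * pages): for user_data == 0x1010, buffer_start_page() masks down to
 * 0x1000, while prev_buffer_end_page() yields the page of byte 0x100f,
 * also 0x1000. binder_delete_free_buffer() below compares such pages for
 * the neighboring buffers and only releases the start page when neither
 * neighbor shares it and the buffer does not begin on a page boundary.
 */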
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->user_data,
					   next->user_data);
		}
	}

	if (PAGE_ALIGNED(buffer->user_data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->user_data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data,
				   next ? next->user_data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}

static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
		      alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->user_data < alloc->buffer);
	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
		(void __user *)(((uintptr_t)
			  buffer->user_data + buffer_size) & PAGE_MASK));

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer);
/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	/*
	 * We could eliminate the call to binder_alloc_clear_buf()
	 * from binder_alloc_deferred_release() by moving this to
	 * binder_alloc_free_buf_locked(). However, that could
	 * increase contention for the alloc mutex if clear_on_free
	 * is used frequently for large buffers. The mutex is not
	 * needed for correctness here.
	 */
	if (buffer->clear_on_free) {
		binder_alloc_clear_buf(alloc, buffer);
		buffer->clear_on_free = false;
	}
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * binder_alloc_mmap_handler() - map virtual address space for proc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) * @alloc: alloc structure for this proc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) * @vma: vma passed to mmap()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * Called by binder_mmap() to initialize the space specified in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * vma for allocating binder buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * 0 = success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) * -EBUSY = address space already mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) * -ENOMEM = failed to map memory to given address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) int binder_alloc_mmap_handler(struct binder_alloc *alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) const char *failure_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) struct binder_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) mutex_lock(&binder_alloc_mmap_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) if (alloc->buffer_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) failure_string = "already mapped";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) goto err_already_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) SZ_4M);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) mutex_unlock(&binder_alloc_mmap_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) alloc->buffer = (void __user *)vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) sizeof(alloc->pages[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (alloc->pages == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) failure_string = "alloc page array";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) goto err_alloc_pages_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (!buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) failure_string = "alloc buffer struct";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) goto err_alloc_buf_struct_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) buffer->user_data = alloc->buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) list_add(&buffer->entry, &alloc->buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) buffer->free = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) binder_insert_free_buffer(alloc, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) alloc->free_async_space = alloc->buffer_size / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) binder_alloc_set_vma(alloc, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) mmgrab(alloc->vma_vm_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) err_alloc_buf_struct_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) kfree(alloc->pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) alloc->pages = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) err_alloc_pages_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) alloc->buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) mutex_lock(&binder_alloc_mmap_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) alloc->buffer_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) err_already_mapped:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) mutex_unlock(&binder_alloc_mmap_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) "%s: %d %lx-%lx %s failed %d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) alloc->pid, vma->vm_start, vma->vm_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) failure_string, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
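
/*
 * Illustrative sketch (editor's addition, not part of the driver):
 * binder_npages_for_vma() is a hypothetical helper that spells out the
 * sizing arithmetic used by binder_alloc_mmap_handler() above, which
 * open-codes the same math when allocating alloc->pages.
 */
static inline size_t binder_npages_for_vma(struct vm_area_struct *vma)
{
	/* The usable mapping is clamped to 4 MiB, whatever userspace asked for */
	unsigned long size = min_t(unsigned long, vma->vm_end - vma->vm_start,
				   SZ_4M);

	/* One struct binder_lru_page slot is kept per potential backing page */
	return size / PAGE_SIZE;
}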
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) void binder_alloc_deferred_release(struct binder_alloc *alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) struct rb_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) int buffers, page_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct binder_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) buffers = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) mutex_lock(&alloc->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) BUG_ON(alloc->vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) while ((n = rb_first(&alloc->allocated_buffers))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) buffer = rb_entry(n, struct binder_buffer, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /* Transaction should already have been freed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) BUG_ON(buffer->transaction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (buffer->clear_on_free) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) binder_alloc_clear_buf(alloc, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) buffer->clear_on_free = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) binder_free_buf_locked(alloc, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) buffers++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
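	/*
	 * Editor's note: once all allocated buffers have been freed above,
	 * the remaining free buffers coalesce back into the single buffer
	 * created at mmap time, so this loop is expected to run exactly
	 * once; the WARN_ON_ONCE() below checks that the list is empty
	 * after the first deletion.
	 */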
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) while (!list_empty(&alloc->buffers)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) buffer = list_first_entry(&alloc->buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct binder_buffer, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) WARN_ON(!buffer->free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) list_del(&buffer->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) WARN_ON_ONCE(!list_empty(&alloc->buffers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) kfree(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) page_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (alloc->pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) void __user *page_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) bool on_lru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (!alloc->pages[i].page_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) on_lru = list_lru_del(&binder_alloc_lru,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) &alloc->pages[i].lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) page_addr = alloc->buffer + i * PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) "%s: %d: page %d at %pK %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) __func__, alloc->pid, i, page_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) on_lru ? "on lru" : "active");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) __free_page(alloc->pages[i].page_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) page_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) kfree(alloc->pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) mutex_unlock(&alloc->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (alloc->vma_vm_mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) mmdrop(alloc->vma_vm_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) "%s: %d buffers %d, pages %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) __func__, alloc->pid, buffers, page_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) static void print_binder_buffer(struct seq_file *m, const char *prefix,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) struct binder_buffer *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) prefix, buffer->debug_id, buffer->user_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) buffer->data_size, buffer->offsets_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) buffer->extra_buffers_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) buffer->transaction ? "active" : "delivered");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
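
/*
 * Editor's example (illustrative): a line emitted by the helper above might
 * read
 *
 *   buffer 7: 00000000c1a2b3d4 size 128:16:0 delivered
 *
 * i.e. debug_id 7, data/offsets/extra sizes of 128/16/0 bytes, and no
 * transaction currently attached; %pK prints a hashed or zeroed pointer
 * depending on kptr_restrict.
 */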
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * binder_alloc_print_allocated() - print buffer info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * @m: seq_file for output via seq_printf()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * @alloc: binder_alloc for this proc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * Prints information about every buffer associated with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * the binder_alloc state to the given seq_file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) void binder_alloc_print_allocated(struct seq_file *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct binder_alloc *alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) struct rb_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) mutex_lock(&alloc->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) print_binder_buffer(m, " buffer",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) rb_entry(n, struct binder_buffer, rb_node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) mutex_unlock(&alloc->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * binder_alloc_print_pages() - print page usage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * @m: seq_file for output via seq_printf()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * @alloc: binder_alloc for this proc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) void binder_alloc_print_pages(struct seq_file *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct binder_alloc *alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct binder_lru_page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) int active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) int lru = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) int free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) mutex_lock(&alloc->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * Make sure the binder_alloc is fully initialized, otherwise we might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * read inconsistent state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (binder_alloc_get_vma(alloc) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) page = &alloc->pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (!page->page_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) free++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) else if (list_empty(&page->lru))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) active++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) lru++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) mutex_unlock(&alloc->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
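
/*
 * Editor's note: the triple printed above is active:lru:free, i.e. pages
 * currently backing buffer data, pages parked on the shrinker LRU awaiting
 * reclaim, and page slots with no page installed. On a fully mapped 4 MiB
 * space (1024 slots), "pages: 3:5:1016" would mean 8 resident pages, 5 of
 * them reclaimable.
 */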
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * binder_alloc_get_allocated_count() - return count of buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * @alloc: binder_alloc for this proc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * Return: count of allocated buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct rb_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) mutex_lock(&alloc->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) mutex_unlock(&alloc->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * binder_alloc_vma_close() - invalidate address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * @alloc: binder_alloc for this proc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * Called from binder_vma_close() when releasing address space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * Clears alloc->vma to prevent new incoming transactions from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * allocating more buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) void binder_alloc_vma_close(struct binder_alloc *alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) binder_alloc_set_vma(alloc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru instance of the item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 *
 * Return: LRU_REMOVED_RETRY if the page was freed (the lru lock was
 * dropped and retaken along the way), or LRU_SKIP if one of the locks
 * could not be taken or the page had already been freed.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) enum lru_status binder_alloc_free_page(struct list_head *item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct list_lru_one *lru,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) spinlock_t *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) void *cb_arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) __must_hold(lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) struct mm_struct *mm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) struct binder_lru_page *page = container_of(item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct binder_lru_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) struct binder_alloc *alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) uintptr_t page_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) size_t index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) alloc = page->alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (!mutex_trylock(&alloc->mutex))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) goto err_get_alloc_mutex_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (!page->page_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) goto err_page_already_freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) index = page - alloc->pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) mm = alloc->vma_vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (!mmget_not_zero(mm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) goto err_mmget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (!mmap_read_trylock(mm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) goto err_mmap_read_lock_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) vma = binder_alloc_get_vma(alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) list_lru_isolate(lru, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) spin_unlock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (vma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) trace_binder_unmap_user_start(alloc, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) zap_page_range(vma, page_addr, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) trace_binder_unmap_user_end(alloc, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) mmap_read_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) mmput_async(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) trace_binder_unmap_kernel_start(alloc, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) __free_page(page->page_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) page->page_ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) trace_binder_unmap_kernel_end(alloc, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) spin_lock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) mutex_unlock(&alloc->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) return LRU_REMOVED_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) err_mmap_read_lock_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) mmput_async(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) err_mmget:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) err_page_already_freed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) mutex_unlock(&alloc->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) err_get_alloc_mutex_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return LRU_SKIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
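
/*
 * Editor's note on the locking in binder_alloc_free_page(): the callback
 * runs with the list_lru spinlock held, so it may only trylock the alloc
 * mutex and the mmap lock; sleeping here could deadlock against allocation
 * paths that take the same locks in the opposite order. Once the page is
 * isolated, the spinlock is dropped around zap_page_range(), which may
 * sleep, and retaken before returning LRU_REMOVED_RETRY so that
 * list_lru_walk() knows the lock was dropped and revalidates its position.
 */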
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_count(&binder_alloc_lru);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			     NULL, sc->nr_to_scan);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) static struct shrinker binder_shrinker = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) .count_objects = binder_shrink_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) .scan_objects = binder_shrink_scan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) .seeks = DEFAULT_SEEKS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) };
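
/*
 * Illustrative sketch (editor's addition): shrink_binder_pages() is a
 * hypothetical stand-in compressing what the mm shrinker core does with
 * the two callbacks registered above; the real entry point lives in
 * mm/vmscan.c.
 */
static inline unsigned long shrink_binder_pages(struct shrink_control *sc)
{
	/* No cached pages on the LRU: report that there is nothing to do */
	if (!binder_shrink_count(&binder_shrinker, sc))
		return 0;

	/* Otherwise try to reclaim up to sc->nr_to_scan of them */
	return binder_shrink_scan(&binder_shrinker, sc);
}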
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * binder_alloc_init() - called by binder_open() for per-proc initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * @alloc: binder_alloc for this proc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * Called from binder_open() to initialize binder_alloc fields for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * new binder proc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) void binder_alloc_init(struct binder_alloc *alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) alloc->pid = current->group_leader->pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) mutex_init(&alloc->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) INIT_LIST_HEAD(&alloc->buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) int binder_alloc_shrinker_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) int ret = list_lru_init(&binder_alloc_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) ret = register_shrinker(&binder_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) list_lru_destroy(&binder_alloc_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
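
/*
 * Editor's sketch (hypothetical caller, for illustration): this is meant to
 * be called once from the driver's module init, propagating any error, e.g.:
 *
 *	ret = binder_alloc_shrinker_init();
 *	if (ret)
 *		return ret;
 */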
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * check_buffer() - verify that buffer/offset is safe to access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * @alloc: binder_alloc for this proc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * @buffer: binder buffer to be accessed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * @offset: offset into @buffer data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * @bytes: bytes to access from offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * Check that the @offset/@bytes are within the size of the given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * @buffer and that the buffer is currently active and not freeable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * Offsets must also be multiples of sizeof(u32). The kernel is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * allowed to touch the buffer in two cases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * 1) when the buffer is being created:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * (buffer->free == 0 && buffer->allow_user_free == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * 2) when the buffer is being torn down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * (buffer->free == 0 && buffer->transaction == NULL).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * Return: true if the buffer is safe to access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) static inline bool check_buffer(struct binder_alloc *alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) struct binder_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) binder_size_t offset, size_t bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) return buffer_size >= bytes &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) offset <= buffer_size - bytes &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) IS_ALIGNED(offset, sizeof(u32)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) !buffer->free &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) (!buffer->allow_user_free || !buffer->transaction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
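
/*
 * Editor's note: the bounds test in check_buffer() is deliberately written
 * to be safe against unsigned wraparound. A naive "offset + bytes <=
 * buffer_size" can wrap: with a 128-byte buffer, offset == 16 and
 * bytes == SIZE_MAX - 8 would sum to 7 and pass. Testing buffer_size >=
 * bytes first, then offset <= buffer_size - bytes, rejects such values
 * without any overflow.
 */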
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * binder_alloc_get_page() - get kernel pointer for given buffer offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * @alloc: binder_alloc for this proc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * @buffer: binder buffer to be accessed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * @buffer_offset: offset into @buffer data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * @pgoffp: address to copy final page offset to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. The byte-offset into
 * that page is written to @pgoffp, which must point to valid
 * storage; it is written unconditionally.
 *
 * The caller is responsible for ensuring that the offset points
 * to a valid address within @buffer and that @buffer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * not freeable by the user. Since it can't be freed, we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * guaranteed that the corresponding elements of @alloc->pages[]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * cannot change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * Return: struct page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct binder_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) binder_size_t buffer_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) pgoff_t *pgoffp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) binder_size_t buffer_space_offset = buffer_offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) (buffer->user_data - alloc->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) size_t index = buffer_space_offset >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) struct binder_lru_page *lru_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) lru_page = &alloc->pages[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) *pgoffp = pgoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return lru_page->page_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
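
/*
 * Worked example (editor's addition): with PAGE_SIZE == 4096, a buffer
 * whose user_data starts 8192 bytes into the mapping and a buffer_offset
 * of 5000 give buffer_space_offset == 13192, hence index == 3 and
 * pgoff == 904: the byte lives 904 bytes into the fourth backing page.
 */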
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * binder_alloc_clear_buf() - zero out buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * @alloc: binder_alloc for this proc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * @buffer: binder buffer to be cleared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * memset the given buffer to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) static void binder_alloc_clear_buf(struct binder_alloc *alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) struct binder_buffer *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) size_t bytes = binder_alloc_buffer_size(alloc, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) binder_size_t buffer_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) while (bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) pgoff_t pgoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) void *kptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) page = binder_alloc_get_page(alloc, buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) buffer_offset, &pgoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) kptr = kmap(page) + pgoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) memset(kptr, 0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) bytes -= size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) buffer_offset += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
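
/*
 * Editor's note: clearing proceeds page by page because this kernel keeps
 * no contiguous kernel-side mapping of the buffer; only userspace maps the
 * range contiguously, so each backing page has to be kmap()ed and zeroed
 * individually.
 */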
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) /**
 * binder_alloc_copy_user_to_buffer() - copy from user memory into a binder buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * @alloc: binder_alloc for this proc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * @buffer: binder buffer to be accessed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) * @buffer_offset: offset into @buffer data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * @from: userspace pointer to source buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * @bytes: bytes to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * Copy bytes from source userspace to target buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * Return: bytes remaining to be copied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) struct binder_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) binder_size_t buffer_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) const void __user *from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) size_t bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (!check_buffer(alloc, buffer, buffer_offset, bytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) return bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) while (bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) unsigned long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) pgoff_t pgoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) void *kptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) page = binder_alloc_get_page(alloc, buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) buffer_offset, &pgoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) kptr = kmap(page) + pgoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) ret = copy_from_user(kptr, from, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return bytes - size + ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) bytes -= size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) from += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) buffer_offset += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
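
/*
 * Illustrative sketch (editor's addition): binder_copy_user_or_fault() is a
 * hypothetical wrapper showing how a caller would consume the remainder-style
 * return value above, where nonzero means the copy was rejected or failed
 * partway through.
 */
static inline int binder_copy_user_or_fault(struct binder_alloc *alloc,
					    struct binder_buffer *buffer,
					    binder_size_t offset,
					    const void __user *from,
					    size_t bytes)
{
	/* Any bytes left uncopied are reported to the caller as a fault */
	return binder_alloc_copy_user_to_buffer(alloc, buffer, offset,
						from, bytes) ? -EFAULT : 0;
}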
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) bool to_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct binder_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) binder_size_t buffer_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) void *ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) size_t bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) {
	/* All copies must be 32-bit aligned and a multiple of 32 bits in size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (!check_buffer(alloc, buffer, buffer_offset, bytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) while (bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) pgoff_t pgoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) void *tmpptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) void *base_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) page = binder_alloc_get_page(alloc, buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) buffer_offset, &pgoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) base_ptr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) tmpptr = base_ptr + pgoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (to_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) memcpy(tmpptr, ptr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) memcpy(ptr, tmpptr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) * kunmap_atomic() takes care of flushing the cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) * if this device has VIVT cache arch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) kunmap_atomic(base_ptr);
		bytes -= size;
		ptr += size;
		buffer_offset += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct binder_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) binder_size_t buffer_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) void *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) size_t bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) src, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) void *dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) struct binder_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) binder_size_t buffer_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) size_t bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) dest, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
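
/*
 * Editor's sketch (hypothetical, for illustration): the two wrappers above
 * are typically used together to patch a fixed-size object in place inside
 * a transaction buffer, along the lines of:
 *
 *	struct flat_binder_object fbo;
 *
 *	if (binder_alloc_copy_from_buffer(alloc, &fbo, buffer, off,
 *					  sizeof(fbo)))
 *		return -EINVAL;
 *	fbo.flags |= FLAT_BINDER_FLAG_ACCEPTS_FDS;
 *	if (binder_alloc_copy_to_buffer(alloc, buffer, off, &fbo,
 *					sizeof(fbo)))
 *		return -EINVAL;
 */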
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)