Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

fs/xfs/kmem.c (git blame: all lines from commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300):

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include <linux/backing-dev.h>
#include "xfs_message.h"
#include "xfs_trace.h"

void *
kmem_alloc(size_t size, xfs_km_flags_t flags)
{
	int	retries = 0;
	gfp_t	lflags = kmem_flags_convert(flags);
	void	*ptr;

	trace_kmem_alloc(size, flags, _RET_IP_);

	do {
		ptr = kmalloc(size, lflags);
		if (ptr || (flags & KM_MAYFAIL))
			return ptr;
		if (!(++retries % 100))
			xfs_err(NULL,
	"%s(%u) possible memory allocation deadlock size %u in %s (mode:0x%x)",
				current->comm, current->pid,
				(unsigned int)size, __func__, lflags);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (1);
}
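
/*
 * Illustrative usage sketch (not part of the original file; the helper
 * name is hypothetical).  A caller holding filesystem locks would
 * normally pass KM_NOFS so direct reclaim cannot re-enter XFS, and add
 * KM_MAYFAIL only if it can cope with a NULL return; without
 * KM_MAYFAIL, kmem_alloc() above loops until the allocation succeeds.
 */
static inline int example_alloc_buffer(size_t len, void **out)
{
	void *buf = kmem_alloc(len, KM_NOFS | KM_MAYFAIL);

	if (!buf)
		return -ENOMEM;	/* only reachable with KM_MAYFAIL */
	*out = buf;
	return 0;
}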

/*
 * __vmalloc() will allocate data pages and auxiliary structures (e.g.
 * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context here. Hence
 * we need to tell memory reclaim that we are in such a context via
 * PF_MEMALLOC_NOFS to prevent memory reclaim re-entering the filesystem here
 * and potentially deadlocking.
 */
static void *
__kmem_vmalloc(size_t size, xfs_km_flags_t flags)
{
	unsigned nofs_flag = 0;
	void	*ptr;
	gfp_t	lflags = kmem_flags_convert(flags);

	if (flags & KM_NOFS)
		nofs_flag = memalloc_nofs_save();

	ptr = __vmalloc(size, lflags);

	if (flags & KM_NOFS)
		memalloc_nofs_restore(nofs_flag);

	return ptr;
}
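
/*
 * Illustrative sketch (not part of the original file; the helper name
 * is hypothetical).  The same scoped PF_MEMALLOC_NOFS pattern used by
 * __kmem_vmalloc() applies to any allocation whose internal GFP flags
 * cannot be passed in directly.
 */
static inline void *example_nofs_scoped_alloc(size_t size)
{
	unsigned int nofs_flag = memalloc_nofs_save();
	void *p = vzalloc(size);	/* allocates with GFP_KERNEL internally */

	memalloc_nofs_restore(nofs_flag);
	return p;
}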

/*
 * Same as kmem_alloc_large, except we guarantee the buffer returned is aligned
 * to the @align_mask. We only guarantee alignment up to page size, we'll clamp
 * alignment at page size if it is larger. vmalloc always returns a PAGE_SIZE
 * aligned region.
 */
void *
kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags)
{
	void	*ptr;

	trace_kmem_alloc_io(size, flags, _RET_IP_);

	if (WARN_ON_ONCE(align_mask >= PAGE_SIZE))
		align_mask = PAGE_SIZE - 1;

	ptr = kmem_alloc(size, flags | KM_MAYFAIL);
	if (ptr) {
		if (!((uintptr_t)ptr & align_mask))
			return ptr;
		kfree(ptr);
	}
	return __kmem_vmalloc(size, flags);
}
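
/*
 * Illustrative usage sketch (not part of the original file; the helper
 * name and sizes are hypothetical).  For a buffer handed to the block
 * layer, @align_mask is typically the logical sector size minus one,
 * e.g. 511 for 512-byte sectors; kmem_alloc_io() then returns an
 * address with none of the mask bits set, up to PAGE_SIZE alignment.
 */
static inline void *example_alloc_sector_buf(size_t len)
{
	return kmem_alloc_io(len, 511, KM_NOFS);
}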

void *
kmem_alloc_large(size_t size, xfs_km_flags_t flags)
{
	void	*ptr;

	trace_kmem_alloc_large(size, flags, _RET_IP_);

	ptr = kmem_alloc(size, flags | KM_MAYFAIL);
	if (ptr)
		return ptr;
	return __kmem_vmalloc(size, flags);
}
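
/*
 * Illustrative note (not part of the original file; the helper name is
 * hypothetical).  Because kmem_alloc_large() and kmem_alloc_io() may
 * fall back to vmalloc memory, such buffers must be released with a
 * vmalloc-aware free (kvfree(), or the XFS kmem_free() wrapper from
 * kmem.h), never plain kfree().
 */
static inline void example_free_large(void *buf)
{
	kvfree(buf);	/* safe for both kmalloc and vmalloc memory */
}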