^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Compressed RAM block device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2008, 2009, 2010 Nitin Gupta
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * 2012, 2013 Minchan Kim
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * This code is released using a dual license strategy: BSD/GPL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * You can choose the licence that better fits your requirements.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Released under the terms of 3-clause BSD License
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * Released under the terms of GNU General Public License Version 2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #define KMSG_COMPONENT "zram"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/bio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/buffer_head.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/genhd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/backing-dev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/idr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/sysfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/cpuhotplug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/part_stat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include "zram_drv.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) static DEFINE_IDR(zram_index_idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) /* idr index must be protected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) static DEFINE_MUTEX(zram_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) static int zram_major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) static const char *default_compressor = "lzo-rle";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) /* Module params (documentation at end) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) static unsigned int num_devices = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * Pages that compress to sizes equal to or greater than this are stored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) * uncompressed in memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) static size_t huge_class_size;
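/*
 * Note: huge_class_size is filled in when the zsmalloc pool is created
 * (in zram_meta_alloc(), not shown in this hunk); slots holding such
 * incompressible pages are flagged ZRAM_HUGE.
 */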
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) static const struct block_device_operations zram_devops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) static const struct block_device_operations zram_wb_devops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) static void zram_free_page(struct zram *zram, size_t index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) u32 index, int offset, struct bio *bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) static int zram_slot_trylock(struct zram *zram, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) static void zram_slot_lock(struct zram *zram, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) static void zram_slot_unlock(struct zram *zram, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) static inline bool init_done(struct zram *zram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) return zram->disksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) static inline struct zram *dev_to_zram(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) return (struct zram *)dev_to_disk(dev)->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) static unsigned long zram_get_handle(struct zram *zram, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) return zram->table[index].handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) zram->table[index].handle = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) /* flag operations require table entry bit_spin_lock() to be held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) static bool zram_test_flag(struct zram *zram, u32 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) enum zram_pageflags flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) return zram->table[index].flags & BIT(flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) static void zram_set_flag(struct zram *zram, u32 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) enum zram_pageflags flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) zram->table[index].flags |= BIT(flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) static void zram_clear_flag(struct zram *zram, u32 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) enum zram_pageflags flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) zram->table[index].flags &= ~BIT(flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) static inline void zram_set_element(struct zram *zram, u32 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) unsigned long element)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) zram->table[index].element = element;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) static unsigned long zram_get_element(struct zram *zram, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) return zram->table[index].element;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) static size_t zram_get_obj_size(struct zram *zram, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) static void zram_set_obj_size(struct zram *zram,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) u32 index, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) }
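/*
 * The per-slot "flags" word is split in two: the low ZRAM_FLAG_SHIFT bits
 * hold the compressed object size, the bits above hold zram_pageflags.
 * zram_get_obj_size()/zram_set_obj_size() only touch the size part, while
 * the flag helpers above only touch the upper bits.
 */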
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) static inline bool zram_allocated(struct zram *zram, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) return zram_get_obj_size(zram, index) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) zram_test_flag(zram, index, ZRAM_SAME) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) zram_test_flag(zram, index, ZRAM_WB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) #if PAGE_SIZE != 4096
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) static inline bool is_partial_io(struct bio_vec *bvec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) return bvec->bv_len != PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) static inline bool is_partial_io(struct bio_vec *bvec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) #endif
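/*
 * A bvec is "partial" when it covers less than a full page. That can only
 * happen when PAGE_SIZE differs from the 4K zram logical block size, so
 * on 4K-page kernels the helper is compile-time false.
 */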
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) * Check if request is within bounds and aligned on zram logical blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) static inline bool valid_io_request(struct zram *zram,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) sector_t start, unsigned int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) u64 end, bound;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) /* unaligned request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) end = start + (size >> SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) bound = zram->disksize >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) /* out of range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) if (unlikely(start >= bound || end > bound || start > end))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) /* I/O request is valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) }
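/*
 * Example, assuming the usual 4K logical block (8 sectors): a request
 * starting at sector 7 is rejected as unaligned, and any request whose
 * end would run past "bound" (the disk size in sectors) is rejected too.
 */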
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) *index += (*offset + bvec->bv_len) / PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) static inline void update_used_max(struct zram *zram,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) const unsigned long pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) unsigned long old_max, cur_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) old_max = atomic_long_read(&zram->stats.max_used_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) cur_max = old_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) if (pages > cur_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) old_max = atomic_long_cmpxchg(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) &zram->stats.max_used_pages, cur_max, pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) } while (old_max != cur_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) }
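/*
 * update_used_max() is a lock-free maximum update: keep retrying the
 * cmpxchg until either the current maximum is already >= "pages" or we
 * successfully publish "pages" as the new max_used_pages.
 */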
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) static inline void zram_fill_page(void *ptr, unsigned long len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) unsigned long value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) memset_l(ptr, value, len / sizeof(unsigned long));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) static bool page_same_filled(void *ptr, unsigned long *element)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) unsigned long *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) page = (unsigned long *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) val = page[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) if (val != page[last_pos])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) for (pos = 1; pos < last_pos; pos++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) if (val != page[pos])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) *element = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) }
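/*
 * page_same_filled() detects pages made of a single repeating unsigned
 * long pattern (the zero page being the common case), so the caller only
 * needs to remember the pattern (see zram_set_element() above) and flag
 * the slot ZRAM_SAME instead of storing a compressed copy.
 */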
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) static ssize_t initstate_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) down_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) val = init_done(zram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) return scnprintf(buf, PAGE_SIZE, "%u\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) static ssize_t disksize_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) static ssize_t mem_limit_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) struct device_attribute *attr, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) u64 limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) char *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) limit = memparse(buf, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) if (buf == tmp) /* no chars parsed, invalid input */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) down_write(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) up_write(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) static ssize_t mem_used_max_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) struct device_attribute *attr, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) err = kstrtoul(buf, 10, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) if (err || val != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) down_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) if (init_done(zram)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) atomic_long_set(&zram->stats.max_used_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) zs_get_total_pages(zram->mem_pool));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) }
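/*
 * Only "0" is accepted here: it resets the max_used_pages watermark to
 * the pool's current size, e.g.:
 *	echo 0 > /sys/block/zram0/mem_used_max
 */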
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) static ssize_t idle_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) struct device_attribute *attr, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) if (!sysfs_streq(buf, "all"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) down_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) if (!init_done(zram)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) for (index = 0; index < nr_pages; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) * Do not mark ZRAM_UNDER_WB slots as ZRAM_IDLE, to close a race.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * See the comment in writeback_store().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) zram_slot_lock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) if (zram_allocated(zram, index) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) !zram_test_flag(zram, index, ZRAM_UNDER_WB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) zram_set_flag(zram, index, ZRAM_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) zram_slot_unlock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) }
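/*
 * Typical usage: mark every allocated slot idle so that a later
 * "idle" writeback only touches pages not accessed since, e.g.:
 *	echo all > /sys/block/zram0/idle
 */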
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) #ifdef CONFIG_ZRAM_WRITEBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) static ssize_t writeback_limit_enable_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) struct device_attribute *attr, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) ssize_t ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) if (kstrtoull(buf, 10, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) down_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) spin_lock(&zram->wb_limit_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) zram->wb_limit_enable = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) spin_unlock(&zram->wb_limit_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) ret = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) static ssize_t writeback_limit_enable_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) bool val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) down_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) spin_lock(&zram->wb_limit_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) val = zram->wb_limit_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) spin_unlock(&zram->wb_limit_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) return scnprintf(buf, PAGE_SIZE, "%d\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) static ssize_t writeback_limit_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) struct device_attribute *attr, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) ssize_t ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) if (kstrtoull(buf, 10, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) down_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) spin_lock(&zram->wb_limit_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) zram->bd_wb_limit = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) spin_unlock(&zram->wb_limit_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) ret = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) static ssize_t writeback_limit_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) down_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) spin_lock(&zram->wb_limit_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) val = zram->bd_wb_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) spin_unlock(&zram->wb_limit_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) }
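/*
 * Note on units: the writeback limit is accounted in 4K chunks; the
 * budget is decremented by 1UL << (PAGE_SHIFT - 12) for every page
 * written back (see writeback_store() below).
 */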
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) static void reset_bdev(struct zram *zram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) struct block_device *bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) if (!zram->backing_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) bdev = zram->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) if (zram->old_block_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) set_blocksize(bdev, zram->old_block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) /* hope filp_close() flushes all of the IO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) filp_close(zram->backing_dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) zram->backing_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) zram->old_block_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) zram->bdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) zram->disk->fops = &zram_devops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) kvfree(zram->bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) zram->bitmap = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) static ssize_t backing_dev_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) struct file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) down_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) file = zram->backing_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) if (!file) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) memcpy(buf, "none\n", 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) return 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) p = file_path(file, buf, PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) if (IS_ERR(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) ret = PTR_ERR(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) ret = strlen(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) memmove(buf, p, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) buf[ret++] = '\n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) static ssize_t backing_dev_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) struct device_attribute *attr, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) char *file_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) size_t sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) struct file *backing_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) struct address_space *mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) unsigned int bitmap_sz, old_block_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) unsigned long nr_pages, *bitmap = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) struct block_device *bdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) file_name = kmalloc(PATH_MAX, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) if (!file_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) down_write(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) if (init_done(zram)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) pr_info("Can't setup backing device for initialized device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) strlcpy(file_name, buf, PATH_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) /* ignore trailing newline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) sz = strlen(file_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) if (sz > 0 && file_name[sz - 1] == '\n')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) file_name[sz - 1] = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) backing_dev = filp_open_block(file_name, O_RDWR|O_LARGEFILE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) if (IS_ERR(backing_dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) err = PTR_ERR(backing_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) backing_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) mapping = backing_dev->f_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) /* Only block devices are supported at the moment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) if (!S_ISBLK(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) err = -ENOTBLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) bdev = blkdev_get_by_dev(inode->i_rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) if (IS_ERR(bdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) err = PTR_ERR(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) bdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) nr_pages = i_size_read(inode) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) if (!bitmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) old_block_size = block_size(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) err = set_blocksize(bdev, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) reset_bdev(zram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) zram->old_block_size = old_block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) zram->bdev = bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) zram->backing_dev = backing_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) zram->bitmap = bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) zram->nr_pages = nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) * With the writeback feature, zram does asynchronous IO, so it is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) * longer a synchronous device; drop the synchronous IO flag. Otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) * the upper layer (e.g., swap) could wait for IO completion rather than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) * submit-and-return, which would make the system sluggish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) * Furthermore, when the IO function returns (e.g., swap_readpage), the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) * upper layer expects the IO to be done and may free the page, while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) * the IO is in fact still in flight, which could eventually cause a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) * use-after-free once the IO really completes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) zram->disk->fops = &zram_wb_devops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) up_write(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) pr_info("setup backing device %s\n", file_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) kfree(file_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) if (bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) kvfree(bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) if (bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) if (backing_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) filp_close(backing_dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) up_write(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) kfree(file_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) }
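/*
 * The backing device has to be configured while the zram device is still
 * uninitialised (before disksize is set) and must be a block device, e.g.:
 *	echo /dev/sdb1 > /sys/block/zram0/backing_dev
 */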
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) static unsigned long alloc_block_bdev(struct zram *zram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) unsigned long blk_idx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) /* skip bit 0 so a block index is never confused with zram.handle == 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) if (blk_idx == zram->nr_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) if (test_and_set_bit(blk_idx, zram->bitmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) atomic64_inc(&zram->stats.bd_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) return blk_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) int was_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) was_set = test_and_clear_bit(blk_idx, zram->bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) WARN_ON_ONCE(!was_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) atomic64_dec(&zram->stats.bd_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) static void zram_page_end_io(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) struct page *page = bio_first_page_all(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) page_endio(page, op_is_write(bio_op(bio)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) blk_status_to_errno(bio->bi_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) bio_put(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) * Returns 1 if the submission is successful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) unsigned long entry, struct bio *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) bio = bio_alloc(GFP_ATOMIC, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) if (!bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) bio_set_dev(bio, zram->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) bio_put(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) if (!parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) bio->bi_opf = REQ_OP_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) bio->bi_end_io = zram_page_end_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) bio->bi_opf = parent->bi_opf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) bio_chain(bio, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) submit_bio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) #define PAGE_WB_SIG "page_index="
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) #define PAGE_WRITEBACK 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) #define HUGE_WRITEBACK 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) #define IDLE_WRITEBACK 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
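/*
 * The writeback trigger accepts three forms, matching the modes above:
 *	echo idle > /sys/block/zram0/writeback
 *	echo huge > /sys/block/zram0/writeback
 *	echo page_index=1251 > /sys/block/zram0/writeback
 */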
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) static ssize_t writeback_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) struct device_attribute *attr, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) unsigned long index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) struct bio bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) struct bio_vec bio_vec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) ssize_t ret = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) int mode, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) unsigned long blk_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) if (sysfs_streq(buf, "idle"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) mode = IDLE_WRITEBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) else if (sysfs_streq(buf, "huge"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) mode = HUGE_WRITEBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) index >= nr_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) nr_pages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) mode = PAGE_WRITEBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) down_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) if (!init_done(zram)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) goto release_init_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) if (!zram->backing_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) goto release_init_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) goto release_init_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) for (; nr_pages != 0; index++, nr_pages--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) struct bio_vec bvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) bvec.bv_page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) bvec.bv_len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) bvec.bv_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) spin_lock(&zram->wb_limit_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) if (zram->wb_limit_enable && !zram->bd_wb_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) spin_unlock(&zram->wb_limit_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) spin_unlock(&zram->wb_limit_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) if (!blk_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) blk_idx = alloc_block_bdev(zram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) if (!blk_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) ret = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) zram_slot_lock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) if (!zram_allocated(zram, index))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) if (zram_test_flag(zram, index, ZRAM_WB) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) zram_test_flag(zram, index, ZRAM_SAME) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) zram_test_flag(zram, index, ZRAM_UNDER_WB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) if (mode == IDLE_WRITEBACK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) !zram_test_flag(zram, index, ZRAM_IDLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) if (mode == HUGE_WRITEBACK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) !zram_test_flag(zram, index, ZRAM_HUGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * Clearing ZRAM_UNDER_WB is the caller's duty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * IOW, zram_free_page() never clears it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) zram_set_flag(zram, index, ZRAM_UNDER_WB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) /* Needed to handle the hugepage writeback race */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) zram_set_flag(zram, index, ZRAM_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) zram_slot_unlock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (zram_bvec_read(zram, &bvec, index, 0, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) zram_slot_lock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) zram_clear_flag(zram, index, ZRAM_UNDER_WB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) zram_clear_flag(zram, index, ZRAM_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) zram_slot_unlock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) bio_init(&bio, &bio_vec, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) bio_set_dev(&bio, zram->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) bio.bi_opf = REQ_OP_WRITE | REQ_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) bvec.bv_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * XXX: A single page IO is inefficient for writes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * but it is not bad as a starting point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) err = submit_bio_wait(&bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) zram_slot_lock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) zram_clear_flag(zram, index, ZRAM_UNDER_WB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) zram_clear_flag(zram, index, ZRAM_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) zram_slot_unlock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * Return the last IO error; if every IO succeeded,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) * the full length is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) ret = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) atomic64_inc(&zram->stats.bd_writes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * We released the slot lock, so we need to check whether the slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * was changed. If the slot was freed, we can catch that easily
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * via zram_allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * A subtle case is when the slot is freed/reallocated/marked as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * ZRAM_IDLE again. To close that race, idle_store does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * mark a slot ZRAM_IDLE once it finds the slot is ZRAM_UNDER_WB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * Thus, we can close the race by checking the ZRAM_IDLE bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) zram_slot_lock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) if (!zram_allocated(zram, index) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) !zram_test_flag(zram, index, ZRAM_IDLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) zram_clear_flag(zram, index, ZRAM_UNDER_WB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) zram_clear_flag(zram, index, ZRAM_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) zram_free_page(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) zram_clear_flag(zram, index, ZRAM_UNDER_WB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) zram_set_flag(zram, index, ZRAM_WB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) zram_set_element(zram, index, blk_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) blk_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) atomic64_inc(&zram->stats.pages_stored);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) spin_lock(&zram->wb_limit_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) spin_unlock(&zram->wb_limit_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) zram_slot_unlock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (blk_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) free_block_bdev(zram, blk_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) release_init_lock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) struct zram_work {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct work_struct work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) struct zram *zram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) unsigned long entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct bio_vec bvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) #if PAGE_SIZE != 4096
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) static void zram_sync_read(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct zram_work *zw = container_of(work, struct zram_work, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) struct zram *zram = zw->zram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) unsigned long entry = zw->entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct bio *bio = zw->bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) read_from_bdev_async(zram, &zw->bvec, entry, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * The block layer wants one ->submit_bio to be active at a time, so if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * chain this IO to the parent IO in the same context, it deadlocks. To
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * avoid that, use a worker thread context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) unsigned long entry, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct zram_work work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) work.bvec = *bvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) work.zram = zram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) work.entry = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) work.bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) INIT_WORK_ONSTACK(&work.work, zram_sync_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) queue_work(system_unbound_wq, &work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) flush_work(&work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) destroy_work_on_stack(&work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) unsigned long entry, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) unsigned long entry, struct bio *parent, bool sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) atomic64_inc(&zram->stats.bd_reads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return read_from_bdev_sync(zram, bvec, entry, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) return read_from_bdev_async(zram, bvec, entry, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) static inline void reset_bdev(struct zram *zram) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) unsigned long entry, struct bio *parent, bool sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) #ifdef CONFIG_ZRAM_MEMORY_TRACKING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) static struct dentry *zram_debugfs_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) static void zram_debugfs_create(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) zram_debugfs_root = debugfs_create_dir("zram", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) static void zram_debugfs_destroy(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) debugfs_remove_recursive(zram_debugfs_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) static void zram_accessed(struct zram *zram, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) zram_clear_flag(zram, index, ZRAM_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) zram->table[index].ac_time = ktime_get_boottime();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
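/*
 * Dump per-slot state through the 'block_state' debugfs file: one line per
 * allocated slot with its index, last access time and the s(ame)/
 * w(ritten-back)/h(uge)/i(dle) flag characters.
 */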
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) static ssize_t read_block_state(struct file *file, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) char *kbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ssize_t index, written = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) struct zram *zram = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) struct timespec64 ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) kbuf = kvmalloc(count, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (!kbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) down_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (!init_done(zram)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) kvfree(kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) for (index = *ppos; index < nr_pages; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) int copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) zram_slot_lock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (!zram_allocated(zram, index))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ts = ktime_to_timespec64(zram->table[index].ac_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) copied = snprintf(kbuf + written, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) "%12zd %12lld.%06lu %c%c%c%c\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) index, (s64)ts.tv_sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) ts.tv_nsec / NSEC_PER_USEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (count <= copied) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) zram_slot_unlock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) written += copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) count -= copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) zram_slot_unlock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) *ppos += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (copy_to_user(buf, kbuf, written))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) written = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) kvfree(kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) return written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) static const struct file_operations proc_zram_block_state_op = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) .open = simple_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) .read = read_block_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) .llseek = default_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) static void zram_debugfs_register(struct zram *zram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (!zram_debugfs_root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) zram_debugfs_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) debugfs_create_file("block_state", 0400, zram->debugfs_dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) zram, &proc_zram_block_state_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) static void zram_debugfs_unregister(struct zram *zram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) debugfs_remove_recursive(zram->debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) static void zram_debugfs_create(void) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) static void zram_debugfs_destroy(void) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) static void zram_accessed(struct zram *zram, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) zram_clear_flag(zram, index, ZRAM_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) static void zram_debugfs_register(struct zram *zram) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) static void zram_debugfs_unregister(struct zram *zram) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * We switched to per-cpu streams and this attr is not needed anymore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * However, we will keep it around for some time, because:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * a) we may revert per-cpu streams in the future
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * b) it's visible to user space and we need to follow our 2-year
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) *    retirement rule; but we already have a number of 'soon to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) *    altered' attrs, so max_comp_streams needs to wait for the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) *    layoff cycle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) static ssize_t max_comp_streams_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) static ssize_t max_comp_streams_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) struct device_attribute *attr, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) static ssize_t comp_algorithm_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) size_t sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) down_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) sz = zcomp_available_show(zram->compressor, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) return sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static ssize_t comp_algorithm_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) struct device_attribute *attr, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) char compressor[ARRAY_SIZE(zram->compressor)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) size_t sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) strlcpy(compressor, buf, sizeof(compressor));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) /* ignore trailing newline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) sz = strlen(compressor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (sz > 0 && compressor[sz - 1] == '\n')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) compressor[sz - 1] = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (!zcomp_available_algorithm(compressor))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) down_write(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (init_done(zram)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) up_write(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) pr_info("Can't change algorithm for initialized device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) strcpy(zram->compressor, compressor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) up_write(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static ssize_t compact_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) struct device_attribute *attr, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) down_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (!init_done(zram)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) zs_compact(zram->mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
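/* io_stat columns: failed_reads, failed_writes, invalid_io, notify_free. */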
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static ssize_t io_stat_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) down_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) ret = scnprintf(buf, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) "%8llu %8llu %8llu %8llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) (u64)atomic64_read(&zram->stats.failed_reads),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) (u64)atomic64_read(&zram->stats.failed_writes),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) (u64)atomic64_read(&zram->stats.invalid_io),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) (u64)atomic64_read(&zram->stats.notify_free));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
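/*
 * mm_stat columns: orig_data_size, compr_data_size, mem_used_total,
 * mem_limit, mem_used_max, same_pages, pages_compacted, huge_pages.
 */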
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) static ssize_t mm_stat_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) struct zs_pool_stats pool_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) u64 orig_size, mem_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) long max_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) down_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (init_done(zram)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) mem_used = zs_get_total_pages(zram->mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) zs_pool_stats(zram->mem_pool, &pool_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) orig_size = atomic64_read(&zram->stats.pages_stored);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) max_used = atomic_long_read(&zram->stats.max_used_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) ret = scnprintf(buf, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) "%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) orig_size << PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) (u64)atomic64_read(&zram->stats.compr_data_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) mem_used << PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) zram->limit_pages << PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) max_used << PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) (u64)atomic64_read(&zram->stats.same_pages),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) atomic_long_read(&pool_stats.pages_compacted),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) (u64)atomic64_read(&zram->stats.huge_pages));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) #ifdef CONFIG_ZRAM_WRITEBACK
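/*
 * bd_stat values are reported in units of 4K blocks; FOUR_K() scales a
 * native page count accordingly (e.g. one 16K page counts as four blocks).
 */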
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) #define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static ssize_t bd_stat_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) down_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) ret = scnprintf(buf, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) "%8llu %8llu %8llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static ssize_t debug_stat_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) int version = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) down_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) ret = scnprintf(buf, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) "version: %d\n%8llu %8llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) (u64)atomic64_read(&zram->stats.writestall),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) (u64)atomic64_read(&zram->stats.miss_free));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) up_read(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) static DEVICE_ATTR_RO(io_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static DEVICE_ATTR_RO(mm_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) #ifdef CONFIG_ZRAM_WRITEBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) static DEVICE_ATTR_RO(bd_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static DEVICE_ATTR_RO(debug_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) static void zram_meta_free(struct zram *zram, u64 disksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) size_t num_pages = disksize >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) size_t index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /* Free all pages that are still in this zram device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) for (index = 0; index < num_pages; index++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) zram_free_page(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) zs_destroy_pool(zram->mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) vfree(zram->table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
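/* Allocate the per-slot metadata table and the zsmalloc pool for this device. */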
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) static bool zram_meta_alloc(struct zram *zram, u64 disksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) size_t num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) num_pages = disksize >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (!zram->table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) zram->mem_pool = zs_create_pool(zram->disk->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (!zram->mem_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) vfree(zram->table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (!huge_class_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) huge_class_size = zs_huge_class_size(zram->mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * To protect against concurrent access to the same index entry, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * caller must hold this table index entry's bit_spinlock, which marks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * the entry as being accessed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) */
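/*
 * A minimal caller sketch (as done in zram_bio_discard() below):
 *
 *    zram_slot_lock(zram, index);
 *    zram_free_page(zram, index);
 *    zram_slot_unlock(zram, index);
 */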
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) static void zram_free_page(struct zram *zram, size_t index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) unsigned long handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) #ifdef CONFIG_ZRAM_MEMORY_TRACKING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) zram->table[index].ac_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (zram_test_flag(zram, index, ZRAM_IDLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) zram_clear_flag(zram, index, ZRAM_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (zram_test_flag(zram, index, ZRAM_HUGE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) zram_clear_flag(zram, index, ZRAM_HUGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) atomic64_dec(&zram->stats.huge_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (zram_test_flag(zram, index, ZRAM_WB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) zram_clear_flag(zram, index, ZRAM_WB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) free_block_bdev(zram, zram_get_element(zram, index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * No memory is allocated for same-element-filled pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * Simply clear the same-page flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (zram_test_flag(zram, index, ZRAM_SAME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) zram_clear_flag(zram, index, ZRAM_SAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) atomic64_dec(&zram->stats.same_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) handle = zram_get_handle(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) zs_free(zram->mem_pool, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) atomic64_sub(zram_get_obj_size(zram, index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) &zram->stats.compr_data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) atomic64_dec(&zram->stats.pages_stored);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) zram_set_handle(zram, index, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) zram_set_obj_size(zram, index, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) WARN_ON_ONCE(zram->table[index].flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct bio *bio, bool partial_io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) struct zcomp_strm *zstrm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) unsigned long handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) unsigned int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) void *src, *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) zram_slot_lock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (zram_test_flag(zram, index, ZRAM_WB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) struct bio_vec bvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) zram_slot_unlock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) bvec.bv_page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) bvec.bv_len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) bvec.bv_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) return read_from_bdev(zram, &bvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) zram_get_element(zram, index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) bio, partial_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) handle = zram_get_handle(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) unsigned long value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) void *mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) value = handle ? zram_get_element(zram, index) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) mem = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) zram_fill_page(mem, PAGE_SIZE, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) kunmap_atomic(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) zram_slot_unlock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) size = zram_get_obj_size(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (size != PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) zstrm = zcomp_stream_get(zram->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (size == PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) dst = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) memcpy(dst, src, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) kunmap_atomic(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) dst = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) ret = zcomp_decompress(zstrm, src, size, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) kunmap_atomic(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) zcomp_stream_put(zram->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) zs_unmap_object(zram->mem_pool, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) zram_slot_unlock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) /* Should NEVER happen. Return bio error if it does. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) if (WARN_ON(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) u32 index, int offset, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) page = bvec->bv_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (is_partial_io(bvec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) /* Use a temporary buffer to decompress the page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (unlikely(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (is_partial_io(bvec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) void *dst = kmap_atomic(bvec->bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) void *src = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) kunmap_atomic(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) kunmap_atomic(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (is_partial_io(bvec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) u32 index, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) unsigned long alloced_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) unsigned long handle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) unsigned int comp_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) void *src, *dst, *mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) struct zcomp_strm *zstrm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) struct page *page = bvec->bv_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) unsigned long element = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) enum zram_pageflags flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) mem = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (page_same_filled(mem, &element)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) kunmap_atomic(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) /* Free memory associated with this sector now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) flags = ZRAM_SAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) atomic64_inc(&zram->stats.same_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) kunmap_atomic(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) compress_again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) zstrm = zcomp_stream_get(zram->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) src = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) ret = zcomp_compress(zstrm, src, &comp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) kunmap_atomic(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (unlikely(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) zcomp_stream_put(zram->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) pr_err("Compression failed! err=%d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) zs_free(zram->mem_pool, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) if (comp_len >= huge_class_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) comp_len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) * Handle allocation has two paths:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) * a) the fast path runs with preemption disabled (for per-cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) *    streams) and with the __GFP_DIRECT_RECLAIM bit clear, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) *    we can't sleep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * b) the slow path enables preemption and attempts to allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) *    the page with the __GFP_DIRECT_RECLAIM bit set. We have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) *    put the per-cpu compression stream and, thus, re-do the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) *    compression once the handle is allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * If we have a non-NULL handle here then we are coming from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) * slow path and the handle has already been allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) handle = zs_malloc(zram->mem_pool, comp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) __GFP_KSWAPD_RECLAIM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) __GFP_NOWARN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) __GFP_HIGHMEM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) __GFP_MOVABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) __GFP_CMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (!handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) zcomp_stream_put(zram->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) atomic64_inc(&zram->stats.writestall);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) handle = zs_malloc(zram->mem_pool, comp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) GFP_NOIO | __GFP_HIGHMEM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) __GFP_MOVABLE | __GFP_CMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) if (handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) goto compress_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) alloced_pages = zs_get_total_pages(zram->mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) update_used_max(zram, alloced_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (zram->limit_pages && alloced_pages > zram->limit_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) zcomp_stream_put(zram->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) zs_free(zram->mem_pool, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) src = zstrm->buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (comp_len == PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) src = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) memcpy(dst, src, comp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (comp_len == PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) kunmap_atomic(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) zcomp_stream_put(zram->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) zs_unmap_object(zram->mem_pool, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) atomic64_add(comp_len, &zram->stats.compr_data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) * Free memory associated with this sector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) * before overwriting unused sectors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) zram_slot_lock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) zram_free_page(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) if (comp_len == PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) zram_set_flag(zram, index, ZRAM_HUGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) atomic64_inc(&zram->stats.huge_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) zram_set_flag(zram, index, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) zram_set_element(zram, index, element);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) zram_set_handle(zram, index, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) zram_set_obj_size(zram, index, comp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) zram_slot_unlock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) /* Update stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) atomic64_inc(&zram->stats.pages_stored);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) u32 index, int offset, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) struct page *page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) void *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) struct bio_vec vec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) vec = *bvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) if (is_partial_io(bvec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) void *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * This is a partial IO. We need to read the full page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * before writing the changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) ret = __zram_bvec_read(zram, page, index, bio, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) src = kmap_atomic(bvec->bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) dst = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) kunmap_atomic(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) kunmap_atomic(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) vec.bv_page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) vec.bv_len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) vec.bv_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) ret = __zram_bvec_write(zram, &vec, index, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (is_partial_io(bvec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) * zram_bio_discard - handler on discard request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) * zram_bio_discard - handler for discard requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * @offset: byte offset within physical block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) static void zram_bio_discard(struct zram *zram, u32 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) int offset, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) size_t n = bio->bi_iter.bi_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) * zram manages data in physical block size units. Because the logical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) * block size isn't identical to the physical block size on some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) * architectures, we could get a discard request pointing to a specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) * offset within a certain physical block. Although we could handle this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) * request by reading that physical block, decompressing it, partially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) * zeroing it, re-compressing it and then re-storing it, this isn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * reasonable because our intent with a discard request is to save
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * memory. So skipping this logical block is appropriate here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (n <= (PAGE_SIZE - offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) n -= (PAGE_SIZE - offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) while (n >= PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) zram_slot_lock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) zram_free_page(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) zram_slot_unlock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) atomic64_inc(&zram->stats.notify_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) n -= PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * Returns a negative errno on error. Otherwise returns 0 or 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) * 0 if the IO request was completed synchronously,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) * 1 if the IO request was successfully submitted (asynchronously).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) int offset, unsigned int op, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) if (!op_is_write(op)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) atomic64_inc(&zram->stats.num_reads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) ret = zram_bvec_read(zram, bvec, index, offset, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) flush_dcache_page(bvec->bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) atomic64_inc(&zram->stats.num_writes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) ret = zram_bvec_write(zram, bvec, index, offset, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) zram_slot_lock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) zram_accessed(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) zram_slot_unlock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (unlikely(ret < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if (!op_is_write(op))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) atomic64_inc(&zram->stats.failed_reads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) atomic64_inc(&zram->stats.failed_writes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
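/*
 * Split the bio into page-sized (or smaller, for partial pages) chunks and
 * process each chunk with zram_bvec_rw(); discard and write-zeroes requests
 * are handled separately by zram_bio_discard().
 */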
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) static void __zram_make_request(struct zram *zram, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) u32 index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) struct bio_vec bvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) struct bvec_iter iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) unsigned long start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) offset = (bio->bi_iter.bi_sector &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) switch (bio_op(bio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) case REQ_OP_DISCARD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) case REQ_OP_WRITE_ZEROES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) zram_bio_discard(zram, index, offset, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) bio_endio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) start_time = bio_start_io_acct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) bio_for_each_segment(bvec, bio, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) struct bio_vec bv = bvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) unsigned int unwritten = bvec.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) unwritten);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (zram_bvec_rw(zram, &bv, index, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) bio_op(bio), bio) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) bio->bi_status = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) bv.bv_offset += bv.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) unwritten -= bv.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) update_position(&index, &offset, &bv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) } while (unwritten);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) bio_end_io_acct(bio, start_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) bio_endio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) * Handler function for all zram I/O requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) static blk_qc_t zram_submit_bio(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) struct zram *zram = bio->bi_disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (!valid_io_request(zram, bio->bi_iter.bi_sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) bio->bi_iter.bi_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) atomic64_inc(&zram->stats.invalid_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) __zram_make_request(zram, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) return BLK_QC_T_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) bio_io_error(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) return BLK_QC_T_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
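/*
 * swap_slot_free_notify hook: eagerly drop the data backing a freed swap
 * slot. If the slot lock is contended, count a miss and leave the stale
 * data to be freed when the slot is overwritten.
 */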
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) static void zram_slot_free_notify(struct block_device *bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) unsigned long index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) struct zram *zram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) zram = bdev->bd_disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) atomic64_inc(&zram->stats.notify_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) if (!zram_slot_trylock(zram, index)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) atomic64_inc(&zram->stats.miss_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) zram_free_page(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) zram_slot_unlock(zram, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
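/*
 * ->rw_page handler: synchronous read/write of a single page without a
 * bio (used e.g. by the swap code). A return value of 1 from
 * zram_bvec_rw() means completion is reported elsewhere (e.g. an
 * asynchronous read from the backing device), so page_endio() is not
 * called here.
 */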
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) static int zram_rw_page(struct block_device *bdev, sector_t sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) struct page *page, unsigned int op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) int offset, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) u32 index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) struct zram *zram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) struct bio_vec bv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) unsigned long start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (PageTransHuge(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) zram = bdev->bd_disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (!valid_io_request(zram, sector, PAGE_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) atomic64_inc(&zram->stats.invalid_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) index = sector >> SECTORS_PER_PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) bv.bv_page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) bv.bv_len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) bv.bv_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) start_time = disk_start_io_acct(bdev->bd_disk, SECTORS_PER_PAGE, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) disk_end_io_acct(bdev->bd_disk, op, start_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) * If the I/O fails, just return the error (i.e. non-zero) without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) * calling page_endio.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) * The callers of rw_page (e.g. swap_readpage, __swap_writepage) then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) * resubmit the I/O as a bio request, and bio->bi_end_io handles the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) * error (e.g. SetPageError, set_page_dirty and other follow-up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) * work).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (unlikely(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) page_endio(page, op_is_write(op), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
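/*
 * Tear the device down to its uninitialized state: drop the capacity and
 * disksize, free the allocated metadata and statistics, destroy the
 * compression backend and detach any writeback backing device.
 */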
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) static void zram_reset_device(struct zram *zram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) struct zcomp *comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) u64 disksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) down_write(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) zram->limit_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (!init_done(zram)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) up_write(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) comp = zram->comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) disksize = zram->disksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) zram->disksize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) set_capacity(zram->disk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) part_stat_set_all(&zram->disk->part0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) up_write(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) /* I/O operations on all CPUs are done, so it is safe to free */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) zram_meta_free(zram, disksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) memset(&zram->stats, 0, sizeof(zram->stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) zcomp_destroy(comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) reset_bdev(zram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
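/*
 * Set the virtual disk size via /sys/block/zram<id>/disksize: allocate
 * the metadata, create the compression backend and publish the new
 * capacity. Fails with -EBUSY once the device has been initialized.
 */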
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) static ssize_t disksize_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) struct device_attribute *attr, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) u64 disksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) struct zcomp *comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) struct zram *zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) disksize = memparse(buf, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) if (!disksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) down_write(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (init_done(zram)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) pr_info("Cannot change disksize for initialized device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) disksize = PAGE_ALIGN(disksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) if (!zram_meta_alloc(zram, disksize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) comp = zcomp_create(zram->compressor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (IS_ERR(comp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) pr_err("Cannot initialise %s compressing backend\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) zram->compressor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) err = PTR_ERR(comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) goto out_free_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) zram->comp = comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) zram->disksize = disksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) revalidate_disk_size(zram->disk, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) up_write(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) out_free_meta:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) zram_meta_free(zram, disksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) up_write(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
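/*
 * Writing a non-zero value to /sys/block/zram<id>/reset resets the
 * device. It is temporarily claimed so that concurrent opens fail with
 * -EBUSY while the reset is in progress.
 */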
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) static ssize_t reset_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) struct device_attribute *attr, const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) unsigned short do_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) struct zram *zram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) struct block_device *bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) ret = kstrtou16(buf, 10, &do_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) if (!do_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) zram = dev_to_zram(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) bdev = bdget_disk(zram->disk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) if (!bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) mutex_lock(&bdev->bd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) /* Do not reset an active or claimed device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if (bdev->bd_openers || zram->claim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) mutex_unlock(&bdev->bd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) bdput(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) /* From now on, no one can open /dev/zram[0-9] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) zram->claim = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) mutex_unlock(&bdev->bd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) /* Make sure all pending I/O is finished */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) fsync_bdev(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) zram_reset_device(zram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) revalidate_disk_size(zram->disk, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) bdput(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) mutex_lock(&bdev->bd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) zram->claim = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) mutex_unlock(&bdev->bd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
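/* Fail the open if the device has been claimed for reset or removal. */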
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) static int zram_open(struct block_device *bdev, fmode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) struct zram *zram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) WARN_ON(!mutex_is_locked(&bdev->bd_mutex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) zram = bdev->bd_disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) /* zram was claimed for reset, so fail the open request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) if (zram->claim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) static const struct block_device_operations zram_devops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) .open = zram_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) .submit_bio = zram_submit_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) .swap_slot_free_notify = zram_slot_free_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) .rw_page = zram_rw_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) .owner = THIS_MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
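/*
 * Variant used once a writeback backing device is attached: ->rw_page is
 * left out so that callers do not assume synchronous completion for pages
 * that may have to be read back from the backing device.
 */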
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) static const struct block_device_operations zram_wb_devops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) .open = zram_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) .submit_bio = zram_submit_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) .swap_slot_free_notify = zram_slot_free_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) .owner = THIS_MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) static DEVICE_ATTR_WO(compact);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) static DEVICE_ATTR_RW(disksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) static DEVICE_ATTR_RO(initstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) static DEVICE_ATTR_WO(reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) static DEVICE_ATTR_WO(mem_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) static DEVICE_ATTR_WO(mem_used_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) static DEVICE_ATTR_WO(idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) static DEVICE_ATTR_RW(max_comp_streams);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) static DEVICE_ATTR_RW(comp_algorithm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) #ifdef CONFIG_ZRAM_WRITEBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) static DEVICE_ATTR_RW(backing_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) static DEVICE_ATTR_WO(writeback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) static DEVICE_ATTR_RW(writeback_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) static DEVICE_ATTR_RW(writeback_limit_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) static struct attribute *zram_disk_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) &dev_attr_disksize.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) &dev_attr_initstate.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) &dev_attr_reset.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) &dev_attr_compact.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) &dev_attr_mem_limit.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) &dev_attr_mem_used_max.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) &dev_attr_idle.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) &dev_attr_max_comp_streams.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) &dev_attr_comp_algorithm.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) #ifdef CONFIG_ZRAM_WRITEBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) &dev_attr_backing_dev.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) &dev_attr_writeback.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) &dev_attr_writeback_limit.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) &dev_attr_writeback_limit_enable.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) &dev_attr_io_stat.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) &dev_attr_mm_stat.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) #ifdef CONFIG_ZRAM_WRITEBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) &dev_attr_bd_stat.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) &dev_attr_debug_stat.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) static const struct attribute_group zram_disk_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) .attrs = zram_disk_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) static const struct attribute_group *zram_disk_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) &zram_disk_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) * Allocate and initialize a new zram device. The function returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) * a device_id >= 0 upon success, and a negative value otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) static int zram_add(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) struct zram *zram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) struct request_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) int ret, device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) if (!zram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) goto out_free_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) device_id = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) init_rwsem(&zram->init_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) #ifdef CONFIG_ZRAM_WRITEBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) spin_lock_init(&zram->wb_limit_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) queue = blk_alloc_queue(NUMA_NO_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (!queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) pr_err("Error allocating disk queue for device %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) goto out_free_idr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) /* gendisk structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) zram->disk = alloc_disk(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) if (!zram->disk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) pr_err("Error allocating disk structure for device %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) goto out_free_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) zram->disk->major = zram_major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) zram->disk->first_minor = device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) zram->disk->fops = &zram_devops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) zram->disk->queue = queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) zram->disk->private_data = zram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) set_capacity(zram->disk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) /* zram devices sort of resemble non-rotational disks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) * Ensure that we always get PAGE_SIZE-aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) * and n*PAGE_SIZE-sized I/O requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) blk_queue_logical_block_size(zram->disk->queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) ZRAM_LOGICAL_BLOCK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) * zram_bio_discard() will clear all logical blocks if the logical block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) * size is identical to the physical block size (PAGE_SIZE). But if they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) * differ, we skip discarding the parts of logical blocks in the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) * range that are not aligned to the physical block size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) * So we cannot guarantee that all discarded logical blocks are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) * zeroed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) zram_debugfs_register(zram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) pr_info("Added device: %s\n", zram->disk->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) return device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) out_free_queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) blk_cleanup_queue(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) out_free_idr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) idr_remove(&zram_index_idr, device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) out_free_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) kfree(zram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
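/*
 * Remove an idle device: claim it so that new opens fail, flush pending
 * I/O, reset it and free the gendisk, the queue and the zram structure.
 * Returns -EBUSY if the device is still open.
 */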
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) static int zram_remove(struct zram *zram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) struct block_device *bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) bdev = bdget_disk(zram->disk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) if (!bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) mutex_lock(&bdev->bd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) if (bdev->bd_openers || zram->claim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) mutex_unlock(&bdev->bd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) bdput(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) zram->claim = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) mutex_unlock(&bdev->bd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) zram_debugfs_unregister(zram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) /* Make sure all pending I/O is finished */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) fsync_bdev(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) zram_reset_device(zram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) bdput(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) pr_info("Removed device: %s\n", zram->disk->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) del_gendisk(zram->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) blk_cleanup_queue(zram->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) put_disk(zram->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) kfree(zram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) /* zram-control sysfs attributes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) * NOTE: hot_add attribute is not the usual read-only sysfs attribute, in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) * the sense that reading from this file does alter the state of your
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) * system: it creates a new un-initialized zram device and returns that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) * device's device_id (or an error code if it fails to create a new device).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) static ssize_t hot_add_show(struct class *class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) struct class_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) mutex_lock(&zram_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) ret = zram_add();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) mutex_unlock(&zram_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) static struct class_attribute class_attr_hot_add =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) __ATTR(hot_add, 0400, hot_add_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
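/*
 * Writing a device id to /sys/class/zram-control/hot_remove removes the
 * corresponding device, provided it is not currently open.
 */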
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) static ssize_t hot_remove_store(struct class *class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) struct class_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) struct zram *zram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) int ret, dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) /* dev_id is gendisk->first_minor, which is `int' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) ret = kstrtoint(buf, 10, &dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) if (dev_id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) mutex_lock(&zram_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) zram = idr_find(&zram_index_idr, dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if (zram) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) ret = zram_remove(zram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) idr_remove(&zram_index_idr, dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) mutex_unlock(&zram_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) return ret ? ret : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) static CLASS_ATTR_WO(hot_remove);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) static struct attribute *zram_control_class_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) &class_attr_hot_add.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) &class_attr_hot_remove.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) ATTRIBUTE_GROUPS(zram_control_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) static struct class zram_control_class = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) .name = "zram-control",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) .class_groups = zram_control_class_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
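/* idr_for_each() callback used by destroy_devices() to drop every device */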
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) static int zram_remove_cb(int id, void *ptr, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) zram_remove(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
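/*
 * Undo zram_init(): unregister the zram-control class, remove all
 * devices, tear down debugfs, release the block major and the cpuhp
 * state.
 */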
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) static void destroy_devices(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) class_unregister(&zram_control_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) zram_debugfs_destroy();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) idr_destroy(&zram_index_idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) unregister_blkdev(zram_major, "zram");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
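/*
 * Module init: set up the cpuhp callbacks for the per-CPU compression
 * streams, register the zram-control class and the zram block major,
 * then pre-create num_devices devices.
 */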
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) static int __init zram_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) zcomp_cpu_up_prepare, zcomp_cpu_dead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) ret = class_register(&zram_control_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) pr_err("Unable to register zram-control class\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) zram_debugfs_create();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) zram_major = register_blkdev(0, "zram");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) if (zram_major <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) pr_err("Unable to get major number\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) class_unregister(&zram_control_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) while (num_devices != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) mutex_lock(&zram_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) ret = zram_add();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) mutex_unlock(&zram_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) num_devices--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) out_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) destroy_devices();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) static void __exit zram_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) destroy_devices();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) module_init(zram_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) module_exit(zram_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) module_param(num_devices, uint, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) MODULE_LICENSE("Dual BSD/GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) MODULE_DESCRIPTION("Compressed RAM Block Device");