// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) International Business Machines Corp., 2000-2005
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint	pagealloc;	/* # of page allocations */
	uint	pagefree;	/* # of page frees */
	uint	lockwait;	/* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit_unlock(META_locked, &mp->flag);
	wake_up(&mp->wait);
}

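/*
 * Slow path of lock_metapage(): sleep until META_locked is released.
 * mp->page must be locked on entry; the page lock is dropped while
 * sleeping so the current lock holder can make progress, then
 * reacquired before retrying.
 */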
static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);
	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			unlock_page(mp->page);
			io_schedule();
			lock_page(mp->page);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)

#if MPS_PER_PAGE > 1

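/*
 * When the metapage size (PSIZE) is smaller than PAGE_SIZE, several
 * metapages share one page.  A meta_anchor hangs off page_private()
 * and tracks the metapages attached to the page as well as the number
 * of I/Os in flight against it.
 */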
struct meta_anchor {
	int mp_count;
	atomic_t io_count;
	struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	if (!PagePrivate(page))
		return NULL;
	return mp_anchor(page)->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;	/* log2 blocks per metapage */

	if (PagePrivate(page))
		a = mp_anchor(page);
	else {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		set_page_private(page, (unsigned long)a);
		SetPagePrivate(page);
		kmap(page);
	}

	if (mp) {
		l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a = mp_anchor(page);
	int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kunmap(page);
	}
}

static inline void inc_io(struct page *page)
{
	atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
	if (atomic_dec_and_test(&mp_anchor(page)->io_count))
		handler(page);
}

#else
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	if (mp) {
		set_page_private(page, (unsigned long)mp);
		SetPagePrivate(page);
		kmap(page);
	}
	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	set_page_private(page, 0);
	ClearPagePrivate(page);
	kunmap(page);
}

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)

#endif

static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);

	if (mp) {
		mp->lid = 0;
		mp->lsn = 0;
		mp->data = NULL;
		mp->clsn = 0;
		mp->log = NULL;
		init_waitqueue_head(&mp->wait);
	}
	return mp;
}

static inline void free_metapage(struct metapage *mp)
{
	mempool_free(mp, metapage_mempool);
}

int __init metapage_init(void)
{
	/*
	 * Allocate the metapage structures
	 */
	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
					   0, 0, NULL);
	if (metapage_cache == NULL)
		return -ENOMEM;

	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
						    metapage_cache);

	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

	return 0;
}

void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}

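/*
 * Detach the metapage from the page and free it, unless it is still
 * referenced, pinned by nohomeok, dirty, or has I/O in flight.
 * Called with the page locked.
 */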
static inline void drop_metapage(struct page *page, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(page, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}

/*
 * Metapage address space operations
 */

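/*
 * Map a file-relative block to an on-disk block via xtLookup() and trim
 * *len so the extent does not run past i_size.  Returns the physical
 * block number, or 0 if the block is unmapped or beyond end of file.
 * For an inode with i_ino == 0 (the direct block-device mapping) the
 * logical block is returned unchanged.
 */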
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}

static void last_read_complete(struct page *page)
{
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
}

static void metapage_read_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	if (bio->bi_status) {
		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
		SetPageError(page);
	}

	dec_io(page, last_read_complete);
	bio_put(bio);
}

static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;
	/*
	 * This can race. Recheck that log hasn't been set to null, and after
	 * acquiring logsync lock, recheck lsn
	 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}

static void last_write_complete(struct page *page)
{
	struct metapage *mp;
	unsigned int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * I'd like to call drop_metapage here, but I don't think it's
		 * safe unless I have the page locked
		 */
	}
	end_page_writeback(page);
}

static void metapage_write_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	BUG_ON(!PagePrivate(page));

	if (bio->bi_status) {
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		SetPageError(page);
	}
	dec_io(page, last_write_complete);
	bio_put(bio);
}

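/*
 * Write back every dirty metapage in the page.  Dirty metapages that
 * are contiguous both in the page and on disk are coalesced into a
 * single bio; a new bio is started whenever the run breaks.  Metapages
 * pinned by nohomeok are skipped (unless META_forcewrite is set) and
 * the page is redirtied so they are retried later.
 */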
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio = NULL;
	int block_offset;	/* block offset of mp within page */
	struct inode *inode = page->mapping->host;
	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	int len;
	int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	int nr_underway = 0;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	int offset;
	int bad_blocks = 0;

	page_start = (sector_t)page->index <<
		     (PAGE_SHIFT - inode->i_blkbits);
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		set_bit(META_io, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				continue;
			}
			/* Not contiguous */
			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
			    bio_bytes)
				goto add_failed;
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 */
			inc_io(page);
			if (!bio->bi_iter.bi_size)
				goto dump_bio;
			submit_bio(bio);
			nr_underway++;
			bio = NULL;
		} else
			inc_io(page);
		xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			/*
			 * We already called inc_io(), but can't cancel it
			 * with dec_io() until we're done with the page
			 */
			bad_blocks++;
			continue;
		}
		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(GFP_NOFS, 1);
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = page;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		/* Don't call bio_add_page yet, we may add to this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
			goto add_failed;
		if (!bio->bi_iter.bi_size)
			goto dump_bio;

		submit_bio(bio);
		nr_underway++;
	}
	if (redirty)
		redirty_page_for_writepage(wbc, page);

	unlock_page(page);

	if (bad_blocks)
		goto err_out;

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
add_failed:
	/* We should never reach here, since we're only adding one vec */
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	goto skip;
dump_bio:
	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
skip:
	bio_put(bio);
	unlock_page(page);
	dec_io(page, last_write_complete);
err_out:
	while (bad_blocks--)
		dec_io(page, last_write_complete);
	return -EIO;
}

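/*
 * Read in a metadata page.  Each mapped run of blocks within the page
 * gets its own READ bio; the page is unlocked by last_read_complete()
 * once all of them have finished, or immediately if nothing is mapped.
 */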
static int metapage_readpage(struct file *fp, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct bio *bio = NULL;
	int block_offset;
	int blocks_per_page = i_blocks_per_page(inode, page);
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	int xlen;
	unsigned int len;
	int offset;

	BUG_ON(!PageLocked(page));
	page_start = (sector_t)page->index <<
		     (PAGE_SHIFT - inode->i_blkbits);

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			if (!PagePrivate(page))
				insert_metapage(page, NULL);
			inc_io(page);
			if (bio)
				submit_bio(bio);

			bio = bio_alloc(GFP_NOFS, 1);
			bio_set_dev(bio, inode->i_sb->s_bdev);
			bio->bi_iter.bi_sector =
				pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = page;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			if (bio_add_page(bio, page, len, offset) < len)
				goto add_failed;
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(bio);
	else
		unlock_page(page);

	return 0;

add_failed:
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	bio_put(bio);
	dec_io(page, last_read_complete);
	return -EIO;
}

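/*
 * Release all metapages attached to the page so the VM can free it.
 * Returns 0 (refuse) if any metapage is still referenced, pinned by
 * nohomeok, or dirty; otherwise removes them from the logsync list,
 * frees them, and returns 1.
 */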
static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct metapage *mp;
	int ret = 1;
	int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp)
			continue;

		jfs_info("metapage_releasepage: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = 0;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(page, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}

static void metapage_invalidatepage(struct page *page, unsigned int offset,
				    unsigned int length)
{
	BUG_ON(offset || length < PAGE_SIZE);

	BUG_ON(PageWriteback(page));

	metapage_releasepage(page, 0);
}

const struct address_space_operations jfs_metapage_aops = {
	.readpage	= metapage_readpage,
	.writepage	= metapage_writepage,
	.releasepage	= metapage_releasepage,
	.invalidatepage	= metapage_invalidatepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
};

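/*
 * Look up (or create) the metapage covering @lblock of @inode.
 *
 * @absolute selects the block device's direct mapping instead of the
 * inode's own mapping.  @new indicates the caller will initialize the
 * metapage, so its data is zeroed (and, when PSIZE == PAGE_SIZE, the
 * page is not read from disk at all).  Returns the metapage with its
 * count elevated and META_locked held, or NULL on failure.  Callers
 * normally use the read_metapage()/get_metapage() wrappers from
 * jfs_metapage.h rather than calling this directly.
 */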
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct page *page;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * If an nfs client tries to read an inode that is larger
		 * than any existing inodes, we may try to read past the
		 * end of the inode map
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_SIZE)) {
		page = grab_cache_page(mapping, page_index);
		if (!page) {
			jfs_err("grab_cache_page failed!");
			return NULL;
		}
		SetPageUptodate(page);
	} else {
		page = read_mapping_page(mapping, page_index, NULL);
		if (IS_ERR(page) || !PageUptodate(page)) {
			jfs_err("read_mapping_page failed!");
			return NULL;
		}
		lock_page(page);
	}

	mp = page_to_mp(page, page_offset);
	if (mp) {
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "get_mp->logical_size != size\n");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "using a discarded metapage\n");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		if (!mp)
			goto unlock;
		mp->page = page;
		mp->sb = inode->i_sb;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = page_address(page) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(page, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	unlock_page(page);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	unlock_page(page);
	return NULL;
}

void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	get_page(mp->page);
	lock_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
}

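/*
 * Write a metapage out immediately, bypassing the nohomeok check in
 * metapage_writepage() via META_forcewrite, and wait for the write to
 * complete.
 */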
void force_metapage(struct metapage *mp)
{
	struct page *page = mp->page;
	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	get_page(page);
	lock_page(page);
	set_page_dirty(page);
	if (write_one_page(page))
		jfs_error(mp->sb, "write_one_page() failed\n");
	clear_bit(META_forcewrite, &mp->flag);
	put_page(page);
}

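/*
 * hold_metapage()/put_metapage() bracket short sections that need the
 * metapage's page locked.  put_metapage() simply unlocks the page if
 * someone else still holds a reference; otherwise it takes a temporary
 * reference and lets release_metapage() do the real cleanup.
 */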
void hold_metapage(struct metapage *mp)
{
	lock_page(mp->page);
}

void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		unlock_page(mp->page);
		return;
	}
	get_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
	release_metapage(mp);
}

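/*
 * Drop a reference obtained from __get_metapage()/grab_metapage().
 * When the last reference goes away, a dirty metapage is marked for
 * writeback (written synchronously if META_sync is set), a clean one
 * is taken off the logsync list, and drop_metapage() frees it unless
 * it is still pinned.
 */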
void release_metapage(struct metapage * mp)
{
	struct page *page = mp->page;
	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	BUG_ON(!page);

	lock_page(page);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		unlock_page(page);
		put_page(page);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		set_page_dirty(page);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			if (write_one_page(page))
				jfs_error(mp->sb, "write_one_page() failed\n");
			lock_page(page); /* write_one_page unlocks the page */
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

	/* Try to keep metapages from using up too much memory */
	drop_metapage(page, mp);

	unlock_page(page);
	put_page(page);
}

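/*
 * Mark every metapage covering blocks [addr, addr + len) in the block
 * device's mapping as META_discard so it is dropped instead of written
 * back, typically because the covered metadata is being freed.
 */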
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in the block device's mapping */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	struct page *page;
	unsigned int offset;

	/*
	 * Mark metapages to discard. They will eventually be
	 * released, but should not be written.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
		if (!page)
			continue;
		for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
			mp = page_to_mp(page, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		unlock_page(page);
		put_page(page);
	}
}

#ifdef CONFIG_JFS_STATISTICS
int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m,
		   "JFS Metapage statistics\n"
		   "=======================\n"
		   "page allocations = %d\n"
		   "page frees = %d\n"
		   "lock waits = %d\n",
		   mpStat.pagealloc,
		   mpStat.pagefree,
		   mpStat.lockwait);
	return 0;
}
#endif