^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /* Storage object read/write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Written by David Howells (dhowells@redhat.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/mount.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/swap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include "internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 * - returns 0 so that other waiters on the queue are still walked
 */
static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct fscache_retrieval *op = monitor->op;
	struct wait_page_key *key = _key;
	/* the backing page this monitor watches (stashed in wait->private
	 * when the monitor was installed) */
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->page, key->bit_nr);

	/* ignore wakeups that aren't the unlock event on our own page */
	if (key->page != page || key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erroneous? - the copier
		 * will sort this out via cachefiles_read_reissue() */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->entry);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(op);

	/* We need to temporarily bump the usage count as we don't own a ref
	 * here otherwise cachefiles_read_copier() may free the op between the
	 * monitor being enqueued on the op->to_do list and the op getting
	 * enqueued on the work queue.
	 */
	fscache_get_retrieval(op);

	object = container_of(op->op.object, struct cachefiles_object, fscache);
	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &op->to_do);
	/* enqueue whilst still holding work_lock so the copier cannot
	 * consume and free the monitor before the op is queued */
	fscache_enqueue_retrieval(op);
	spin_unlock(&object->work_lock);

	fscache_put_retrieval(op);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 * - NOTE(review): caller (cachefiles_read_copier) still holds the ref on
 *   monitor->back_page taken when the monitor was installed
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       d_backing_inode(object->backer)->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	/* look the index up again to see whether our page is still the one
	 * installed in the backing mapping */
	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);

	/* re-arm the monitor: reinitialise the link (the waiter may have
	 * list_del'd it) and hook it back onto the page's waitqueue before
	 * inspecting the page state */
	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			/* readpage unlocks the page itself on error, so
			 * don't unlock it again here */
			goto discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
discard:
	/* the unlock above may have already fired the waiter and re-queued
	 * the monitor on op->to_do, so take it back off under the lock */
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 * - pulls monitors off op->to_do one at a time, dropping the work lock
 *   whilst each one is processed
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino);

	/* bound the batch size so one op can't hog the workqueue thread */
	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

	recheck:
		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
			     &object->fscache.cookie->flags)) {
			/* the cookie is being invalidated - don't hand
			 * stale data to the netfs */
			error = -ESTALE;
		} else if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);
			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				/* monitor re-armed; it will come back on
				 * to_do - keep our refs on it */
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		put_page(monitor->back_page);

		/* complete this page and release everything the monitor
		 * held: netfs page ref, op usage count and the monitor
		 * itself */
		fscache_end_io(op, monitor->netfs_page, error);
		put_page(monitor->netfs_page);
		fscache_retrieval_complete(op, 1);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 * - returns 0 on success (the read completes asynchronously via the
 *   monitor), -ENOBUFS on backing I/O error, -ENOMEM on allocation failure
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
	if (!monitor)
		goto nomem;

	/* the monitor holds a usage on the op until the copier releases it */
	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = d_backing_inode(object->backer)->i_mapping;
	newpage = NULL;

	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp);
			if (!newpage)
				goto nomem_monitor;
		}

		/* -EEXIST means someone raced us into the page cache; loop
		 * round and pick up their page instead */
		ret = add_to_page_cache_lru(newpage, bmapping,
					    netpage->index, cachefiles_gfp);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to start
	 * it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor - these refs are handed over to the monitor
	 * and dropped by cachefiles_read_copier() when it completes */
	get_page(monitor->netfs_page);
	get_page(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
	monitor = NULL;

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		put_page(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	/* can't lock it -> a read is in flight; fall back to monitoring */
	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);
	fscache_retrieval_complete(op, 1);

success:
	_debug("success");
	ret = 0;

out:
	/* drop whatever wasn't handed over to the monitor */
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM) {
		/* transient failure - not a backing-store error */
		fscache_retrieval_complete(op, 1);
		goto out;
	}
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	fscache_retrieval_complete(op, 1);
	ret = -ENOBUFS;
	goto out;

nomem_page:
	put_page(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) * read a page from the cache or allocate a block in which to store it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) * - cache withdrawal is prevented by the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) * - returns -EINTR if interrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) * - returns -ENOMEM if ran out of memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) * - returns -ENOBUFS if no buffers can be made available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) * - returns -ENOBUFS if page is beyond EOF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) * - if the page is backed by a block in the cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) * - a read will be started which will call the callback on completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) * - 0 will be returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) * - else if the page is unbacked:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) * - the metadata will be retained
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) * - -ENODATA will be returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) struct cachefiles_object *object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) struct cachefiles_cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) sector_t block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) unsigned shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) int ret, ret2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) object = container_of(op->op.object,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) struct cachefiles_object, fscache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) cache = container_of(object->fscache.cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) struct cachefiles_cache, cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) _enter("{%p},{%lx},,,", object, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) if (!object->backer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) goto enobufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) inode = d_backing_inode(object->backer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) ASSERT(S_ISREG(inode->i_mode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) /* calculate the shift required to use bmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) op->op.flags |= FSCACHE_OP_ASYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) op->op.processor = cachefiles_read_copier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) /* we assume the absence or presence of the first block is a good
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) * enough indication for the page as a whole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) * - TODO: don't use bmap() for this as it is _not_ actually good
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) * enough for this as it doesn't indicate errors, but it's all we've
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) * got for the moment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) block = page->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) block <<= shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) ret2 = bmap(inode, &block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) ASSERT(ret2 == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) _debug("%llx -> %llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) (unsigned long long) (page->index << shift),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) (unsigned long long) block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) if (block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) /* submit the apparently valid page to the backing fs to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) * read from disk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) ret = cachefiles_read_backing_file_one(object, op, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) } else if (cachefiles_has_space(cache, 0, 1) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) /* there's space in the cache we can use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) fscache_mark_page_cached(op, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) fscache_retrieval_complete(op, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) ret = -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) goto enobufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) _leave(" = %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) enobufs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) fscache_retrieval_complete(op, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) _leave(" = -ENOBUFS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) * read the corresponding pages to the given set from the backing file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) * - any uncertain pages are simply discarded, to be tried again another time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) static int cachefiles_read_backing_file(struct cachefiles_object *object,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) struct fscache_retrieval *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) struct cachefiles_one_read *monitor = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) _enter("");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) list_for_each_entry_safe(netpage, _n, list, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) list_del(&netpage->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) _debug("read back %p{%lu,%d}",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) netpage, netpage->index, page_count(netpage));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) if (!monitor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) if (!monitor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) monitor->op = fscache_get_retrieval(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) init_waitqueue_func_entry(&monitor->monitor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) cachefiles_read_waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) backpage = find_get_page(bmapping, netpage->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) if (backpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) goto backing_page_already_present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) if (!newpage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) newpage = __page_cache_alloc(cachefiles_gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) if (!newpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) ret = add_to_page_cache_lru(newpage, bmapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) netpage->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) cachefiles_gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) goto installed_new_backing_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) if (ret != -EEXIST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) /* we've installed a new backing page, so now we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) * to start it reading */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) installed_new_backing_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) _debug("- new %p", newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) backpage = newpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) newpage = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) reread_backing_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) ret = bmapping->a_ops->readpage(NULL, backpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) goto read_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) /* add the netfs page to the pagecache and LRU, and set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) * monitor to transfer the data across */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) monitor_backing_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) _debug("- monitor add");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) ret = add_to_page_cache_lru(netpage, op->mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) netpage->index, cachefiles_gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) if (ret == -EEXIST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) put_page(backpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) backpage = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) put_page(netpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) netpage = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) fscache_retrieval_complete(op, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) /* install a monitor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) get_page(netpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) monitor->netfs_page = netpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) get_page(backpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) monitor->back_page = backpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) monitor->monitor.private = backpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) add_page_wait_queue(backpage, &monitor->monitor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) monitor = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) /* but the page may have been read before the monitor was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) * installed, so the monitor may miss the event - so we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) * ensure that we do get one in such a case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) if (trylock_page(backpage)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) _debug("2unlock %p {%lx}", backpage, backpage->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) unlock_page(backpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) put_page(backpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) backpage = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) put_page(netpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) netpage = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) /* if the backing page is already present, it can be in one of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) * three states: read in progress, read failed or read okay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) backing_page_already_present:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) _debug("- present %p", backpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) if (PageError(backpage))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) goto io_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) if (PageUptodate(backpage))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) goto backing_page_already_uptodate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) _debug("- not ready %p{%lx}", backpage, backpage->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) if (!trylock_page(backpage))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) goto monitor_backing_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) if (PageError(backpage)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) _debug("error %lx", backpage->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) unlock_page(backpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) goto io_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) if (PageUptodate(backpage))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) goto backing_page_already_uptodate_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) /* we've locked a page that's neither up to date nor erroneous,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) * so we need to attempt to read it again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) goto reread_backing_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) /* the backing page is already up to date, attach the netfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) * page to the pagecache and LRU and copy the data across */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) backing_page_already_uptodate_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) _debug("uptodate %lx", backpage->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) unlock_page(backpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) backing_page_already_uptodate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) _debug("- uptodate");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) ret = add_to_page_cache_lru(netpage, op->mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) netpage->index, cachefiles_gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) if (ret == -EEXIST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) put_page(backpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) backpage = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) put_page(netpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) netpage = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) fscache_retrieval_complete(op, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) copy_highpage(netpage, backpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) put_page(backpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) backpage = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) fscache_mark_page_cached(op, netpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) /* the netpage is unlocked and marked up to date here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) fscache_end_io(op, netpage, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) put_page(netpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) netpage = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) fscache_retrieval_complete(op, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) netpage = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) _debug("out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) /* tidy up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) if (newpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) put_page(newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) if (netpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) put_page(netpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) if (backpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) put_page(backpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) if (monitor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) fscache_put_retrieval(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) kfree(monitor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) list_for_each_entry_safe(netpage, _n, list, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) list_del(&netpage->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) put_page(netpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) fscache_retrieval_complete(op, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) _leave(" = %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) _debug("nomem");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) goto record_page_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) read_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) _debug("read error %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) if (ret == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) goto record_page_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) io_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) cachefiles_io_error_obj(object, "Page read error on backing file");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) ret = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) record_page_complete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) fscache_retrieval_complete(op, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) * read a list of pages from the cache or allocate blocks in which to store
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) struct list_head *pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) unsigned *nr_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) struct cachefiles_object *object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) struct cachefiles_cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) struct list_head backpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) struct pagevec pagevec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) struct page *page, *_n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) unsigned shift, nrbackpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) int ret, ret2, space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) object = container_of(op->op.object,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) struct cachefiles_object, fscache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) cache = container_of(object->fscache.cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) struct cachefiles_cache, cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) _enter("{OBJ%x,%d},,%d,,",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) object->fscache.debug_id, atomic_read(&op->op.usage),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) *nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) if (!object->backer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) goto all_enobufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) space = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) space = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) inode = d_backing_inode(object->backer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) ASSERT(S_ISREG(inode->i_mode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) /* calculate the shift required to use bmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) pagevec_init(&pagevec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) op->op.flags |= FSCACHE_OP_ASYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) op->op.processor = cachefiles_read_copier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) INIT_LIST_HEAD(&backpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) nrbackpages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) ret = space ? -ENODATA : -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) list_for_each_entry_safe(page, _n, pages, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) sector_t block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) /* we assume the absence or presence of the first block is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * good enough indication for the page as a whole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) * - TODO: don't use bmap() for this as it is _not_ actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * good enough for this as it doesn't indicate errors, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) * it's all we've got for the moment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) block = page->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) block <<= shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) ret2 = bmap(inode, &block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) ASSERT(ret2 == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) _debug("%llx -> %llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) (unsigned long long) (page->index << shift),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) (unsigned long long) block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) /* we have data - add it to the list to give to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) * backing fs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) list_move(&page->lru, &backpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) (*nr_pages)--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) nrbackpages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) } else if (space && pagevec_add(&pagevec, page) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) fscache_mark_pages_cached(op, &pagevec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) fscache_retrieval_complete(op, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) ret = -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) fscache_retrieval_complete(op, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) if (pagevec_count(&pagevec) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) fscache_mark_pages_cached(op, &pagevec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) if (list_empty(pages))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) /* submit the apparently valid pages to the backing fs to be read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * disk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (nrbackpages > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) ret2 = cachefiles_read_backing_file(object, op, &backpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (ret2 == -ENOMEM || ret2 == -EINTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) ret = ret2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) _leave(" = %d [nr=%u%s]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) ret, *nr_pages, list_empty(pages) ? " empty" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) all_enobufs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) fscache_retrieval_complete(op, *nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * allocate a block in the cache in which to store a page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * - cache withdrawal is prevented by the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * - returns -EINTR if interrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * - returns -ENOMEM if ran out of memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * - returns -ENOBUFS if no buffers can be made available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * - returns -ENOBUFS if page is beyond EOF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * - otherwise:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * - the metadata will be retained
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * - 0 will be returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) int cachefiles_allocate_page(struct fscache_retrieval *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) struct cachefiles_object *object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct cachefiles_cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) object = container_of(op->op.object,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct cachefiles_object, fscache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) cache = container_of(object->fscache.cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct cachefiles_cache, cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) _enter("%p,{%lx},", object, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) ret = cachefiles_has_space(cache, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) fscache_mark_page_cached(op, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) ret = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) fscache_retrieval_complete(op, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) _leave(" = %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * allocate blocks in the cache in which to store a set of pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * - cache withdrawal is prevented by the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * - returns -EINTR if interrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * - returns -ENOMEM if ran out of memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * - returns -ENOBUFS if some buffers couldn't be made available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * - returns -ENOBUFS if some pages are beyond EOF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * - otherwise:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * - -ENODATA will be returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * - metadata will be retained for any page marked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) int cachefiles_allocate_pages(struct fscache_retrieval *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) struct list_head *pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) unsigned *nr_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct cachefiles_object *object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct cachefiles_cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct pagevec pagevec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) object = container_of(op->op.object,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct cachefiles_object, fscache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) cache = container_of(object->fscache.cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct cachefiles_cache, cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) _enter("%p,,,%d,", object, *nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) ret = cachefiles_has_space(cache, 0, *nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) pagevec_init(&pagevec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) list_for_each_entry(page, pages, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (pagevec_add(&pagevec, page) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) fscache_mark_pages_cached(op, &pagevec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (pagevec_count(&pagevec) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) fscache_mark_pages_cached(op, &pagevec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) ret = -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) ret = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) fscache_retrieval_complete(op, *nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) _leave(" = %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * request a page be stored in the cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * - cache withdrawal is prevented by the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * - this request may be ignored if there's no cache block available, in which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * case -ENOBUFS will be returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * - if the op is in progress, 0 will be returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) int cachefiles_write_page(struct fscache_storage *op, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) struct cachefiles_object *object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) struct cachefiles_cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) struct file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) struct path path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) loff_t pos, eof;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) int ret = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ASSERT(op != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) ASSERT(page != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) object = container_of(op->op.object,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct cachefiles_object, fscache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) _enter("%p,%p{%lx},,,", object, page, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (!object->backer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) _leave(" = -ENOBUFS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) ASSERT(d_is_reg(object->backer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) cache = container_of(object->fscache.cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct cachefiles_cache, cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) pos = (loff_t)page->index << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) /* We mustn't write more data than we have, so we have to beware of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * partial page at EOF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) eof = object->fscache.store_limit_l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (pos >= eof)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) /* write the page to the backing filesystem and let it store it in its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * own time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) path.mnt = cache->mnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) path.dentry = object->backer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (IS_ERR(file)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) ret = PTR_ERR(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) goto error_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (eof & ~PAGE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (eof - pos < PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) _debug("cut short %llx to %llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) pos, eof);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) len = eof - pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) ASSERTCMP(pos + len, ==, eof);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) data = kmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ret = kernel_write(file, data, len, &pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) fput(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (ret != len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) goto error_eio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) _leave(" = 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) error_eio:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) error_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (ret == -EIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) cachefiles_io_error_obj(object,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) "Write page to backing file failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) _leave(" = -ENOBUFS [%d]", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * detach a backing block from a page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * - cache withdrawal is prevented by the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) __releases(&object->fscache.cookie->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) struct cachefiles_object *object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) object = container_of(_object, struct cachefiles_object, fscache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) _enter("%p,{%lu}", object, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) spin_unlock(&object->fscache.cookie->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }